1 //===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// This is the parent TargetLowering class for hardware code gen
12 /// targets.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #define AMDGPU_LOG2E_F     1.44269504088896340735992468100189214f
17 #define AMDGPU_LN2_F       0.693147180559945309417232121458176568f
18 #define AMDGPU_LN10_F      2.30258509299404568401799145468436421f
19 
20 #include "AMDGPUISelLowering.h"
21 #include "AMDGPU.h"
22 #include "AMDGPUCallLowering.h"
23 #include "AMDGPUFrameLowering.h"
24 #include "AMDGPUIntrinsicInfo.h"
25 #include "AMDGPURegisterInfo.h"
26 #include "AMDGPUSubtarget.h"
27 #include "AMDGPUTargetMachine.h"
28 #include "Utils/AMDGPUBaseInfo.h"
29 #include "R600MachineFunctionInfo.h"
30 #include "SIInstrInfo.h"
31 #include "SIMachineFunctionInfo.h"
32 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
33 #include "llvm/CodeGen/Analysis.h"
34 #include "llvm/CodeGen/CallingConvLower.h"
35 #include "llvm/CodeGen/MachineFunction.h"
36 #include "llvm/CodeGen/MachineRegisterInfo.h"
37 #include "llvm/CodeGen/SelectionDAG.h"
38 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/DiagnosticInfo.h"
41 #include "llvm/Support/KnownBits.h"
42 using namespace llvm;
43 
44 static bool allocateCCRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
45                            CCValAssign::LocInfo LocInfo,
46                            ISD::ArgFlagsTy ArgFlags, CCState &State,
47                            const TargetRegisterClass *RC,
48                            unsigned NumRegs) {
49   ArrayRef<MCPhysReg> RegList = makeArrayRef(RC->begin(), NumRegs);
50   unsigned RegResult = State.AllocateReg(RegList);
51   if (RegResult == AMDGPU::NoRegister)
52     return false;
53 
54   State.addLoc(CCValAssign::getReg(ValNo, ValVT, RegResult, LocVT, LocInfo));
55   return true;
56 }
57 
58 static bool allocateSGPRTuple(unsigned ValNo, MVT ValVT, MVT LocVT,
59                               CCValAssign::LocInfo LocInfo,
60                               ISD::ArgFlagsTy ArgFlags, CCState &State) {
61   switch (LocVT.SimpleTy) {
62   case MVT::i64:
63   case MVT::f64:
64   case MVT::v2i32:
65   case MVT::v2f32:
66   case MVT::v4i16:
67   case MVT::v4f16: {
68     // Up to SGPR0-SGPR39
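    // (Assuming SGPR_64RegClass enumerates the even-aligned pairs in order,
    // the first 20 entries are SGPR[0:1] through SGPR[38:39].)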
69     return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
70                           &AMDGPU::SGPR_64RegClass, 20);
71   }
72   default:
73     return false;
74   }
75 }
76 
77 // Allocate up to VGPR31.
78 //
// TODO: Since there are no VGPR alignment requirements, would it be better to
// split into individual scalar registers?
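//
// The register counts passed below are chosen so that, assuming the tuple
// classes enumerate consecutive starting registers, every allocated tuple ends
// at or before VGPR31: 31 64-bit pairs, 29 128-bit, 25 256-bit and 17 512-bit
// tuples.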
81 static bool allocateVGPRTuple(unsigned ValNo, MVT ValVT, MVT LocVT,
82                               CCValAssign::LocInfo LocInfo,
83                               ISD::ArgFlagsTy ArgFlags, CCState &State) {
84   switch (LocVT.SimpleTy) {
85   case MVT::i64:
86   case MVT::f64:
87   case MVT::v2i32:
88   case MVT::v2f32:
89   case MVT::v4i16:
90   case MVT::v4f16: {
91     return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
92                           &AMDGPU::VReg_64RegClass, 31);
93   }
94   case MVT::v4i32:
95   case MVT::v4f32:
96   case MVT::v2i64:
97   case MVT::v2f64: {
98     return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
99                           &AMDGPU::VReg_128RegClass, 29);
100   }
101   case MVT::v8i32:
102   case MVT::v8f32: {
103     return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
104                           &AMDGPU::VReg_256RegClass, 25);
105 
106   }
107   case MVT::v16i32:
108   case MVT::v16f32: {
109     return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
110                           &AMDGPU::VReg_512RegClass, 17);
111 
112   }
113   default:
114     return false;
115   }
116 }
117 
118 #include "AMDGPUGenCallingConv.inc"
119 
120 // Find a larger type to do a load / store of a vector with.
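// e.g. anything of 32 bits or less becomes a single integer of its store size,
// and a 64-bit vector such as v4i16 becomes v2i32.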
121 EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
122   unsigned StoreSize = VT.getStoreSizeInBits();
123   if (StoreSize <= 32)
124     return EVT::getIntegerVT(Ctx, StoreSize);
125 
126   assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
127   return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
128 }
129 
130 unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
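  // This is the number of bits needed to hold the value as an unsigned number:
  // e.g. a 32-bit value known to have at least 8 leading zero bits needs only
  // 24 bits, so this returns 24 for it.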
131   KnownBits Known;
132   EVT VT = Op.getValueType();
133   DAG.computeKnownBits(Op, Known);
134 
135   return VT.getSizeInBits() - Known.countMinLeadingZeros();
136 }
137 
138 unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
139   EVT VT = Op.getValueType();
140 
  // In order for this to be a signed 24-bit value, bit 23 must be a sign
  // bit.
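  // e.g. a value sign-extended from 24 bits has at least 9 sign bits in an
  // i32, so this returns at most 23 for it.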
143   return VT.getSizeInBits() - DAG.ComputeNumSignBits(Op);
144 }
145 
146 AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
147                                            const AMDGPUSubtarget &STI)
148     : TargetLowering(TM), Subtarget(&STI) {
149   AMDGPUASI = AMDGPU::getAMDGPUAS(TM);
150   // Lower floating point store/load to integer store/load to reduce the number
151   // of patterns in tablegen.
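  // (A promoted FP load/store is performed as the corresponding integer
  // load/store with the value bitcast, so only the integer patterns are
  // needed.)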
152   setOperationAction(ISD::LOAD, MVT::f32, Promote);
153   AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);
154 
155   setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
156   AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);
157 
158   setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
159   AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
160 
161   setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
162   AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);
163 
164   setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
165   AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);
166 
167   setOperationAction(ISD::LOAD, MVT::i64, Promote);
168   AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
169 
170   setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
171   AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);
172 
173   setOperationAction(ISD::LOAD, MVT::f64, Promote);
174   AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);
175 
176   setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
177   AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);
178 
179   // There are no 64-bit extloads. These should be done as a 32-bit extload and
180   // an extension to 64-bit.
181   for (MVT VT : MVT::integer_valuetypes()) {
182     setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
183     setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
184     setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
185   }
186 
187   for (MVT VT : MVT::integer_valuetypes()) {
188     if (VT == MVT::i64)
189       continue;
190 
191     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
192     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
193     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
194     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
195 
196     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
197     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
198     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
199     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);
200 
201     setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
202     setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
203     setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
204     setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
205   }
206 
207   for (MVT VT : MVT::integer_vector_valuetypes()) {
208     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
209     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
210     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
211     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
212     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
213     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
214     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
215     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
216     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
217     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
218     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
219     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
220   }
221 
222   setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
223   setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
224   setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
225   setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
226 
227   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
228   setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
229   setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
230   setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);
231 
232   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
233   setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
234   setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
235   setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);
236 
237   setOperationAction(ISD::STORE, MVT::f32, Promote);
238   AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);
239 
240   setOperationAction(ISD::STORE, MVT::v2f32, Promote);
241   AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);
242 
243   setOperationAction(ISD::STORE, MVT::v4f32, Promote);
244   AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);
245 
246   setOperationAction(ISD::STORE, MVT::v8f32, Promote);
247   AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);
248 
249   setOperationAction(ISD::STORE, MVT::v16f32, Promote);
250   AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);
251 
252   setOperationAction(ISD::STORE, MVT::i64, Promote);
253   AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
254 
255   setOperationAction(ISD::STORE, MVT::v2i64, Promote);
256   AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);
257 
258   setOperationAction(ISD::STORE, MVT::f64, Promote);
259   AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);
260 
261   setOperationAction(ISD::STORE, MVT::v2f64, Promote);
262   AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);
263 
264   setTruncStoreAction(MVT::i64, MVT::i1, Expand);
265   setTruncStoreAction(MVT::i64, MVT::i8, Expand);
266   setTruncStoreAction(MVT::i64, MVT::i16, Expand);
267   setTruncStoreAction(MVT::i64, MVT::i32, Expand);
268 
269   setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
270   setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
271   setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
272   setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);
273 
274   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
275   setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
276   setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
277   setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);
278 
279   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
280   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
281 
282   setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
283   setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);
284 
285   setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
286   setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);
287 
288   setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
289   setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);
290 
291 
292   setOperationAction(ISD::Constant, MVT::i32, Legal);
293   setOperationAction(ISD::Constant, MVT::i64, Legal);
294   setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
295   setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
296 
297   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
298   setOperationAction(ISD::BRIND, MVT::Other, Expand);
299 
300   // This is totally unsupported, just custom lower to produce an error.
301   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
302 
303   // Library functions.  These default to Expand, but we have instructions
304   // for them.
305   setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
306   setOperationAction(ISD::FEXP2,  MVT::f32, Legal);
307   setOperationAction(ISD::FPOW,   MVT::f32, Legal);
308   setOperationAction(ISD::FLOG2,  MVT::f32, Legal);
309   setOperationAction(ISD::FABS,   MVT::f32, Legal);
310   setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
311   setOperationAction(ISD::FRINT,  MVT::f32, Legal);
312   setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
313   setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
314   setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
315 
316   setOperationAction(ISD::FROUND, MVT::f32, Custom);
317   setOperationAction(ISD::FROUND, MVT::f64, Custom);
318 
319   setOperationAction(ISD::FLOG, MVT::f32, Custom);
320   setOperationAction(ISD::FLOG10, MVT::f32, Custom);
321 
322 
323   setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
324   setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);
325 
326   setOperationAction(ISD::FREM, MVT::f32, Custom);
327   setOperationAction(ISD::FREM, MVT::f64, Custom);
328 
329   // Expand to fneg + fadd.
330   setOperationAction(ISD::FSUB, MVT::f64, Expand);
331 
332   setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
333   setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
334   setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
335   setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
336   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
337   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
338   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
339   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
340   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
341   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);
342 
343   setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
344   setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom);
345   setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);
346 
347   const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
348   for (MVT VT : ScalarIntVTs) {
349     // These should use [SU]DIVREM, so set them to expand
350     setOperationAction(ISD::SDIV, VT, Expand);
351     setOperationAction(ISD::UDIV, VT, Expand);
352     setOperationAction(ISD::SREM, VT, Expand);
353     setOperationAction(ISD::UREM, VT, Expand);
354 
    // The GPU does not have a divrem instruction for signed or unsigned.
356     setOperationAction(ISD::SDIVREM, VT, Custom);
357     setOperationAction(ISD::UDIVREM, VT, Custom);
358 
    // The GPU does not have [S|U]MUL_LOHI as a single instruction.
360     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
361     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
362 
363     setOperationAction(ISD::BSWAP, VT, Expand);
364     setOperationAction(ISD::CTTZ, VT, Expand);
365     setOperationAction(ISD::CTLZ, VT, Expand);
366 
367     // AMDGPU uses ADDC/SUBC/ADDE/SUBE
368     setOperationAction(ISD::ADDC, VT, Legal);
369     setOperationAction(ISD::SUBC, VT, Legal);
370     setOperationAction(ISD::ADDE, VT, Legal);
371     setOperationAction(ISD::SUBE, VT, Legal);
372   }
373 
374   // The hardware supports 32-bit ROTR, but not ROTL.
375   setOperationAction(ISD::ROTL, MVT::i32, Expand);
376   setOperationAction(ISD::ROTL, MVT::i64, Expand);
377   setOperationAction(ISD::ROTR, MVT::i64, Expand);
378 
379   setOperationAction(ISD::MUL, MVT::i64, Expand);
380   setOperationAction(ISD::MULHU, MVT::i64, Expand);
381   setOperationAction(ISD::MULHS, MVT::i64, Expand);
382   setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
383   setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
384   setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
385   setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
386   setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
387 
388   setOperationAction(ISD::SMIN, MVT::i32, Legal);
389   setOperationAction(ISD::UMIN, MVT::i32, Legal);
390   setOperationAction(ISD::SMAX, MVT::i32, Legal);
391   setOperationAction(ISD::UMAX, MVT::i32, Legal);
392 
393   setOperationAction(ISD::CTTZ, MVT::i64, Custom);
394   setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
395   setOperationAction(ISD::CTLZ, MVT::i64, Custom);
396   setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
397 
398   static const MVT::SimpleValueType VectorIntTypes[] = {
399     MVT::v2i32, MVT::v4i32
400   };
401 
402   for (MVT VT : VectorIntTypes) {
403     // Expand the following operations for the current type by default.
404     setOperationAction(ISD::ADD,  VT, Expand);
405     setOperationAction(ISD::AND,  VT, Expand);
406     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
407     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
408     setOperationAction(ISD::MUL,  VT, Expand);
409     setOperationAction(ISD::MULHU, VT, Expand);
410     setOperationAction(ISD::MULHS, VT, Expand);
411     setOperationAction(ISD::OR,   VT, Expand);
412     setOperationAction(ISD::SHL,  VT, Expand);
413     setOperationAction(ISD::SRA,  VT, Expand);
414     setOperationAction(ISD::SRL,  VT, Expand);
415     setOperationAction(ISD::ROTL, VT, Expand);
416     setOperationAction(ISD::ROTR, VT, Expand);
417     setOperationAction(ISD::SUB,  VT, Expand);
418     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
419     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
420     setOperationAction(ISD::SDIV, VT, Expand);
421     setOperationAction(ISD::UDIV, VT, Expand);
422     setOperationAction(ISD::SREM, VT, Expand);
423     setOperationAction(ISD::UREM, VT, Expand);
424     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
425     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
426     setOperationAction(ISD::SDIVREM, VT, Custom);
427     setOperationAction(ISD::UDIVREM, VT, Expand);
428     setOperationAction(ISD::SELECT, VT, Expand);
429     setOperationAction(ISD::VSELECT, VT, Expand);
430     setOperationAction(ISD::SELECT_CC, VT, Expand);
431     setOperationAction(ISD::XOR,  VT, Expand);
432     setOperationAction(ISD::BSWAP, VT, Expand);
433     setOperationAction(ISD::CTPOP, VT, Expand);
434     setOperationAction(ISD::CTTZ, VT, Expand);
435     setOperationAction(ISD::CTLZ, VT, Expand);
436     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
437     setOperationAction(ISD::SETCC, VT, Expand);
438   }
439 
440   static const MVT::SimpleValueType FloatVectorTypes[] = {
441     MVT::v2f32, MVT::v4f32
442   };
443 
444   for (MVT VT : FloatVectorTypes) {
445     setOperationAction(ISD::FABS, VT, Expand);
446     setOperationAction(ISD::FMINNUM, VT, Expand);
447     setOperationAction(ISD::FMAXNUM, VT, Expand);
448     setOperationAction(ISD::FADD, VT, Expand);
449     setOperationAction(ISD::FCEIL, VT, Expand);
450     setOperationAction(ISD::FCOS, VT, Expand);
451     setOperationAction(ISD::FDIV, VT, Expand);
452     setOperationAction(ISD::FEXP2, VT, Expand);
453     setOperationAction(ISD::FLOG2, VT, Expand);
454     setOperationAction(ISD::FREM, VT, Expand);
455     setOperationAction(ISD::FLOG, VT, Expand);
456     setOperationAction(ISD::FLOG10, VT, Expand);
457     setOperationAction(ISD::FPOW, VT, Expand);
458     setOperationAction(ISD::FFLOOR, VT, Expand);
459     setOperationAction(ISD::FTRUNC, VT, Expand);
460     setOperationAction(ISD::FMUL, VT, Expand);
461     setOperationAction(ISD::FMA, VT, Expand);
462     setOperationAction(ISD::FRINT, VT, Expand);
463     setOperationAction(ISD::FNEARBYINT, VT, Expand);
464     setOperationAction(ISD::FSQRT, VT, Expand);
465     setOperationAction(ISD::FSIN, VT, Expand);
466     setOperationAction(ISD::FSUB, VT, Expand);
467     setOperationAction(ISD::FNEG, VT, Expand);
468     setOperationAction(ISD::VSELECT, VT, Expand);
469     setOperationAction(ISD::SELECT_CC, VT, Expand);
470     setOperationAction(ISD::FCOPYSIGN, VT, Expand);
471     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
472     setOperationAction(ISD::SETCC, VT, Expand);
473   }
474 
  // This causes us to use an unrolled select operation rather than expansion
  // with bit operations. This is in general better, but the alternative using
  // BFI instructions may be better if the select sources are SGPRs.
478   setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
479   AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);
480 
481   setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
482   AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);
483 
484   // There are no libcalls of any kind.
485   for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
486     setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);
487 
488   setBooleanContents(ZeroOrNegativeOneBooleanContent);
489   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
490 
491   setSchedulingPreference(Sched::RegPressure);
492   setJumpIsExpensive(true);
493 
494   // FIXME: This is only partially true. If we have to do vector compares, any
495   // SGPR pair can be a condition register. If we have a uniform condition, we
496   // are better off doing SALU operations, where there is only one SCC. For now,
497   // we don't have a way of knowing during instruction selection if a condition
498   // will be uniform and we always use vector compares. Assume we are using
499   // vector compares until that is fixed.
500   setHasMultipleConditionRegisters(true);
501 
502   PredictableSelectIsExpensive = false;
503 
504   // We want to find all load dependencies for long chains of stores to enable
505   // merging into very wide vectors. The problem is with vectors with > 4
506   // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
507   // vectors are a legal type, even though we have to split the loads
508   // usually. When we can more precisely specify load legality per address
509   // space, we should be able to make FindBetterChain/MergeConsecutiveStores
510   // smarter so that they can figure out what to do in 2 iterations without all
511   // N > 4 stores on the same chain.
512   GatherAllAliasesMaxDepth = 16;
513 
514   // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to worry
515   // about these during lowering.
516   MaxStoresPerMemcpy  = 0xffffffff;
517   MaxStoresPerMemmove = 0xffffffff;
518   MaxStoresPerMemset  = 0xffffffff;
519 
520   setTargetDAGCombine(ISD::BITCAST);
521   setTargetDAGCombine(ISD::SHL);
522   setTargetDAGCombine(ISD::SRA);
523   setTargetDAGCombine(ISD::SRL);
524   setTargetDAGCombine(ISD::TRUNCATE);
525   setTargetDAGCombine(ISD::MUL);
526   setTargetDAGCombine(ISD::MULHU);
527   setTargetDAGCombine(ISD::MULHS);
528   setTargetDAGCombine(ISD::SELECT);
529   setTargetDAGCombine(ISD::SELECT_CC);
530   setTargetDAGCombine(ISD::STORE);
531   setTargetDAGCombine(ISD::FADD);
532   setTargetDAGCombine(ISD::FSUB);
533   setTargetDAGCombine(ISD::FNEG);
534   setTargetDAGCombine(ISD::FABS);
535   setTargetDAGCombine(ISD::AssertZext);
536   setTargetDAGCombine(ISD::AssertSext);
537 }
538 
539 //===----------------------------------------------------------------------===//
540 // Target Information
541 //===----------------------------------------------------------------------===//
542 
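// Returns true if an fneg of a value produced by this opcode can instead be
// folded into the operation, e.g. fneg (fadd x, y) can become
// (fadd (fneg x), (fneg y)), with the negations absorbed as source modifiers.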
543 LLVM_READNONE
544 static bool fnegFoldsIntoOp(unsigned Opc) {
545   switch (Opc) {
546   case ISD::FADD:
547   case ISD::FSUB:
548   case ISD::FMUL:
549   case ISD::FMA:
550   case ISD::FMAD:
551   case ISD::FMINNUM:
552   case ISD::FMAXNUM:
553   case ISD::FSIN:
554   case ISD::FTRUNC:
555   case ISD::FRINT:
556   case ISD::FNEARBYINT:
557   case AMDGPUISD::RCP:
558   case AMDGPUISD::RCP_LEGACY:
559   case AMDGPUISD::RCP_IFLAG:
560   case AMDGPUISD::SIN_HW:
561   case AMDGPUISD::FMUL_LEGACY:
562   case AMDGPUISD::FMIN_LEGACY:
563   case AMDGPUISD::FMAX_LEGACY:
564     return true;
565   default:
566     return false;
567   }
568 }
569 
/// \returns True if the operation will definitely need to use a 64-bit
/// encoding, and thus will use a VOP3 encoding regardless of the source
/// modifiers.
573 LLVM_READONLY
574 static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
575   return N->getNumOperands() > 2 || VT == MVT::f64;
576 }
577 
578 // Most FP instructions support source modifiers, but this could be refined
579 // slightly.
580 LLVM_READONLY
581 static bool hasSourceMods(const SDNode *N) {
582   if (isa<MemSDNode>(N))
583     return false;
584 
585   switch (N->getOpcode()) {
586   case ISD::CopyToReg:
587   case ISD::SELECT:
588   case ISD::FDIV:
589   case ISD::FREM:
590   case ISD::INLINEASM:
591   case AMDGPUISD::INTERP_P1:
592   case AMDGPUISD::INTERP_P2:
593   case AMDGPUISD::DIV_SCALE:
594 
595   // TODO: Should really be looking at the users of the bitcast. These are
596   // problematic because bitcasts are used to legalize all stores to integer
597   // types.
598   case ISD::BITCAST:
599     return false;
600   default:
601     return true;
602   }
603 }
604 
605 bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
606                                                  unsigned CostThreshold) {
  // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
  // a source modifier is truly free for them. For other users, adding a source
  // modifier forces the larger VOP3 encoding, so there will be a code size
  // increase. Try to avoid increasing code size unless we know it will save on
  // the instruction count.
612   unsigned NumMayIncreaseSize = 0;
613   MVT VT = N->getValueType(0).getScalarType().getSimpleVT();
614 
615   // XXX - Should this limit number of uses to check?
616   for (const SDNode *U : N->uses()) {
617     if (!hasSourceMods(U))
618       return false;
619 
620     if (!opMustUseVOP3Encoding(U, VT)) {
621       if (++NumMayIncreaseSize > CostThreshold)
622         return false;
623     }
624   }
625 
626   return true;
627 }
628 
629 MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
630   return MVT::i32;
631 }
632 
633 bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
634   return true;
635 }
636 
// The backend supports 32- and 64-bit floating point immediates, and 16-bit
// immediates when the subtarget has 16-bit instructions.
638 // FIXME: Why are we reporting vectors of FP immediates as legal?
639 bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
640   EVT ScalarVT = VT.getScalarType();
641   return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
642          (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
643 }
644 
645 // We don't want to shrink f64 / f32 constants.
646 bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
647   EVT ScalarVT = VT.getScalarType();
648   return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
649 }
650 
651 bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
652                                                  ISD::LoadExtType,
653                                                  EVT NewVT) const {
654 
655   unsigned NewSize = NewVT.getStoreSizeInBits();
656 
657   // If we are reducing to a 32-bit load, this is always better.
658   if (NewSize == 32)
659     return true;
660 
661   EVT OldVT = N->getValueType(0);
662   unsigned OldSize = OldVT.getStoreSizeInBits();
663 
664   // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
665   // extloads, so doing one requires using a buffer_load. In cases where we
666   // still couldn't use a scalar load, using the wider load shouldn't really
667   // hurt anything.
668 
669   // If the old size already had to be an extload, there's no harm in continuing
670   // to reduce the width.
671   return (OldSize < 32);
672 }
673 
674 bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
675                                                    EVT CastTy) const {
676 
677   assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());
678 
679   if (LoadTy.getScalarType() == MVT::i32)
680     return false;
681 
682   unsigned LScalarSize = LoadTy.getScalarSizeInBits();
683   unsigned CastScalarSize = CastTy.getScalarSizeInBits();
684 
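  // Treat the cast as beneficial unless it produces sub-32-bit elements that
  // are no wider than the original elements.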
685   return (LScalarSize < CastScalarSize) ||
686          (CastScalarSize >= 32);
687 }
688 
689 // SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
690 // profitable with the expansion for 64-bit since it's generally good to
691 // speculate things.
692 // FIXME: These should really have the size as a parameter.
693 bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
694   return true;
695 }
696 
697 bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
698   return true;
699 }
700 
bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const {
  switch (N->getOpcode()) {
  default:
    return false;
  case ISD::EntryToken:
  case ISD::TokenFactor:
    return true;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IntrID) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
      return true;
    }
  }
  case ISD::LOAD: {
    const LoadSDNode *L = cast<LoadSDNode>(N);
    return L->getMemOperand()->getAddrSpace() ==
           AMDGPUASI.CONSTANT_ADDRESS_32BIT;
  }
  }
}
731 
732 //===---------------------------------------------------------------------===//
733 // Target Properties
734 //===---------------------------------------------------------------------===//
735 
736 bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
737   assert(VT.isFloatingPoint());
738 
739   // Packed operations do not have a fabs modifier.
740   return VT == MVT::f32 || VT == MVT::f64 ||
741          (Subtarget->has16BitInsts() && VT == MVT::f16);
742 }
743 
744 bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
745   assert(VT.isFloatingPoint());
746   return VT == MVT::f32 || VT == MVT::f64 ||
747          (Subtarget->has16BitInsts() && VT == MVT::f16) ||
748          (Subtarget->hasVOP3PInsts() && VT == MVT::v2f16);
749 }
750 
bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,
                                                        unsigned NumElem,
                                                        unsigned AS) const {
754   return true;
755 }
756 
757 bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
758   // There are few operations which truly have vector input operands. Any vector
759   // operation is going to involve operations on each component, and a
760   // build_vector will be a copy per element, so it always makes sense to use a
761   // build_vector input in place of the extracted element to avoid a copy into a
762   // super register.
763   //
764   // We should probably only do this if all users are extracts only, but this
765   // should be the common case.
766   return true;
767 }
768 
769 bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
770   // Truncate is just accessing a subregister.
771 
772   unsigned SrcSize = Source.getSizeInBits();
773   unsigned DestSize = Dest.getSizeInBits();
774 
  return DestSize < SrcSize && DestSize % 32 == 0;
776 }
777 
778 bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
779   // Truncate is just accessing a subregister.
780 
781   unsigned SrcSize = Source->getScalarSizeInBits();
782   unsigned DestSize = Dest->getScalarSizeInBits();
783 
  if (DestSize == 16 && Subtarget->has16BitInsts())
    return SrcSize >= 32;
786 
787   return DestSize < SrcSize && DestSize % 32 == 0;
788 }
789 
790 bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
791   unsigned SrcSize = Src->getScalarSizeInBits();
792   unsigned DestSize = Dest->getScalarSizeInBits();
793 
794   if (SrcSize == 16 && Subtarget->has16BitInsts())
795     return DestSize >= 32;
796 
797   return SrcSize == 32 && DestSize == 64;
798 }
799 
800 bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit value is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.
805 
  if (Src == MVT::i16)
    return Dest == MVT::i32 || Dest == MVT::i64;
808 
809   return Src == MVT::i32 && Dest == MVT::i64;
810 }
811 
812 bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
813   return isZExtFree(Val.getValueType(), VT2);
814 }
815 
816 bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
817   // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
818   // limited number of native 64-bit operations. Shrinking an operation to fit
819   // in a single 32-bit register should always be helpful. As currently used,
820   // this is much less general than the name suggests, and is only used in
821   // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
822   // not profitable, and may actually be harmful.
823   return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
824 }
825 
826 //===---------------------------------------------------------------------===//
827 // TargetLowering Callbacks
828 //===---------------------------------------------------------------------===//
829 
830 CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
831                                                   bool IsVarArg) {
832   switch (CC) {
833   case CallingConv::AMDGPU_KERNEL:
834   case CallingConv::SPIR_KERNEL:
835     llvm_unreachable("kernels should not be handled here");
836   case CallingConv::AMDGPU_VS:
837   case CallingConv::AMDGPU_GS:
838   case CallingConv::AMDGPU_PS:
839   case CallingConv::AMDGPU_CS:
840   case CallingConv::AMDGPU_HS:
841   case CallingConv::AMDGPU_ES:
842   case CallingConv::AMDGPU_LS:
843     return CC_AMDGPU;
844   case CallingConv::C:
845   case CallingConv::Fast:
846   case CallingConv::Cold:
847     return CC_AMDGPU_Func;
848   default:
849     report_fatal_error("Unsupported calling convention.");
850   }
851 }
852 
853 CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
854                                                     bool IsVarArg) {
855   switch (CC) {
856   case CallingConv::AMDGPU_KERNEL:
857   case CallingConv::SPIR_KERNEL:
858     llvm_unreachable("kernels should not be handled here");
859   case CallingConv::AMDGPU_VS:
860   case CallingConv::AMDGPU_GS:
861   case CallingConv::AMDGPU_PS:
862   case CallingConv::AMDGPU_CS:
863   case CallingConv::AMDGPU_HS:
864   case CallingConv::AMDGPU_ES:
865   case CallingConv::AMDGPU_LS:
866     return RetCC_SI_Shader;
867   case CallingConv::C:
868   case CallingConv::Fast:
869   case CallingConv::Cold:
870     return RetCC_AMDGPU_Func;
871   default:
872     report_fatal_error("Unsupported calling convention.");
873   }
874 }
875 
876 /// The SelectionDAGBuilder will automatically promote function arguments
877 /// with illegal types.  However, this does not work for the AMDGPU targets
878 /// since the function arguments are stored in memory as these illegal types.
879 /// In order to handle this properly we need to get the original types sizes
/// from the LLVM IR Function and fix up the ISD::InputArg values before
881 /// passing them to AnalyzeFormalArguments()
882 
883 /// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
884 /// input values across multiple registers.  Each item in the Ins array
885 /// represents a single value that will be stored in registers.  Ins[x].VT is
886 /// the value type of the value that will be stored in the register, so
887 /// whatever SDNode we lower the argument to needs to be this type.
888 ///
889 /// In order to correctly lower the arguments we need to know the size of each
890 /// argument.  Since Ins[x].VT gives us the size of the register that will
891 /// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
/// for the original function argument so that we can deduce the correct memory
893 /// type to use for Ins[x].  In most cases the correct memory type will be
894 /// Ins[x].ArgVT.  However, this will not always be the case.  If, for example,
895 /// we have a kernel argument of type v8i8, this argument will be split into
896 /// 8 parts and each part will be represented by its own item in the Ins array.
/// For each part the Ins[x].ArgVT will be v8i8, which is the full type of
898 /// the argument before it was split.  From this, we deduce that the memory type
899 /// for each individual part is i8.  We pass the memory type as LocVT to the
900 /// calling convention analysis function and the register type (Ins[x].VT) as
901 /// the ValVT.
902 void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
903   CCState &State,
904   const SmallVectorImpl<ISD::InputArg> &Ins) const {
905   const MachineFunction &MF = State.getMachineFunction();
906   const Function &Fn = MF.getFunction();
907   LLVMContext &Ctx = Fn.getParent()->getContext();
908   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
909   const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset(Fn);
910 
911   unsigned MaxAlign = 1;
912   uint64_t ExplicitArgOffset = 0;
913   const DataLayout &DL = Fn.getParent()->getDataLayout();
914 
915   unsigned InIndex = 0;
916 
917   for (const Argument &Arg : Fn.args()) {
918     Type *BaseArgTy = Arg.getType();
919     unsigned Align = DL.getABITypeAlignment(BaseArgTy);
920     MaxAlign = std::max(Align, MaxAlign);
921     unsigned AllocSize = DL.getTypeAllocSize(BaseArgTy);
922 
923     uint64_t ArgOffset = alignTo(ExplicitArgOffset, Align) + ExplicitOffset;
924     ExplicitArgOffset = alignTo(ExplicitArgOffset, Align) + AllocSize;
925 
926     // We're basically throwing away everything passed into us and starting over
927     // to get accurate in-memory offsets. The "PartOffset" is completely useless
928     // to us as computed in Ins.
929     //
930     // We also need to figure out what type legalization is trying to do to get
931     // the correct memory offsets.
932 
933     SmallVector<EVT, 16> ValueVTs;
934     SmallVector<uint64_t, 16> Offsets;
935     ComputeValueVTs(*this, DL, BaseArgTy, ValueVTs, &Offsets, ArgOffset);
936 
937     for (unsigned Value = 0, NumValues = ValueVTs.size();
938          Value != NumValues; ++Value) {
939       uint64_t BasePartOffset = Offsets[Value];
940 
941       EVT ArgVT = ValueVTs[Value];
942       EVT MemVT = ArgVT;
943       MVT RegisterVT =
944         getRegisterTypeForCallingConv(Ctx, ArgVT);
945       unsigned NumRegs =
946         getNumRegistersForCallingConv(Ctx, ArgVT);
947 
948       if (!Subtarget->isAmdHsaOS() &&
949           (ArgVT == MVT::i16 || ArgVT == MVT::i8 || ArgVT == MVT::f16)) {
        // The ABI says the caller will extend these values to 32 bits.
951         MemVT = ArgVT.isInteger() ? MVT::i32 : MVT::f32;
952       } else if (NumRegs == 1) {
953         // This argument is not split, so the IR type is the memory type.
954         if (ArgVT.isExtended()) {
955           // We have an extended type, like i24, so we should just use the
956           // register type.
957           MemVT = RegisterVT;
958         } else {
959           MemVT = ArgVT;
960         }
961       } else if (ArgVT.isVector() && RegisterVT.isVector() &&
962                  ArgVT.getScalarType() == RegisterVT.getScalarType()) {
963         assert(ArgVT.getVectorNumElements() > RegisterVT.getVectorNumElements());
964         // We have a vector value which has been split into a vector with
965         // the same scalar type, but fewer elements.  This should handle
966         // all the floating-point vector types.
967         MemVT = RegisterVT;
968       } else if (ArgVT.isVector() &&
969                  ArgVT.getVectorNumElements() == NumRegs) {
970         // This arg has been split so that each element is stored in a separate
971         // register.
972         MemVT = ArgVT.getScalarType();
973       } else if (ArgVT.isExtended()) {
974         // We have an extended type, like i65.
975         MemVT = RegisterVT;
976       } else {
977         unsigned MemoryBits = ArgVT.getStoreSizeInBits() / NumRegs;
978         assert(ArgVT.getStoreSizeInBits() % NumRegs == 0);
979         if (RegisterVT.isInteger()) {
980           MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
981         } else if (RegisterVT.isVector()) {
982           assert(!RegisterVT.getScalarType().isFloatingPoint());
983           unsigned NumElements = RegisterVT.getVectorNumElements();
984           assert(MemoryBits % NumElements == 0);
985           // This vector type has been split into another vector type with
          // a different element size.
987           EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
988                                            MemoryBits / NumElements);
989           MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
990         } else {
991           llvm_unreachable("cannot deduce memory type.");
992         }
993       }
994 
      // Convert one-element vectors to scalars.
996       if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
997         MemVT = MemVT.getScalarType();
998 
999       if (MemVT.isExtended()) {
1000         // This should really only happen if we have vec3 arguments
1001         assert(MemVT.isVector() && MemVT.getVectorNumElements() == 3);
1002         MemVT = MemVT.getPow2VectorType(State.getContext());
1003       }
1004 
1005       unsigned PartOffset = 0;
1006       for (unsigned i = 0; i != NumRegs; ++i) {
1007         State.addLoc(CCValAssign::getCustomMem(InIndex++, RegisterVT,
1008                                                BasePartOffset + PartOffset,
1009                                                MemVT.getSimpleVT(),
1010                                                CCValAssign::Full));
1011         PartOffset += MemVT.getStoreSize();
1012       }
1013     }
1014   }
1015 }
1016 
1017 SDValue AMDGPUTargetLowering::LowerReturn(
1018   SDValue Chain, CallingConv::ID CallConv,
1019   bool isVarArg,
1020   const SmallVectorImpl<ISD::OutputArg> &Outs,
1021   const SmallVectorImpl<SDValue> &OutVals,
1022   const SDLoc &DL, SelectionDAG &DAG) const {
1023   // FIXME: Fails for r600 tests
1024   //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
1025   // "wave terminate should not have return values");
1026   return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
1027 }
1028 
1029 //===---------------------------------------------------------------------===//
1030 // Target specific lowering
1031 //===---------------------------------------------------------------------===//
1032 
1033 /// Selects the correct CCAssignFn for a given CallingConvention value.
1034 CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
1035                                                     bool IsVarArg) {
1036   return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
1037 }
1038 
1039 CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
1040                                                       bool IsVarArg) {
1041   return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
1042 }
1043 
1044 SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain,
1045                                                   SelectionDAG &DAG,
1046                                                   MachineFrameInfo &MFI,
1047                                                   int ClobberedFI) const {
1048   SmallVector<SDValue, 8> ArgChains;
1049   int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
1050   int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
1051 
  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalization find the
  // CALLSEQ_BEGIN node.
1055   ArgChains.push_back(Chain);
1056 
  // Add a chain value for each load of a stack argument whose frame object
  // overlaps the clobbered one.
1058   for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
1059                             UE = DAG.getEntryNode().getNode()->use_end();
1060        U != UE; ++U) {
1061     if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) {
1062       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
1063         if (FI->getIndex() < 0) {
1064           int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
1065           int64_t InLastByte = InFirstByte;
1066           InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;
1067 
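          // The clobbered range [FirstByte, LastByte] and this load's range
          // [InFirstByte, InLastByte] overlap iff either range's start lies
          // within the other.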
1068           if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
1069               (FirstByte <= InFirstByte && InFirstByte <= LastByte))
1070             ArgChains.push_back(SDValue(L, 1));
1071         }
1072       }
1073     }
1074   }
1075 
1076   // Build a tokenfactor for all the chains.
1077   return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
1078 }
1079 
1080 SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
1081                                                  SmallVectorImpl<SDValue> &InVals,
1082                                                  StringRef Reason) const {
1083   SDValue Callee = CLI.Callee;
1084   SelectionDAG &DAG = CLI.DAG;
1085 
1086   const Function &Fn = DAG.getMachineFunction().getFunction();
1087 
1088   StringRef FuncName("<unknown>");
1089 
1090   if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
1091     FuncName = G->getSymbol();
1092   else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1093     FuncName = G->getGlobal()->getName();
1094 
1095   DiagnosticInfoUnsupported NoCalls(
1096     Fn, Reason + FuncName, CLI.DL.getDebugLoc());
1097   DAG.getContext()->diagnose(NoCalls);
1098 
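  // Give back undef values for any expected call results so that lowering of
  // the rest of the function can continue after the diagnostic.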
1099   if (!CLI.IsTailCall) {
1100     for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
1101       InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
1102   }
1103 
1104   return DAG.getEntryNode();
1105 }
1106 
1107 SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
1108                                         SmallVectorImpl<SDValue> &InVals) const {
1109   return lowerUnhandledCall(CLI, InVals, "unsupported call to function ");
1110 }
1111 
1112 SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
1113                                                       SelectionDAG &DAG) const {
1114   const Function &Fn = DAG.getMachineFunction().getFunction();
1115 
1116   DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
1117                                             SDLoc(Op).getDebugLoc());
1118   DAG.getContext()->diagnose(NoDynamicAlloca);
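  // Return a zero value plus the incoming chain so lowering can proceed after
  // the diagnostic has been emitted.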
1119   auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
1120   return DAG.getMergeValues(Ops, SDLoc());
1121 }
1122 
1123 SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
1124                                              SelectionDAG &DAG) const {
1125   switch (Op.getOpcode()) {
1126   default:
1127     Op->print(errs(), &DAG);
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
1130     break;
1131   case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
1132   case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
1133   case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
1134   case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
1135   case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
1136   case ISD::FREM: return LowerFREM(Op, DAG);
1137   case ISD::FCEIL: return LowerFCEIL(Op, DAG);
1138   case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
1139   case ISD::FRINT: return LowerFRINT(Op, DAG);
1140   case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
1141   case ISD::FROUND: return LowerFROUND(Op, DAG);
1142   case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
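  // FLOG and FLOG10 are expanded in terms of log2 via the scale factors passed
  // to LowerFLOG: ln(x) = log2(x) * ln(2) (note 1/log2(e) == ln(2)) and
  // log10(x) = log2(x) * (ln(2)/ln(10)) == log2(x) * log10(2).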
1143   case ISD::FLOG:
1144     return LowerFLOG(Op, DAG, 1 / AMDGPU_LOG2E_F);
1145   case ISD::FLOG10:
1146     return LowerFLOG(Op, DAG, AMDGPU_LN2_F / AMDGPU_LN10_F);
1147   case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
1148   case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
1149   case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
1150   case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
1151   case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
1152   case ISD::CTTZ:
1153   case ISD::CTTZ_ZERO_UNDEF:
1154   case ISD::CTLZ:
1155   case ISD::CTLZ_ZERO_UNDEF:
1156     return LowerCTLZ_CTTZ(Op, DAG);
1157   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
1158   }
1159   return Op;
1160 }
1161 
1162 void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
1163                                               SmallVectorImpl<SDValue> &Results,
1164                                               SelectionDAG &DAG) const {
1165   switch (N->getOpcode()) {
1166   case ISD::SIGN_EXTEND_INREG:
1167     // Different parts of legalization seem to interpret which type of
1168     // sign_extend_inreg is the one to check for custom lowering. The extended
1169     // from type is what really matters, but some places check for custom
1170     // lowering of the result type. This results in trying to use
1171     // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
1172     // nothing here and let the illegal result integer be handled normally.
1173     return;
1174   default:
1175     return;
1176   }
1177 }
1178 
1179 static bool hasDefinedInitializer(const GlobalValue *GV) {
1180   const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
1181   if (!GVar || !GVar->hasInitializer())
1182     return false;
1183 
1184   return !isa<UndefValue>(GVar->getInitializer());
1185 }
1186 
1187 SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
1188                                                  SDValue Op,
1189                                                  SelectionDAG &DAG) const {
1190 
1191   const DataLayout &DL = DAG.getDataLayout();
1192   GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
1193   const GlobalValue *GV = G->getGlobal();
1194 
1195   if (G->getAddressSpace() == AMDGPUASI.LOCAL_ADDRESS ||
1196       G->getAddressSpace() == AMDGPUASI.REGION_ADDRESS) {
1197     if (!MFI->isEntryFunction()) {
1198       const Function &Fn = DAG.getMachineFunction().getFunction();
1199       DiagnosticInfoUnsupported BadLDSDecl(
1200         Fn, "local memory global used by non-kernel function", SDLoc(Op).getDebugLoc());
1201       DAG.getContext()->diagnose(BadLDSDecl);
1202     }
1203 
1204     // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");
1207 
1208     // TODO: We could emit code to handle the initialization somewhere.
1209     if (!hasDefinedInitializer(GV)) {
1210       unsigned Offset = MFI->allocateLDSGlobal(DL, *GV);
1211       return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
1212     }
1213   }
1214 
1215   const Function &Fn = DAG.getMachineFunction().getFunction();
1216   DiagnosticInfoUnsupported BadInit(
1217       Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc());
1218   DAG.getContext()->diagnose(BadInit);
1219   return SDValue();
1220 }
1221 
1222 SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
1223                                                   SelectionDAG &DAG) const {
1224   SmallVector<SDValue, 8> Args;
1225 
1226   EVT VT = Op.getValueType();
1227   if (VT == MVT::v4i16 || VT == MVT::v4f16) {
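    // Concatenating two packed 16-bit halves (v2i16/v2f16) is just packing the
    // two 32-bit source registers into the low and high halves of the 64-bit
    // result, so bitcast through i32 / v2i32 rather than extracting elements.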
1228     SDLoc SL(Op);
1229     SDValue Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(0));
1230     SDValue Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(1));
1231 
1232     SDValue BV = DAG.getBuildVector(MVT::v2i32, SL, { Lo, Hi });
1233     return DAG.getNode(ISD::BITCAST, SL, VT, BV);
1234   }
1235 
1236   for (const SDUse &U : Op->ops())
1237     DAG.ExtractVectorElements(U.get(), Args);
1238 
1239   return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
1240 }
1241 
1242 SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
1243                                                      SelectionDAG &DAG) const {
1244 
1245   SmallVector<SDValue, 8> Args;
1246   unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1247   EVT VT = Op.getValueType();
1248   DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
1249                             VT.getVectorNumElements());
1250 
1251   return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
1252 }
1253 
1254 /// Generate Min/Max node
1255 SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
1256                                                    SDValue LHS, SDValue RHS,
1257                                                    SDValue True, SDValue False,
1258                                                    SDValue CC,
1259                                                    DAGCombinerInfo &DCI) const {
1260   if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
1261     return SDValue();
1262 
1263   SelectionDAG &DAG = DCI.DAG;
1264   ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
1265   switch (CCOpcode) {
1266   case ISD::SETOEQ:
1267   case ISD::SETONE:
1268   case ISD::SETUNE:
1269   case ISD::SETNE:
1270   case ISD::SETUEQ:
1271   case ISD::SETEQ:
1272   case ISD::SETFALSE:
1273   case ISD::SETFALSE2:
1274   case ISD::SETTRUE:
1275   case ISD::SETTRUE2:
1276   case ISD::SETUO:
1277   case ISD::SETO:
1278     break;
1279   case ISD::SETULE:
1280   case ISD::SETULT: {
1281     if (LHS == True)
1282       return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
1283     return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
1284   }
1285   case ISD::SETOLE:
1286   case ISD::SETOLT:
1287   case ISD::SETLE:
1288   case ISD::SETLT: {
1289     // Ordered. Assume ordered for undefined.
1290 
1291     // Only do this after legalization to avoid interfering with other combines
1292     // which might occur.
1293     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
1294         !DCI.isCalledByLegalizer())
1295       return SDValue();
1296 
1297     // We need to permute the operands to get the correct NaN behavior. The
1298     // selected operand is the second one based on the failing compare with NaN,
1299     // so permute it based on the compare type the hardware uses.
1300     if (LHS == True)
1301       return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
1302     return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
1303   }
1304   case ISD::SETUGE:
1305   case ISD::SETUGT: {
1306     if (LHS == True)
1307       return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
1308     return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
1309   }
1310   case ISD::SETGT:
1311   case ISD::SETGE:
1312   case ISD::SETOGE:
1313   case ISD::SETOGT: {
1314     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
1315         !DCI.isCalledByLegalizer())
1316       return SDValue();
1317 
1318     if (LHS == True)
1319       return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
1320     return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
1321   }
1322   case ISD::SETCC_INVALID:
1323     llvm_unreachable("Invalid setcc condcode!");
1324   }
1325   return SDValue();
1326 }
1327 
1328 std::pair<SDValue, SDValue>
1329 AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
1330   SDLoc SL(Op);
1331 
1332   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1333 
1334   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1335   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1336 
1337   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1338   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1339 
1340   return std::make_pair(Lo, Hi);
1341 }
1342 
1343 SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
1344   SDLoc SL(Op);
1345 
1346   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1347   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1348   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1349 }
1350 
1351 SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
1352   SDLoc SL(Op);
1353 
1354   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1355   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1356   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1357 }
1358 
1359 SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
1360                                               SelectionDAG &DAG) const {
1361   LoadSDNode *Load = cast<LoadSDNode>(Op);
1362   EVT VT = Op.getValueType();
1363 
  // If this is a 2-element vector, scalarize instead of creating awkward
  // 1-element vectors.
1367   if (VT.getVectorNumElements() == 2)
1368     return scalarizeVectorLoad(Load, DAG);
1369 
1370   SDValue BasePtr = Load->getBasePtr();
1371   EVT MemVT = Load->getMemoryVT();
1372   SDLoc SL(Op);
1373 
1374   const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1375 
1376   EVT LoVT, HiVT;
1377   EVT LoMemVT, HiMemVT;
1378   SDValue Lo, Hi;
1379 
1380   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
1381   std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
1382   std::tie(Lo, Hi) = DAG.SplitVector(Op, SL, LoVT, HiVT);
1383 
1384   unsigned Size = LoMemVT.getStoreSize();
1385   unsigned BaseAlign = Load->getAlignment();
1386   unsigned HiAlign = MinAlign(BaseAlign, Size);
1387 
1388   SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
1389                                   Load->getChain(), BasePtr, SrcValue, LoMemVT,
1390                                   BaseAlign, Load->getMemOperand()->getFlags());
1391   SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, Size);
1392   SDValue HiLoad =
1393       DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
1394                      HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
1395                      HiMemVT, HiAlign, Load->getMemOperand()->getFlags());
1396 
1397   SDValue Ops[] = {
1398     DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad),
1399     DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
1400                 LoLoad.getValue(1), HiLoad.getValue(1))
1401   };
1402 
1403   return DAG.getMergeValues(Ops, SL);
1404 }
1405 
1406 SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
1407                                                SelectionDAG &DAG) const {
1408   StoreSDNode *Store = cast<StoreSDNode>(Op);
1409   SDValue Val = Store->getValue();
1410   EVT VT = Val.getValueType();
1411 
  // If this is a 2-element vector, scalarize instead of creating awkward
  // 1-element vectors.
1414   if (VT.getVectorNumElements() == 2)
1415     return scalarizeVectorStore(Store, DAG);
1416 
1417   EVT MemVT = Store->getMemoryVT();
1418   SDValue Chain = Store->getChain();
1419   SDValue BasePtr = Store->getBasePtr();
1420   SDLoc SL(Op);
1421 
1422   EVT LoVT, HiVT;
1423   EVT LoMemVT, HiMemVT;
1424   SDValue Lo, Hi;
1425 
1426   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
1427   std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
1428   std::tie(Lo, Hi) = DAG.SplitVector(Val, SL, LoVT, HiVT);
1429 
1430   SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize());
1431 
1432   const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
1433   unsigned BaseAlign = Store->getAlignment();
1434   unsigned Size = LoMemVT.getStoreSize();
1435   unsigned HiAlign = MinAlign(BaseAlign, Size);
1436 
1437   SDValue LoStore =
1438       DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
1439                         Store->getMemOperand()->getFlags());
1440   SDValue HiStore =
1441       DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
1442                         HiMemVT, HiAlign, Store->getMemOperand()->getFlags());
1443 
1444   return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
1445 }
1446 
// This is a shortcut for integer division/remainder because we have fast
// i32<->f32 conversions and a fast f32 reciprocal instruction. The 24-bit f32
// significand can exactly represent any integer of up to 24 bits.
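// The quotient is estimated as trunc(fa * rcp(fb)). Since rcp is only
// approximate, the remainder fr = fa - fq * fb is compared against fb (both as
// absolute values); when |fr| >= |fb| the estimate is one step short and jq
// (1 for unsigned, +/-1 with the sign of the result for signed) is added.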
1450 SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
1451                                             bool Sign) const {
1452   SDLoc DL(Op);
1453   EVT VT = Op.getValueType();
1454   SDValue LHS = Op.getOperand(0);
1455   SDValue RHS = Op.getOperand(1);
1456   MVT IntVT = MVT::i32;
1457   MVT FltVT = MVT::f32;
1458 
1459   unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
1460   if (LHSSignBits < 9)
1461     return SDValue();
1462 
1463   unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
1464   if (RHSSignBits < 9)
1465     return SDValue();
1466 
1467   unsigned BitSize = VT.getSizeInBits();
1468   unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
1469   unsigned DivBits = BitSize - SignBits;
1470   if (Sign)
1471     ++DivBits;
1472 
1473   ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
1474   ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
1475 
1476   SDValue jq = DAG.getConstant(1, DL, IntVT);
1477 
1478   if (Sign) {
1479     // char|short jq = ia ^ ib;
1480     jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
1481 
1482     // jq = jq >> (bitsize - 2)
1483     jq = DAG.getNode(ISD::SRA, DL, VT, jq,
1484                      DAG.getConstant(BitSize - 2, DL, VT));
1485 
1486     // jq = jq | 0x1
1487     jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
1488   }
1489 
1490   // int ia = (int)LHS;
1491   SDValue ia = LHS;
1492 
  // int ib = (int)RHS;
1494   SDValue ib = RHS;
1495 
1496   // float fa = (float)ia;
1497   SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);
1498 
1499   // float fb = (float)ib;
1500   SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);
1501 
1502   SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
1503                            fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));
1504 
1505   // fq = trunc(fq);
1506   fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);
1507 
1508   // float fqneg = -fq;
1509   SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);
1510 
1511   // float fr = mad(fqneg, fb, fa);
1512   unsigned OpCode = Subtarget->hasFP32Denormals() ?
1513                     (unsigned)AMDGPUISD::FMAD_FTZ :
1514                     (unsigned)ISD::FMAD;
1515   SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa);
1516 
1517   // int iq = (int)fq;
1518   SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);
1519 
1520   // fr = fabs(fr);
1521   fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);
1522 
1523   // fb = fabs(fb);
1524   fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);
1525 
1526   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
1527 
1528   // int cv = fr >= fb;
1529   SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);
1530 
1531   // jq = (cv ? jq : 0);
1532   jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
1533 
1534   // dst = iq + jq;
1535   SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);
1536 
  // The remainder needs compensation; it's simpler to recompute it.
1538   SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
1539   Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
1540 
1541   // Truncate to number of bits this divide really is.
1542   if (Sign) {
1543     SDValue InRegSize
1544       = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits));
1545     Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize);
1546     Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize);
1547   } else {
1548     SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
1549     Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask);
1550     Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask);
1551   }
1552 
1553   return DAG.getMergeValues({ Div, Rem }, DL);
1554 }
1555 
1556 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
1557                                       SelectionDAG &DAG,
1558                                       SmallVectorImpl<SDValue> &Results) const {
1559   SDLoc DL(Op);
1560   EVT VT = Op.getValueType();
1561 
1562   assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64");
1563 
1564   EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1565 
1566   SDValue One = DAG.getConstant(1, DL, HalfVT);
1567   SDValue Zero = DAG.getConstant(0, DL, HalfVT);
1568 
  // Hi/Lo split
1570   SDValue LHS = Op.getOperand(0);
1571   SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
1572   SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, One);
1573 
1574   SDValue RHS = Op.getOperand(1);
1575   SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
1576   SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, One);
1577 
1578   if (DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
1579       DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {
1580 
1581     SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1582                               LHS_Lo, RHS_Lo);
1583 
1584     SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), Zero});
1585     SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), Zero});
1586 
1587     Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
1588     Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
1589     return;
1590   }
1591 
1592   if (isTypeLegal(MVT::i64)) {
1593     // Compute denominator reciprocal.
1594     unsigned FMAD = Subtarget->hasFP32Denormals() ?
1595                     (unsigned)AMDGPUISD::FMAD_FTZ :
1596                     (unsigned)ISD::FMAD;
1597 
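    // The f32 magic numbers below are bit patterns: 0x4f800000 = 2^32,
    // 0x5f7ffffc is just under 2^64, 0x2f800000 = 2^-32 and 0xcf800000 = -2^32.
    // They build a floating-point estimate of 2^64 / RHS whose halves seed the
    // 64-bit reciprocal Rcp64; the multiply/add-with-carry steps below refine
    // it before the quotient estimate Mulhi3 is corrected at most twice.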
1598     SDValue Cvt_Lo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Lo);
1599     SDValue Cvt_Hi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Hi);
1600     SDValue Mad1 = DAG.getNode(FMAD, DL, MVT::f32, Cvt_Hi,
1601       DAG.getConstantFP(APInt(32, 0x4f800000).bitsToFloat(), DL, MVT::f32),
1602       Cvt_Lo);
1603     SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, DL, MVT::f32, Mad1);
1604     SDValue Mul1 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Rcp,
1605       DAG.getConstantFP(APInt(32, 0x5f7ffffc).bitsToFloat(), DL, MVT::f32));
1606     SDValue Mul2 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Mul1,
1607       DAG.getConstantFP(APInt(32, 0x2f800000).bitsToFloat(), DL, MVT::f32));
1608     SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, MVT::f32, Mul2);
1609     SDValue Mad2 = DAG.getNode(FMAD, DL, MVT::f32, Trunc,
1610       DAG.getConstantFP(APInt(32, 0xcf800000).bitsToFloat(), DL, MVT::f32),
1611       Mul1);
1612     SDValue Rcp_Lo = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Mad2);
1613     SDValue Rcp_Hi = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Trunc);
1614     SDValue Rcp64 = DAG.getBitcast(VT,
1615                         DAG.getBuildVector(MVT::v2i32, DL, {Rcp_Lo, Rcp_Hi}));
1616 
1617     SDValue Zero64 = DAG.getConstant(0, DL, VT);
1618     SDValue One64  = DAG.getConstant(1, DL, VT);
1619     SDValue Zero1 = DAG.getConstant(0, DL, MVT::i1);
1620     SDVTList HalfCarryVT = DAG.getVTList(HalfVT, MVT::i1);
1621 
1622     SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS);
1623     SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64);
1624     SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1);
1625     SDValue Mulhi1_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
1626                                     Zero);
1627     SDValue Mulhi1_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
1628                                     One);
1629 
1630     SDValue Add1_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Lo,
1631                                   Mulhi1_Lo, Zero1);
1632     SDValue Add1_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Hi,
1633                                   Mulhi1_Hi, Add1_Lo.getValue(1));
1634     SDValue Add1_HiNc = DAG.getNode(ISD::ADD, DL, HalfVT, Rcp_Hi, Mulhi1_Hi);
1635     SDValue Add1 = DAG.getBitcast(VT,
1636                         DAG.getBuildVector(MVT::v2i32, DL, {Add1_Lo, Add1_Hi}));
1637 
1638     SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1);
1639     SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2);
1640     SDValue Mulhi2_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
1641                                     Zero);
1642     SDValue Mulhi2_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
1643                                     One);
1644 
1645     SDValue Add2_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_Lo,
1646                                   Mulhi2_Lo, Zero1);
1647     SDValue Add2_HiC = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_HiNc,
1648                                    Mulhi2_Hi, Add1_Lo.getValue(1));
1649     SDValue Add2_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add2_HiC,
1650                                   Zero, Add2_Lo.getValue(1));
1651     SDValue Add2 = DAG.getBitcast(VT,
1652                         DAG.getBuildVector(MVT::v2i32, DL, {Add2_Lo, Add2_Hi}));
1653     SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2);
1654 
1655     SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3);
1656 
1657     SDValue Mul3_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, Zero);
1658     SDValue Mul3_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, One);
1659     SDValue Sub1_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Lo,
1660                                   Mul3_Lo, Zero1);
1661     SDValue Sub1_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Hi,
1662                                   Mul3_Hi, Sub1_Lo.getValue(1));
1663     SDValue Sub1_Mi = DAG.getNode(ISD::SUB, DL, HalfVT, LHS_Hi, Mul3_Hi);
1664     SDValue Sub1 = DAG.getBitcast(VT,
1665                         DAG.getBuildVector(MVT::v2i32, DL, {Sub1_Lo, Sub1_Hi}));
1666 
1667     SDValue MinusOne = DAG.getConstant(0xffffffffu, DL, HalfVT);
1668     SDValue C1 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, MinusOne, Zero,
1669                                  ISD::SETUGE);
1670     SDValue C2 = DAG.getSelectCC(DL, Sub1_Lo, RHS_Lo, MinusOne, Zero,
1671                                  ISD::SETUGE);
1672     SDValue C3 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, C2, C1, ISD::SETEQ);
1673 
1674     // TODO: Here and below portions of the code can be enclosed into if/endif.
1675     // Currently control flow is unconditional and we have 4 selects after
1676     // potential endif to substitute PHIs.
1677 
1678     // if C3 != 0 ...
1679     SDValue Sub2_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Lo,
1680                                   RHS_Lo, Zero1);
1681     SDValue Sub2_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Mi,
1682                                   RHS_Hi, Sub1_Lo.getValue(1));
1683     SDValue Sub2_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1684                                   Zero, Sub2_Lo.getValue(1));
1685     SDValue Sub2 = DAG.getBitcast(VT,
1686                         DAG.getBuildVector(MVT::v2i32, DL, {Sub2_Lo, Sub2_Hi}));
1687 
1688     SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64);
1689 
1690     SDValue C4 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, MinusOne, Zero,
1691                                  ISD::SETUGE);
1692     SDValue C5 = DAG.getSelectCC(DL, Sub2_Lo, RHS_Lo, MinusOne, Zero,
1693                                  ISD::SETUGE);
1694     SDValue C6 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, C5, C4, ISD::SETEQ);
1695 
1696     // if (C6 != 0)
1697     SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64);
1698 
1699     SDValue Sub3_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Lo,
1700                                   RHS_Lo, Zero1);
1701     SDValue Sub3_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1702                                   RHS_Hi, Sub2_Lo.getValue(1));
1703     SDValue Sub3_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub3_Mi,
1704                                   Zero, Sub3_Lo.getValue(1));
1705     SDValue Sub3 = DAG.getBitcast(VT,
1706                         DAG.getBuildVector(MVT::v2i32, DL, {Sub3_Lo, Sub3_Hi}));
1707 
1708     // endif C6
1709     // endif C3
1710 
1711     SDValue Sel1 = DAG.getSelectCC(DL, C6, Zero, Add4, Add3, ISD::SETNE);
1712     SDValue Div  = DAG.getSelectCC(DL, C3, Zero, Sel1, Mulhi3, ISD::SETNE);
1713 
1714     SDValue Sel2 = DAG.getSelectCC(DL, C6, Zero, Sub3, Sub2, ISD::SETNE);
1715     SDValue Rem  = DAG.getSelectCC(DL, C3, Zero, Sel2, Sub1, ISD::SETNE);
1716 
1717     Results.push_back(Div);
1718     Results.push_back(Rem);
1719 
1720     return;
1721   }
1722 
  // r600 expansion.
  // Get speculative DIV and REM values (used when RHS_Hi == 0).
1725   SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
1726   SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
1727 
1728   SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, Zero, REM_Part, LHS_Hi, ISD::SETEQ);
1729   SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, Zero});
1730   REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
1731 
1732   SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, Zero, DIV_Part, Zero, ISD::SETEQ);
1733   SDValue DIV_Lo = Zero;
1734 
1735   const unsigned halfBitWidth = HalfVT.getSizeInBits();
1736 
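  // Produce the low half of the quotient by restoring long division: shift one
  // bit of LHS_Lo into REM each iteration and set a quotient bit whenever
  // REM >= RHS.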
1737   for (unsigned i = 0; i < halfBitWidth; ++i) {
1738     const unsigned bitPos = halfBitWidth - i - 1;
1739     SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
1740     // Get value of high bit
1741     SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
1742     HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, One);
1743     HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
1744 
1745     // Shift
1746     REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
1747     // Add LHS high bit
1748     REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
1749 
1750     SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
1751     SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, Zero, ISD::SETUGE);
1752 
1753     DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
1754 
1755     // Update REM
1756     SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
1757     REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
1758   }
1759 
1760   SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
1761   DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
1762   Results.push_back(DIV);
1763   Results.push_back(REM);
1764 }
1765 
1766 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
1767                                            SelectionDAG &DAG) const {
1768   SDLoc DL(Op);
1769   EVT VT = Op.getValueType();
1770 
1771   if (VT == MVT::i64) {
1772     SmallVector<SDValue, 2> Results;
1773     LowerUDIVREM64(Op, DAG, Results);
1774     return DAG.getMergeValues(Results, DL);
1775   }
1776 
1777   if (VT == MVT::i32) {
1778     if (SDValue Res = LowerDIVREM24(Op, DAG, false))
1779       return Res;
1780   }
1781 
1782   SDValue Num = Op.getOperand(0);
1783   SDValue Den = Op.getOperand(1);
1784 
  // RCP = URECIP(Den) = 2^32 / Den + e
  // where e is the rounding error.
1787   SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);
1788 
  // RCP_LO = mul(RCP, Den)
1790   SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den);
1791 
  // RCP_HI = mulhu(RCP, Den)
1793   SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);
1794 
1795   // NEG_RCP_LO = -RCP_LO
1796   SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
1797                                                      RCP_LO);
1798 
1799   // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
1800   SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
1801                                            NEG_RCP_LO, RCP_LO,
1802                                            ISD::SETEQ);
1803   // Calculate the rounding error from the URECIP instruction
1804   // E = mulhu(ABS_RCP_LO, RCP)
1805   SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);
1806 
1807   // RCP_A_E = RCP + E
1808   SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);
1809 
1810   // RCP_S_E = RCP - E
1811   SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);
1812 
  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
1814   SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
1815                                      RCP_A_E, RCP_S_E,
1816                                      ISD::SETEQ);
1817   // Quotient = mulhu(Tmp0, Num)
1818   SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);
1819 
1820   // Num_S_Remainder = Quotient * Den
1821   SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den);
1822 
1823   // Remainder = Num - Num_S_Remainder
1824   SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);
1825 
1826   // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
1827   SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
1828                                                  DAG.getConstant(-1, DL, VT),
1829                                                  DAG.getConstant(0, DL, VT),
1830                                                  ISD::SETUGE);
1831   // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
1832   SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
1833                                                   Num_S_Remainder,
1834                                                   DAG.getConstant(-1, DL, VT),
1835                                                   DAG.getConstant(0, DL, VT),
1836                                                   ISD::SETUGE);
1837   // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
1838   SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
1839                                                Remainder_GE_Zero);
1840 
1841   // Calculate Division result:
1842 
1843   // Quotient_A_One = Quotient + 1
1844   SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
1845                                        DAG.getConstant(1, DL, VT));
1846 
1847   // Quotient_S_One = Quotient - 1
1848   SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
1849                                        DAG.getConstant(1, DL, VT));
1850 
1851   // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
1852   SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
1853                                      Quotient, Quotient_A_One, ISD::SETEQ);
1854 
1855   // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
1856   Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
1857                             Quotient_S_One, Div, ISD::SETEQ);
1858 
1859   // Calculate Rem result:
1860 
1861   // Remainder_S_Den = Remainder - Den
1862   SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);
1863 
1864   // Remainder_A_Den = Remainder + Den
1865   SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);
1866 
1867   // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
1868   SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
1869                                     Remainder, Remainder_S_Den, ISD::SETEQ);
1870 
1871   // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
1872   Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
1873                             Remainder_A_Den, Rem, ISD::SETEQ);
1874   SDValue Ops[2] = {
1875     Div,
1876     Rem
1877   };
1878   return DAG.getMergeValues(Ops, DL);
1879 }
1880 
1881 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
1882                                            SelectionDAG &DAG) const {
1883   SDLoc DL(Op);
1884   EVT VT = Op.getValueType();
1885 
1886   SDValue LHS = Op.getOperand(0);
1887   SDValue RHS = Op.getOperand(1);
1888 
1889   SDValue Zero = DAG.getConstant(0, DL, VT);
1890   SDValue NegOne = DAG.getConstant(-1, DL, VT);
1891 
1892   if (VT == MVT::i32) {
1893     if (SDValue Res = LowerDIVREM24(Op, DAG, true))
1894       return Res;
1895   }
1896 
1897   if (VT == MVT::i64 &&
1898       DAG.ComputeNumSignBits(LHS) > 32 &&
1899       DAG.ComputeNumSignBits(RHS) > 32) {
1900     EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1901 
    // Hi/Lo split
1903     SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
1904     SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
1905     SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1906                                  LHS_Lo, RHS_Lo);
1907     SDValue Res[2] = {
1908       DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
1909       DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
1910     };
1911     return DAG.getMergeValues(Res, DL);
1912   }
1913 
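  // General case: take absolute values via (x + s) ^ s, where s is -1 for a
  // negative x and 0 otherwise, do an unsigned divrem, then restore the signs
  // with (r ^ s) - s. The quotient sign is LHSign ^ RHSign and the remainder
  // takes the sign of LHS.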
1914   SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
1915   SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
1916   SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
1917   SDValue RSign = LHSign; // Remainder sign is the same as LHS
1918 
1919   LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
1920   RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
1921 
1922   LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
1923   RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
1924 
1925   SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
1926   SDValue Rem = Div.getValue(1);
1927 
1928   Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
1929   Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
1930 
1931   Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
1932   Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
1933 
1934   SDValue Res[2] = {
1935     Div,
1936     Rem
1937   };
1938   return DAG.getMergeValues(Res, DL);
1939 }
1940 
1941 // (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y))
1942 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
1943   SDLoc SL(Op);
1944   EVT VT = Op.getValueType();
1945   SDValue X = Op.getOperand(0);
1946   SDValue Y = Op.getOperand(1);
1947 
1948   // TODO: Should this propagate fast-math-flags?
1949 
1950   SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y);
1951   SDValue Floor = DAG.getNode(ISD::FTRUNC, SL, VT, Div);
1952   SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Floor, Y);
1953 
1954   return DAG.getNode(ISD::FSUB, SL, VT, X, Mul);
1955 }
1956 
1957 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
1958   SDLoc SL(Op);
1959   SDValue Src = Op.getOperand(0);
1960 
1961   // result = trunc(src)
1962   // if (src > 0.0 && src != result)
1963   //   result += 1.0
1964 
1965   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
1966 
1967   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
1968   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
1969 
1970   EVT SetCCVT =
1971       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
1972 
1973   SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
1974   SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
1975   SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
1976 
1977   SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
1978   // TODO: Should this propagate fast-math-flags?
1979   return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
1980 }
1981 
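// Extract the unbiased exponent of an f64 from its high 32 bits: bits [62:52]
// of the double minus the exponent bias of 1023.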
1982 static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
1983                                   SelectionDAG &DAG) {
1984   const unsigned FractBits = 52;
1985   const unsigned ExpBits = 11;
1986 
1987   SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
1988                                 Hi,
1989                                 DAG.getConstant(FractBits - 32, SL, MVT::i32),
1990                                 DAG.getConstant(ExpBits, SL, MVT::i32));
1991   SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
1992                             DAG.getConstant(1023, SL, MVT::i32));
1993 
1994   return Exp;
1995 }
1996 
1997 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
1998   SDLoc SL(Op);
1999   SDValue Src = Op.getOperand(0);
2000 
2001   assert(Op.getValueType() == MVT::f64);
2002 
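  // Truncate toward zero on the integer bit pattern: clear every fraction bit
  // that lies below one ulp of the integer part. A negative exponent leaves
  // only the sign (+/-0.0); an exponent above 51 means the value is already an
  // integer and is returned unchanged.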
2003   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2004   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2005 
2006   SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2007 
2008   // Extract the upper half, since this is where we will find the sign and
2009   // exponent.
2010   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);
2011 
2012   SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2013 
2014   const unsigned FractBits = 52;
2015 
2016   // Extract the sign bit.
2017   const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
2018   SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
2019 
2020   // Extend back to 64-bits.
2021   SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
2022   SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
2023 
2024   SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
2025   const SDValue FractMask
2026     = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
2027 
2028   SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
2029   SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
2030   SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
2031 
2032   EVT SetCCVT =
2033       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
2034 
2035   const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
2036 
2037   SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
2038   SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
2039 
2040   SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
2041   SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
2042 
2043   return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
2044 }
2045 
2046 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
2047   SDLoc SL(Op);
2048   SDValue Src = Op.getOperand(0);
2049 
2050   assert(Op.getValueType() == MVT::f64);
2051 
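  // Use the 2^52 trick: adding and then subtracting copysign(2^52, x) discards
  // the fraction bits in the current rounding mode. Values with a magnitude
  // above 0x1.fffffffffffffp+51 are already integers and are passed through.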
2052   APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
2053   SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
2054   SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
2055 
2056   // TODO: Should this propagate fast-math-flags?
2057 
2058   SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
2059   SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
2060 
2061   SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
2062 
2063   APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");
2064   SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);
2065 
2066   EVT SetCCVT =
2067       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2068   SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
2069 
2070   return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
2071 }
2072 
2073 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
2074   // FNEARBYINT and FRINT are the same, except in their handling of FP
2075   // exceptions. Those aren't really meaningful for us, and OpenCL only has
2076   // rint, so just treat them as equivalent.
2077   return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
2078 }
2079 
2080 // XXX - May require not supporting f32 denormals?
2081 
2082 // Don't handle v2f16. The extra instructions to scalarize and repack around the
2083 // compare and vselect end up producing worse code than scalarizing the whole
2084 // operation.
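//
// round(x) == trunc(x) + (|x - trunc(x)| >= 0.5 ? copysign(1.0, x) : 0.0)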
2085 SDValue AMDGPUTargetLowering::LowerFROUND32_16(SDValue Op, SelectionDAG &DAG) const {
2086   SDLoc SL(Op);
2087   SDValue X = Op.getOperand(0);
2088   EVT VT = Op.getValueType();
2089 
2090   SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X);
2091 
2092   // TODO: Should this propagate fast-math-flags?
2093 
2094   SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T);
2095 
2096   SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff);
2097 
2098   const SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
2099   const SDValue One = DAG.getConstantFP(1.0, SL, VT);
2100   const SDValue Half = DAG.getConstantFP(0.5, SL, VT);
2101 
2102   SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, VT, One, X);
2103 
2104   EVT SetCCVT =
2105       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2106 
2107   SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
2108 
2109   SDValue Sel = DAG.getNode(ISD::SELECT, SL, VT, Cmp, SignOne, Zero);
2110 
2111   return DAG.getNode(ISD::FADD, SL, VT, T, Sel);
2112 }
2113 
2114 SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const {
2115   SDLoc SL(Op);
2116   SDValue X = Op.getOperand(0);
2117 
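  // Work on the f64 bit pattern: when any fraction bits below the integer part
  // are set, add the bit worth 0.5 (the add carries into the integer part only
  // if the fraction is at least one half), then clear the fraction bits.
  // A negative exponent yields copysign(Exp == -1 ? 1.0 : 0.0, x); an exponent
  // above 51 means x is already an integer.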
2118   SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X);
2119 
2120   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2121   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2122   const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32);
2123   const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32);
2124   EVT SetCCVT =
2125       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
2126 
2127   SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
2128 
2129   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One);
2130 
2131   SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2132 
2133   const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL,
2134                                        MVT::i64);
2135 
2136   SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp);
2137   SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64,
2138                           DAG.getConstant(INT64_C(0x0008000000000000), SL,
2139                                           MVT::i64),
2140                           Exp);
2141 
2142   SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M);
2143   SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT,
2144                               DAG.getConstant(0, SL, MVT::i64), Tmp0,
2145                               ISD::SETNE);
2146 
2147   SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1,
2148                              D, DAG.getConstant(0, SL, MVT::i64));
2149   SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2);
2150 
2151   K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64));
2152   K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K);
2153 
2154   SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
2155   SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
2156   SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ);
2157 
2158   SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64,
2159                             ExpEqNegOne,
2160                             DAG.getConstantFP(1.0, SL, MVT::f64),
2161                             DAG.getConstantFP(0.0, SL, MVT::f64));
2162 
2163   SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X);
2164 
2165   K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K);
2166   K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K);
2167 
2168   return K;
2169 }
2170 
2171 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2172   EVT VT = Op.getValueType();
2173 
2174   if (VT == MVT::f32 || VT == MVT::f16)
2175     return LowerFROUND32_16(Op, DAG);
2176 
2177   if (VT == MVT::f64)
2178     return LowerFROUND64(Op, DAG);
2179 
2180   llvm_unreachable("unhandled type");
2181 }
2182 
2183 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
2184   SDLoc SL(Op);
2185   SDValue Src = Op.getOperand(0);
2186 
2187   // result = trunc(src);
2188   // if (src < 0.0 && src != result)
2189   //   result += -1.0.
2190 
2191   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2192 
2193   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2194   const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
2195 
2196   EVT SetCCVT =
2197       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2198 
2199   SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
2200   SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2201   SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2202 
2203   SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
2204   // TODO: Should this propagate fast-math-flags?
2205   return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2206 }
2207 
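// Expand log_b(x) as log2(x) * (1 / log2(b)); the caller passes the
// precomputed 1/log2(b) constant in Log2BaseInverted.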
2208 SDValue AMDGPUTargetLowering::LowerFLOG(SDValue Op, SelectionDAG &DAG,
2209                                         double Log2BaseInverted) const {
2210   EVT VT = Op.getValueType();
2211 
2212   SDLoc SL(Op);
2213   SDValue Operand = Op.getOperand(0);
2214   SDValue Log2Operand = DAG.getNode(ISD::FLOG2, SL, VT, Operand);
2215   SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT);
2216 
2217   return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand);
2218 }
2219 
2220 static bool isCtlzOpc(unsigned Opc) {
2221   return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
2222 }
2223 
2224 static bool isCttzOpc(unsigned Opc) {
2225   return Opc == ISD::CTTZ || Opc == ISD::CTTZ_ZERO_UNDEF;
2226 }
2227 
2228 SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const {
2229   SDLoc SL(Op);
2230   SDValue Src = Op.getOperand(0);
2231   bool ZeroUndef = Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
2232                    Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF;
2233 
2234   unsigned ISDOpc, NewOpc;
2235   if (isCtlzOpc(Op.getOpcode())) {
2236     ISDOpc = ISD::CTLZ_ZERO_UNDEF;
2237     NewOpc = AMDGPUISD::FFBH_U32;
2238   } else if (isCttzOpc(Op.getOpcode())) {
2239     ISDOpc = ISD::CTTZ_ZERO_UNDEF;
2240     NewOpc = AMDGPUISD::FFBL_B32;
2241   } else
    llvm_unreachable("Unexpected opcode");
2243 
2245   if (ZeroUndef && Src.getValueType() == MVT::i32)
2246     return DAG.getNode(NewOpc, SL, MVT::i32, Src);
2247 
2248   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2249 
2250   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2251   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2252 
2253   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
2254   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
2255 
2256   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
2257                                    *DAG.getContext(), MVT::i32);
2258 
2259   SDValue HiOrLo = isCtlzOpc(Op.getOpcode()) ? Hi : Lo;
2260   SDValue Hi0orLo0 = DAG.getSetCC(SL, SetCCVT, HiOrLo, Zero, ISD::SETEQ);
2261 
2262   SDValue OprLo = DAG.getNode(ISDOpc, SL, MVT::i32, Lo);
2263   SDValue OprHi = DAG.getNode(ISDOpc, SL, MVT::i32, Hi);
2264 
2265   const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32);
2266   SDValue Add, NewOpr;
2267   if (isCtlzOpc(Op.getOpcode())) {
2268     Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprLo, Bits32);
2269     // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x))
2270     NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprHi);
2271   } else {
2272     Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprHi, Bits32);
2273     // cttz(x) = lo_32(x) == 0 ? cttz(hi_32(x)) + 32 : cttz(lo_32(x))
2274     NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprLo);
2275   }
2276 
2277   if (!ZeroUndef) {
2278     // Test if the full 64-bit input is zero.
2279 
2280     // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32,
2281     // which we probably don't want.
2282     SDValue LoOrHi = isCtlzOpc(Op.getOpcode()) ? Lo : Hi;
2283     SDValue Lo0OrHi0 = DAG.getSetCC(SL, SetCCVT, LoOrHi, Zero, ISD::SETEQ);
2284     SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0OrHi0, Hi0orLo0);
2285 
2286     // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction
2287     // with the same cycles, otherwise it is slower.
2288     // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src,
2289     // DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ);
2290 
    const SDValue Bits64 = DAG.getConstant(64, SL, MVT::i32);

    // The instruction returns -1 for 0 input, but the defined intrinsic
    // behavior is to return the number of bits.
    NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32,
                         SrcIsZero, Bits64, NewOpr);
2297   }
2298 
2299   return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr);
2300 }
2301 
2302 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
2303                                                bool Signed) const {
2304   // Unsigned
2305   // cul2f(ulong u)
2306   //{
2307   //  uint lz = clz(u);
2308   //  uint e = (u != 0) ? 127U + 63U - lz : 0;
2309   //  u = (u << lz) & 0x7fffffffffffffffUL;
2310   //  ulong t = u & 0xffffffffffUL;
2311   //  uint v = (e << 23) | (uint)(u >> 40);
2312   //  uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
2313   //  return as_float(v + r);
2314   //}
2315   // Signed
2316   // cl2f(long l)
2317   //{
2318   //  long s = l >> 63;
2319   //  float r = cul2f((l + s) ^ s);
2320   //  return s ? -r : r;
2321   //}
2322 
2323   SDLoc SL(Op);
2324   SDValue Src = Op.getOperand(0);
2325   SDValue L = Src;
2326 
2327   SDValue S;
2328   if (Signed) {
2329     const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64);
2330     S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit);
2331 
2332     SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S);
2333     L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S);
2334   }
2335 
2336   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
2337                                    *DAG.getContext(), MVT::f32);
2338 
2340   SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32);
2341   SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64);
2342   SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L);
2343   LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ);
2344 
2345   SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32);
2346   SDValue E = DAG.getSelect(SL, MVT::i32,
2347     DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE),
2348     DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ),
2349     ZeroI32);
2350 
2351   SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64,
2352     DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ),
2353     DAG.getConstant((-1ULL) >> 1, SL, MVT::i64));
2354 
2355   SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U,
2356                           DAG.getConstant(0xffffffffffULL, SL, MVT::i64));
2357 
2358   SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64,
2359                              U, DAG.getConstant(40, SL, MVT::i64));
2360 
2361   SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32,
2362     DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)),
2363     DAG.getNode(ISD::TRUNCATE, SL, MVT::i32,  UShl));
2364 
2365   SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64);
2366   SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT);
2367   SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ);
2368 
2369   SDValue One = DAG.getConstant(1, SL, MVT::i32);
2370 
2371   SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One);
2372 
2373   SDValue R = DAG.getSelect(SL, MVT::i32,
2374     RCmp,
2375     One,
2376     DAG.getSelect(SL, MVT::i32, TCmp, VTrunc1, ZeroI32));
2377   R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R);
2378   R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R);
2379 
2380   if (!Signed)
2381     return R;
2382 
2383   SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R);
2384   return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R);
2385 }
2386 
2387 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
2388                                                bool Signed) const {
2389   SDLoc SL(Op);
2390   SDValue Src = Op.getOperand(0);
2391 
2392   SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2393 
2394   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
2395                            DAG.getConstant(0, SL, MVT::i32));
2396   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
2397                            DAG.getConstant(1, SL, MVT::i32));
2398 
2399   SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
2400                               SL, MVT::f64, Hi);
2401 
2402   SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
2403 
2404   SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
2405                               DAG.getConstant(32, SL, MVT::i32));
2406   // TODO: Should this propagate fast-math-flags?
2407   return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
2408 }
2409 
2410 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
2411                                                SelectionDAG &DAG) const {
2412   assert(Op.getOperand(0).getValueType() == MVT::i64 &&
2413          "operation should be legal");
2414 
2415   // TODO: Factor out code common with LowerSINT_TO_FP.
2416 
2417   EVT DestVT = Op.getValueType();
2418   if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2419     SDLoc DL(Op);
2420     SDValue Src = Op.getOperand(0);
2421 
2422     SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2423     SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op));
2424     SDValue FPRound =
2425         DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2426 
2427     return FPRound;
2428   }
2429 
2430   if (DestVT == MVT::f32)
2431     return LowerINT_TO_FP32(Op, DAG, false);
2432 
2433   assert(DestVT == MVT::f64);
2434   return LowerINT_TO_FP64(Op, DAG, false);
2435 }
2436 
2437 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
2438                                               SelectionDAG &DAG) const {
2439   assert(Op.getOperand(0).getValueType() == MVT::i64 &&
2440          "operation should be legal");
2441 
2442   // TODO: Factor out code common with LowerUINT_TO_FP.
2443 
2444   EVT DestVT = Op.getValueType();
2445   if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2446     SDLoc DL(Op);
2447     SDValue Src = Op.getOperand(0);
2448 
2449     SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2450     SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op));
2451     SDValue FPRound =
2452         DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2453 
2454     return FPRound;
2455   }
2456 
2457   if (DestVT == MVT::f32)
2458     return LowerINT_TO_FP32(Op, DAG, true);
2459 
2460   assert(DestVT == MVT::f64);
2461   return LowerINT_TO_FP64(Op, DAG, true);
2462 }
2463 
2464 SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
2465                                                bool Signed) const {
2466   SDLoc SL(Op);
2467 
2468   SDValue Src = Op.getOperand(0);
2469 
2470   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2471 
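  // Split the truncated value into 32-bit halves. K0 and K1 are the f64 bit
  // patterns for 2^-32 and -2^32: the high half is floor(Trunc * 2^-32) and
  // the low half is fma(FloorMul, -2^32, Trunc), the part left below 2^32.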
2472   SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL,
2473                                  MVT::f64);
2474   SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL,
2475                                  MVT::f64);
2476   // TODO: Should this propagate fast-math-flags?
2477   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0);
2478 
2479   SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul);
2480 
2482   SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc);
2483 
2484   SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL,
2485                            MVT::i32, FloorMul);
2486   SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
2487 
2488   SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi});
2489 
2490   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result);
2491 }
2492 
2493 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const {
2494   SDLoc DL(Op);
2495   SDValue N0 = Op.getOperand(0);
2496 
2497   // Convert to target node to get known bits
2498   if (N0.getValueType() == MVT::f32)
2499     return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0);
2500 
2501   if (getTargetMachine().Options.UnsafeFPMath) {
2502     // There is a generic expand for FP_TO_FP16 with unsafe fast math.
2503     return SDValue();
2504   }
2505 
2506   assert(N0.getSimpleValueType() == MVT::f64);
2507 
2508   // f64 -> f16 conversion using round-to-nearest-even rounding mode.
2509   const unsigned ExpMask = 0x7ff;
2510   const unsigned ExpBiasf64 = 1023;
2511   const unsigned ExpBiasf16 = 15;
2512   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
2513   SDValue One = DAG.getConstant(1, DL, MVT::i32);
2514   SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0);
2515   SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U,
2516                            DAG.getConstant(32, DL, MVT::i64));
2517   UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32);
2518   U = DAG.getZExtOrTrunc(U, DL, MVT::i32);
2519   SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2520                           DAG.getConstant(20, DL, MVT::i64));
2521   E = DAG.getNode(ISD::AND, DL, MVT::i32, E,
2522                   DAG.getConstant(ExpMask, DL, MVT::i32));
2523   // Subtract the fp64 exponent bias (1023) to get the real exponent and
2524   // add the f16 bias (15) to get the biased exponent for the f16 format.
2525   E = DAG.getNode(ISD::ADD, DL, MVT::i32, E,
2526                   DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32));
2527 
2528   SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2529                           DAG.getConstant(8, DL, MVT::i32));
2530   M = DAG.getNode(ISD::AND, DL, MVT::i32, M,
2531                   DAG.getConstant(0xffe, DL, MVT::i32));
2532 
2533   SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH,
2534                                   DAG.getConstant(0x1ff, DL, MVT::i32));
2535   MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U);
2536 
2537   SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ);
2538   M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set);
2539 
2540   // (M != 0 ? 0x0200 : 0) | 0x7c00;
2541   SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32,
2542       DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32),
2543                       Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32));
2544 
2545   // N = M | (E << 12);
2546   SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2547       DAG.getNode(ISD::SHL, DL, MVT::i32, E,
2548                   DAG.getConstant(12, DL, MVT::i32)));
2549 
2550   // B = clamp(1-E, 0, 13);
2551   SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32,
2552                                   One, E);
2553   SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero);
2554   B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B,
2555                   DAG.getConstant(13, DL, MVT::i32));
2556 
2557   SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2558                                    DAG.getConstant(0x1000, DL, MVT::i32));
2559 
2560   SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B);
2561   SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B);
2562   SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE);
2563   D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1);
2564 
2565   SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT);
2566   SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V,
2567                               DAG.getConstant(0x7, DL, MVT::i32));
2568   V = DAG.getNode(ISD::SRL, DL, MVT::i32, V,
2569                   DAG.getConstant(2, DL, MVT::i32));
2570   SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32),
2571                                One, Zero, ISD::SETEQ);
2572   SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32),
2573                                One, Zero, ISD::SETGT);
2574   V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1);
2575   V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1);
2576 
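  // E > 30 overflows the f16 exponent, so clamp the result to infinity
  // (0x7c00). E == 1039 corresponds to an all-ones f64 exponent field
  // (0x7ff - 1023 + 15), i.e. an Inf or NaN source, which selects the Inf/NaN
  // pattern I computed above.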
2577   V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32),
2578                       DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT);
2579   V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32),
2580                       I, V, ISD::SETEQ);
2581 
2582   // Extract the sign bit.
2583   SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2584                             DAG.getConstant(16, DL, MVT::i32));
2585   Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign,
2586                      DAG.getConstant(0x8000, DL, MVT::i32));
2587 
2588   V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V);
2589   return DAG.getZExtOrTrunc(V, DL, Op.getValueType());
2590 }
2591 
2592 SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op,
2593                                               SelectionDAG &DAG) const {
2594   SDValue Src = Op.getOperand(0);
2595 
2596   // TODO: Factor out code common with LowerFP_TO_UINT.
2597 
2598   EVT SrcVT = Src.getValueType();
2599   if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) {
2600     SDLoc DL(Op);
2601 
2602     SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
2603     SDValue FpToInt32 =
2604         DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend);
2605 
2606     return FpToInt32;
2607   }
2608 
2609   if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
2610     return LowerFP64_TO_INT(Op, DAG, true);
2611 
2612   return SDValue();
2613 }
2614 
2615 SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op,
2616                                               SelectionDAG &DAG) const {
2617   SDValue Src = Op.getOperand(0);
2618 
2619   // TODO: Factor out code common with LowerFP_TO_SINT.
2620 
2621   EVT SrcVT = Src.getValueType();
2622   if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) {
2623     SDLoc DL(Op);
2624 
2625     SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
2626     SDValue FpToInt32 =
2627         DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend);
2628 
2629     return FpToInt32;
2630   }
2631 
2632   if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
2633     return LowerFP64_TO_INT(Op, DAG, false);
2634 
2635   return SDValue();
2636 }
2637 
2638 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
2639                                                      SelectionDAG &DAG) const {
2640   EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2641   MVT VT = Op.getSimpleValueType();
2642   MVT ScalarVT = VT.getScalarType();
2643 
2644   assert(VT.isVector());
2645 
2646   SDValue Src = Op.getOperand(0);
2647   SDLoc DL(Op);
2648 
2649   // TODO: Don't scalarize on Evergreen?
2650   unsigned NElts = VT.getVectorNumElements();
2651   SmallVector<SDValue, 8> Args;
2652   DAG.ExtractVectorElements(Src, Args, 0, NElts);
2653 
2654   SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
2655   for (unsigned I = 0; I < NElts; ++I)
2656     Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
2657 
2658   return DAG.getBuildVector(VT, DL, Args);
2659 }
2660 
2661 //===----------------------------------------------------------------------===//
2662 // Custom DAG optimizations
2663 //===----------------------------------------------------------------------===//
2664 
2665 static bool isU24(SDValue Op, SelectionDAG &DAG) {
2666   return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24;
2667 }
2668 
2669 static bool isI24(SDValue Op, SelectionDAG &DAG) {
2670   EVT VT = Op.getValueType();
2671   return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
2672                                      // as unsigned 24-bit values.
2673     AMDGPUTargetLowering::numBitsSigned(Op, DAG) < 24;
2674 }
2675 
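// Simplify operand \p OpIdx of the 24-bit multiply node \p Node24: only bits
// [23:0] of that operand are read, so ask SimplifyDemandedBits to narrow or
// fold it. Returns true if a simplification was made.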
2676 static bool simplifyI24(SDNode *Node24, unsigned OpIdx,
2677                         TargetLowering::DAGCombinerInfo &DCI) {
2678 
2679   SelectionDAG &DAG = DCI.DAG;
2680   SDValue Op = Node24->getOperand(OpIdx);
2681   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2682   EVT VT = Op.getValueType();
2683 
  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
  return TLI.SimplifyDemandedBits(Node24, OpIdx, Demanded, DCI, TLO);
2691 }
2692 
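// Constant fold a BFE (bitfield extract): pull \p Width bits starting at bit
// \p Offset out of \p Src0, sign- or zero-extending the field according to
// whether \p IntTy is a signed or unsigned 32-bit type.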
2693 template <typename IntTy>
2694 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
2695                                uint32_t Width, const SDLoc &DL) {
2696   if (Width + Offset < 32) {
2697     uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
2698     IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
2699     return DAG.getConstant(Result, DL, MVT::i32);
2700   }
2701 
2702   return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
2703 }
2704 
2705 static bool hasVolatileUser(SDNode *Val) {
2706   for (SDNode *U : Val->uses()) {
2707     if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
2708       if (M->isVolatile())
2709         return true;
2710     }
2711   }
2712 
2713   return false;
2714 }
2715 
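// Return true if a load or store of \p VT is worth rewriting as an access of
// an equivalent i32-based type in the load/store combines below.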
2716 bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const {
2717   // i32 vectors are the canonical memory type.
2718   if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT))
2719     return false;
2720 
2721   if (!VT.isByteSized())
2722     return false;
2723 
2724   unsigned Size = VT.getStoreSize();
2725 
2726   if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector())
2727     return false;
2728 
2729   if (Size == 3 || (Size > 4 && (Size % 4 != 0)))
2730     return false;
2731 
2732   return true;
2733 }
2734 
2735 // Replace load of an illegal type with a store of a bitcast to a friendlier
2736 // type.
2737 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
2738                                                  DAGCombinerInfo &DCI) const {
2739   if (!DCI.isBeforeLegalize())
2740     return SDValue();
2741 
2742   LoadSDNode *LN = cast<LoadSDNode>(N);
2743   if (LN->isVolatile() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN))
2744     return SDValue();
2745 
2746   SDLoc SL(N);
2747   SelectionDAG &DAG = DCI.DAG;
2748   EVT VT = LN->getMemoryVT();
2749 
2750   unsigned Size = VT.getStoreSize();
2751   unsigned Align = LN->getAlignment();
2752   if (Align < Size && isTypeLegal(VT)) {
2753     bool IsFast;
2754     unsigned AS = LN->getAddressSpace();
2755 
2756     // Expand unaligned loads earlier than legalization. Due to visitation order
2757     // problems during legalization, the emitted instructions to pack and unpack
2758     // the bytes again are not eliminated in the case of an unaligned copy.
2759     if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast)) {
2760       if (VT.isVector())
2761         return scalarizeVectorLoad(LN, DAG);
2762 
2763       SDValue Ops[2];
2764       std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
2765       return DAG.getMergeValues(Ops, SDLoc(N));
2766     }
2767 
2768     if (!IsFast)
2769       return SDValue();
2770   }
2771 
2772   if (!shouldCombineMemoryType(VT))
2773     return SDValue();
2774 
2775   EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2776 
2777   SDValue NewLoad
2778     = DAG.getLoad(NewVT, SL, LN->getChain(),
2779                   LN->getBasePtr(), LN->getMemOperand());
2780 
2781   SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad);
2782   DCI.CombineTo(N, BC, NewLoad.getValue(1));
2783   return SDValue(N, 0);
2784 }
2785 
2786 // Replace store of an illegal type with a store of a bitcast to a friendlier
2787 // type.
2788 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
2789                                                   DAGCombinerInfo &DCI) const {
2790   if (!DCI.isBeforeLegalize())
2791     return SDValue();
2792 
2793   StoreSDNode *SN = cast<StoreSDNode>(N);
2794   if (SN->isVolatile() || !ISD::isNormalStore(SN))
2795     return SDValue();
2796 
2797   EVT VT = SN->getMemoryVT();
2798   unsigned Size = VT.getStoreSize();
2799 
2800   SDLoc SL(N);
2801   SelectionDAG &DAG = DCI.DAG;
2802   unsigned Align = SN->getAlignment();
2803   if (Align < Size && isTypeLegal(VT)) {
2804     bool IsFast;
2805     unsigned AS = SN->getAddressSpace();
2806 
2807     // Expand unaligned stores earlier than legalization. Due to visitation
2808     // order problems during legalization, the emitted instructions to pack and
2809     // unpack the bytes again are not eliminated in the case of an unaligned
2810     // copy.
2811     if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast)) {
2812       if (VT.isVector())
2813         return scalarizeVectorStore(SN, DAG);
2814 
2815       return expandUnalignedStore(SN, DAG);
2816     }
2817 
2818     if (!IsFast)
2819       return SDValue();
2820   }
2821 
2822   if (!shouldCombineMemoryType(VT))
2823     return SDValue();
2824 
2825   EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2826   SDValue Val = SN->getValue();
2827 
2828   //DCI.AddToWorklist(Val.getNode());
2829 
2830   bool OtherUses = !Val.hasOneUse();
2831   SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val);
2832   if (OtherUses) {
2833     SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal);
2834     DAG.ReplaceAllUsesOfValueWith(Val, CastBack);
2835   }
2836 
2837   return DAG.getStore(SN->getChain(), SL, CastVal,
2838                       SN->getBasePtr(), SN->getMemOperand());
2839 }
2840 
2841 // FIXME: This should go in generic DAG combiner with an isTruncateFree check,
2842 // but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU
2843 // issues.
2844 SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N,
2845                                                         DAGCombinerInfo &DCI) const {
2846   SelectionDAG &DAG = DCI.DAG;
2847   SDValue N0 = N->getOperand(0);
2848 
2849   // (vt2 (assertzext (truncate vt0:x), vt1)) ->
2850   //     (vt2 (truncate (assertzext vt0:x, vt1)))
2851   if (N0.getOpcode() == ISD::TRUNCATE) {
2852     SDValue N1 = N->getOperand(1);
2853     EVT ExtVT = cast<VTSDNode>(N1)->getVT();
2854     SDLoc SL(N);
2855 
2856     SDValue Src = N0.getOperand(0);
2857     EVT SrcVT = Src.getValueType();
2858     if (SrcVT.bitsGE(ExtVT)) {
2859       SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1);
2860       return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg);
2861     }
2862   }
2863 
2864   return SDValue();
2865 }

/// Split the 64-bit value \p LHS into two 32-bit components, and perform the
/// binary operation \p Opc on each half with the corresponding constant
/// operand: \p ValLo for the low half and \p ValHi for the high half.
2868 SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
2869   DAGCombinerInfo &DCI, const SDLoc &SL,
2870   unsigned Opc, SDValue LHS,
2871   uint32_t ValLo, uint32_t ValHi) const {
2872   SelectionDAG &DAG = DCI.DAG;
2873   SDValue Lo, Hi;
2874   std::tie(Lo, Hi) = split64BitValue(LHS, DAG);
2875 
2876   SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32);
2877   SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32);
2878 
2879   SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS);
2880   SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS);
2881 
  // Re-visit the split halves. It's possible we eliminated one of the new
  // operations and it could simplify the vector.
2884   DCI.AddToWorklist(Lo.getNode());
2885   DCI.AddToWorklist(Hi.getNode());
2886 
2887   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
2888   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
2889 }
2890 
2891 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
2892                                                 DAGCombinerInfo &DCI) const {
2893   EVT VT = N->getValueType(0);
2894 
2895   ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
2896   if (!RHS)
2897     return SDValue();
2898 
2899   SDValue LHS = N->getOperand(0);
2900   unsigned RHSVal = RHS->getZExtValue();
2901   if (!RHSVal)
2902     return LHS;
2903 
2904   SDLoc SL(N);
2905   SelectionDAG &DAG = DCI.DAG;
2906 
2907   switch (LHS->getOpcode()) {
2908   default:
2909     break;
2910   case ISD::ZERO_EXTEND:
2911   case ISD::SIGN_EXTEND:
2912   case ISD::ANY_EXTEND: {
2913     SDValue X = LHS->getOperand(0);
2914 
2915     if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
2916         isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) {
2917       // Prefer build_vector as the canonical form if packed types are legal.
      // (shl ([asz]ext i16:x), 16) -> (build_vector 0, x)
2919       SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL,
2920        { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
2921       return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
2922     }
2923 
2924     // shl (ext x) => zext (shl x), if shift does not overflow int
2925     if (VT != MVT::i64)
2926       break;
2927     KnownBits Known;
2928     DAG.computeKnownBits(X, Known);
2929     unsigned LZ = Known.countMinLeadingZeros();
2930     if (LZ < RHSVal)
2931       break;
2932     EVT XVT = X.getValueType();
2933     SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
2934     return DAG.getZExtOrTrunc(Shl, SL, VT);
2935   }
2936   }
2937 
2938   if (VT != MVT::i64)
2939     return SDValue();
2940 
2941   // i64 (shl x, C) -> (build_pair 0, (shl x, C -32))
2942 
  // On some subtargets, a 64-bit shift is a quarter-rate instruction. In the
2944   // common case, splitting this into a move and a 32-bit shift is faster and
2945   // the same code size.
2946   if (RHSVal < 32)
2947     return SDValue();
2948 
2949   SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
2950 
2951   SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
2952   SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
2953 
2954   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2955 
2956   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
2957   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
2958 }
2959 
2960 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
2961                                                 DAGCombinerInfo &DCI) const {
2962   if (N->getValueType(0) != MVT::i64)
2963     return SDValue();
2964 
2965   const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
2966   if (!RHS)
2967     return SDValue();
2968 
2969   SelectionDAG &DAG = DCI.DAG;
2970   SDLoc SL(N);
2971   unsigned RHSVal = RHS->getZExtValue();
2972 
2973   // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31)
2974   if (RHSVal == 32) {
2975     SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
2976     SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
2977                                    DAG.getConstant(31, SL, MVT::i32));
2978 
2979     SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
2980     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
2981   }
2982 
2983   // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
2984   if (RHSVal == 63) {
2985     SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
2986     SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
2987                                    DAG.getConstant(31, SL, MVT::i32));
2988     SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
2989     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
2990   }
2991 
2992   return SDValue();
2993 }
2994 
2995 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
2996                                                 DAGCombinerInfo &DCI) const {
2997   if (N->getValueType(0) != MVT::i64)
2998     return SDValue();
2999 
3000   const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3001   if (!RHS)
3002     return SDValue();
3003 
3004   unsigned ShiftAmt = RHS->getZExtValue();
3005   if (ShiftAmt < 32)
3006     return SDValue();
3007 
3008   // srl i64:x, C for C >= 32
3009   // =>
3010   //   build_pair (srl hi_32(x), C - 32), 0
3011 
3012   SelectionDAG &DAG = DCI.DAG;
3013   SDLoc SL(N);
3014 
3015   SDValue One = DAG.getConstant(1, SL, MVT::i32);
3016   SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3017 
3018   SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, N->getOperand(0));
3019   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32,
3020                            VecOp, One);
3021 
3022   SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
3023   SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
3024 
3025   SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
3026 
3027   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
3028 }
3029 
3030 SDValue AMDGPUTargetLowering::performTruncateCombine(
3031   SDNode *N, DAGCombinerInfo &DCI) const {
3032   SDLoc SL(N);
3033   SelectionDAG &DAG = DCI.DAG;
3034   EVT VT = N->getValueType(0);
3035   SDValue Src = N->getOperand(0);
3036 
3037   // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x)
3038   if (Src.getOpcode() == ISD::BITCAST) {
3039     SDValue Vec = Src.getOperand(0);
3040     if (Vec.getOpcode() == ISD::BUILD_VECTOR) {
3041       SDValue Elt0 = Vec.getOperand(0);
3042       EVT EltVT = Elt0.getValueType();
3043       if (VT.getSizeInBits() <= EltVT.getSizeInBits()) {
3044         if (EltVT.isFloatingPoint()) {
3045           Elt0 = DAG.getNode(ISD::BITCAST, SL,
3046                              EltVT.changeTypeToInteger(), Elt0);
3047         }
3048 
3049         return DAG.getNode(ISD::TRUNCATE, SL, VT, Elt0);
3050       }
3051     }
3052   }
3053 
3054   // Equivalent of above for accessing the high element of a vector as an
3055   // integer operation.
  // trunc (srl (bitcast (build_vector x, y)), 16) -> trunc (bitcast y)
3057   if (Src.getOpcode() == ISD::SRL && !VT.isVector()) {
3058     if (auto K = isConstOrConstSplat(Src.getOperand(1))) {
3059       if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) {
3060         SDValue BV = stripBitcast(Src.getOperand(0));
3061         if (BV.getOpcode() == ISD::BUILD_VECTOR &&
3062             BV.getValueType().getVectorNumElements() == 2) {
3063           SDValue SrcElt = BV.getOperand(1);
3064           EVT SrcEltVT = SrcElt.getValueType();
3065           if (SrcEltVT.isFloatingPoint()) {
3066             SrcElt = DAG.getNode(ISD::BITCAST, SL,
3067                                  SrcEltVT.changeTypeToInteger(), SrcElt);
3068           }
3069 
3070           return DAG.getNode(ISD::TRUNCATE, SL, VT, SrcElt);
3071         }
3072       }
3073     }
3074   }
3075 
3076   // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit.
3077   //
3078   // i16 (trunc (srl i64:x, K)), K <= 16 ->
3079   //     i16 (trunc (srl (i32 (trunc x), K)))
3080   if (VT.getScalarSizeInBits() < 32) {
3081     EVT SrcVT = Src.getValueType();
3082     if (SrcVT.getScalarSizeInBits() > 32 &&
3083         (Src.getOpcode() == ISD::SRL ||
3084          Src.getOpcode() == ISD::SRA ||
3085          Src.getOpcode() == ISD::SHL)) {
3086       SDValue Amt = Src.getOperand(1);
3087       KnownBits Known;
3088       DAG.computeKnownBits(Amt, Known);
3089       unsigned Size = VT.getScalarSizeInBits();
3090       if ((Known.isConstant() && Known.getConstant().ule(Size)) ||
3091           (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size))) {
3092         EVT MidVT = VT.isVector() ?
3093           EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3094                            VT.getVectorNumElements()) : MVT::i32;
3095 
3096         EVT NewShiftVT = getShiftAmountTy(MidVT, DAG.getDataLayout());
3097         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MidVT,
3098                                     Src.getOperand(0));
3099         DCI.AddToWorklist(Trunc.getNode());
3100 
3101         if (Amt.getValueType() != NewShiftVT) {
3102           Amt = DAG.getZExtOrTrunc(Amt, SL, NewShiftVT);
3103           DCI.AddToWorklist(Amt.getNode());
3104         }
3105 
3106         SDValue ShrunkShift = DAG.getNode(Src.getOpcode(), SL, MidVT,
3107                                           Trunc, Amt);
3108         return DAG.getNode(ISD::TRUNCATE, SL, VT, ShrunkShift);
3109       }
3110     }
3111   }
3112 
3113   return SDValue();
3114 }
3115 
3116 // We need to specifically handle i64 mul here to avoid unnecessary conversion
3117 // instructions. If we only match on the legalized i64 mul expansion,
3118 // SimplifyDemandedBits will be unable to remove them because there will be
3119 // multiple uses due to the separate mul + mulh[su].
3120 static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
3121                         SDValue N0, SDValue N1, unsigned Size, bool Signed) {
3122   if (Size <= 32) {
3123     unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3124     return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
3125   }
3126 
3127   // Because we want to eliminate extension instructions before the
3128   // operation, we need to create a single user here (i.e. not the separate
3129   // mul_lo + mul_hi) so that SimplifyDemandedBits will deal with it.
3130 
3131   unsigned MulOpc = Signed ? AMDGPUISD::MUL_LOHI_I24 : AMDGPUISD::MUL_LOHI_U24;
3132 
3133   SDValue Mul = DAG.getNode(MulOpc, SL,
3134                             DAG.getVTList(MVT::i32, MVT::i32), N0, N1);
3135 
3136   return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64,
3137                      Mul.getValue(0), Mul.getValue(1));
3138 }
3139 
3140 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
3141                                                 DAGCombinerInfo &DCI) const {
3142   EVT VT = N->getValueType(0);
3143 
3144   unsigned Size = VT.getSizeInBits();
3145   if (VT.isVector() || Size > 64)
3146     return SDValue();
3147 
3148   // There are i16 integer mul/mad.
3149   if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16))
3150     return SDValue();
3151 
3152   SelectionDAG &DAG = DCI.DAG;
3153   SDLoc DL(N);
3154 
3155   SDValue N0 = N->getOperand(0);
3156   SDValue N1 = N->getOperand(1);
3157 
3158   // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
3159   // in the source into any_extends if the result of the mul is truncated. Since
3160   // we can assume the high bits are whatever we want, use the underlying value
3161   // to avoid the unknown high bits from interfering.
3162   if (N0.getOpcode() == ISD::ANY_EXTEND)
3163     N0 = N0.getOperand(0);
3164 
3165   if (N1.getOpcode() == ISD::ANY_EXTEND)
3166     N1 = N1.getOperand(0);
3167 
3168   SDValue Mul;
3169 
3170   if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
3171     N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3172     N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3173     Mul = getMul24(DAG, DL, N0, N1, Size, false);
3174   } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
3175     N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3176     N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3177     Mul = getMul24(DAG, DL, N0, N1, Size, true);
3178   } else {
3179     return SDValue();
3180   }
3181 
3182   // We need to use sext even for MUL_U24, because MUL_U24 is used
3183   // for signed multiply of 8 and 16-bit types.
3184   return DAG.getSExtOrTrunc(Mul, DL, VT);
3185 }
3186 
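// Replace a mulhs whose operands are known to fit in 24 bits (as signed
// values) with MULHI_I24.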
3187 SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
3188                                                   DAGCombinerInfo &DCI) const {
3189   EVT VT = N->getValueType(0);
3190 
3191   if (!Subtarget->hasMulI24() || VT.isVector())
3192     return SDValue();
3193 
3194   SelectionDAG &DAG = DCI.DAG;
3195   SDLoc DL(N);
3196 
3197   SDValue N0 = N->getOperand(0);
3198   SDValue N1 = N->getOperand(1);
3199 
3200   if (!isI24(N0, DAG) || !isI24(N1, DAG))
3201     return SDValue();
3202 
3203   N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3204   N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3205 
3206   SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1);
3207   DCI.AddToWorklist(Mulhi.getNode());
3208   return DAG.getSExtOrTrunc(Mulhi, DL, VT);
3209 }
3210 
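// Replace a mulhu whose operands are known to fit in 24 bits (as unsigned
// values) with MULHI_U24.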
3211 SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N,
3212                                                   DAGCombinerInfo &DCI) const {
3213   EVT VT = N->getValueType(0);
3214 
3215   if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
3216     return SDValue();
3217 
3218   SelectionDAG &DAG = DCI.DAG;
3219   SDLoc DL(N);
3220 
3221   SDValue N0 = N->getOperand(0);
3222   SDValue N1 = N->getOperand(1);
3223 
3224   if (!isU24(N0, DAG) || !isU24(N1, DAG))
3225     return SDValue();
3226 
3227   N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3228   N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3229 
3230   SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1);
3231   DCI.AddToWorklist(Mulhi.getNode());
3232   return DAG.getZExtOrTrunc(Mulhi, DL, VT);
3233 }
3234 
3235 SDValue AMDGPUTargetLowering::performMulLoHi24Combine(
3236   SDNode *N, DAGCombinerInfo &DCI) const {
3237   SelectionDAG &DAG = DCI.DAG;
3238 
3239   // Simplify demanded bits before splitting into multiple users.
3240   if (simplifyI24(N, 0, DCI) || simplifyI24(N, 1, DCI))
3241     return SDValue();
3242 
3243   SDValue N0 = N->getOperand(0);
3244   SDValue N1 = N->getOperand(1);
3245 
3246   bool Signed = (N->getOpcode() == AMDGPUISD::MUL_LOHI_I24);
3247 
3248   unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3249   unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
3250 
3251   SDLoc SL(N);
3252 
3253   SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
3254   SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
3255   return DAG.getMergeValues({ MulLo, MulHi }, SL);
3256 }
3257 
3258 static bool isNegativeOne(SDValue Val) {
3259   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val))
3260     return C->isAllOnesValue();
3261   return false;
3262 }
3263 
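// Emit a 32-bit find-first-bit node (\p Opc is FFBH_U32 or FFBL_B32),
// zero-extending a sub-32-bit input to i32 and truncating the result back to
// the original type if necessary.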
3264 SDValue AMDGPUTargetLowering::getFFBX_U32(SelectionDAG &DAG,
3265                                           SDValue Op,
3266                                           const SDLoc &DL,
3267                                           unsigned Opc) const {
3268   EVT VT = Op.getValueType();
3269   EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT);
3270   if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() &&
3271                               LegalVT != MVT::i16))
3272     return SDValue();
3273 
3274   if (VT != MVT::i32)
3275     Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op);
3276 
3277   SDValue FFBX = DAG.getNode(Opc, DL, MVT::i32, Op);
3278   if (VT != MVT::i32)
3279     FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX);
3280 
3281   return FFBX;
3282 }
3283 
3284 // The native instructions return -1 on 0 input. Optimize out a select that
3285 // produces -1 on 0.
3286 //
3287 // TODO: If zero is not undef, we could also do this if the output is compared
3288 // against the bitwidth.
3289 //
3290 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
3291 SDValue AMDGPUTargetLowering::performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond,
3292                                                  SDValue LHS, SDValue RHS,
3293                                                  DAGCombinerInfo &DCI) const {
3294   ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
3295   if (!CmpRhs || !CmpRhs->isNullValue())
3296     return SDValue();
3297 
3298   SelectionDAG &DAG = DCI.DAG;
3299   ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
3300   SDValue CmpLHS = Cond.getOperand(0);
3301 
  // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
  // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_u32 x
  if (CCOpcode == ISD::SETEQ &&
      (isCtlzOpc(RHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
      RHS.getOperand(0) == CmpLHS &&
      isNegativeOne(LHS)) {
    unsigned Opc = isCttzOpc(RHS.getOpcode()) ? AMDGPUISD::FFBL_B32
                                              : AMDGPUISD::FFBH_U32;
    return getFFBX_U32(DAG, CmpLHS, SL, Opc);
  }
3313 
3314   // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
3315   // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_u32 x
  if (CCOpcode == ISD::SETNE &&
      (isCtlzOpc(LHS.getOpcode()) || isCttzOpc(LHS.getOpcode())) &&
      LHS.getOperand(0) == CmpLHS &&
      isNegativeOne(RHS)) {
    unsigned Opc = isCttzOpc(LHS.getOpcode()) ? AMDGPUISD::FFBL_B32
                                              : AMDGPUISD::FFBH_U32;
    return getFFBX_U32(DAG, CmpLHS, SL, Opc);
  }
3322 
3323   return SDValue();
3324 }
3325 
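// Pull a unary operation \p Op (fneg or fabs in the caller below) out of both
// select operands: (select c, (op x), (op y)) -> (op (select c, x, y)).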
3326 static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI,
3327                                          unsigned Op,
3328                                          const SDLoc &SL,
3329                                          SDValue Cond,
3330                                          SDValue N1,
3331                                          SDValue N2) {
3332   SelectionDAG &DAG = DCI.DAG;
3333   EVT VT = N1.getValueType();
3334 
3335   SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond,
3336                                   N1.getOperand(0), N2.getOperand(0));
3337   DCI.AddToWorklist(NewSelect.getNode());
3338   return DAG.getNode(Op, SL, VT, NewSelect);
3339 }
3340 
3341 // Pull a free FP operation out of a select so it may fold into uses.
3342 //
3343 // select c, (fneg x), (fneg y) -> fneg (select c, x, y)
3344 // select c, (fneg x), k -> fneg (select c, x, (fneg k))
3345 //
3346 // select c, (fabs x), (fabs y) -> fabs (select c, x, y)
3347 // select c, (fabs x), +k -> fabs (select c, x, k)
3348 static SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
3349                                     SDValue N) {
3350   SelectionDAG &DAG = DCI.DAG;
3351   SDValue Cond = N.getOperand(0);
3352   SDValue LHS = N.getOperand(1);
3353   SDValue RHS = N.getOperand(2);
3354 
3355   EVT VT = N.getValueType();
3356   if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) ||
3357       (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) {
3358     return distributeOpThroughSelect(DCI, LHS.getOpcode(),
3359                                      SDLoc(N), Cond, LHS, RHS);
3360   }
3361 
3362   bool Inv = false;
3363   if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) {
3364     std::swap(LHS, RHS);
3365     Inv = true;
3366   }
3367 
3368   // TODO: Support vector constants.
3369   ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
3370   if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS) {
3371     SDLoc SL(N);
3372     // If one side is an fneg/fabs and the other is a constant, we can push the
3373     // fneg/fabs down. If it's an fabs, the constant needs to be non-negative.
3374     SDValue NewLHS = LHS.getOperand(0);
3375     SDValue NewRHS = RHS;
3376 
3377     // Careful: if the neg can be folded up, don't try to pull it back down.
3378     bool ShouldFoldNeg = true;
3379 
3380     if (NewLHS.hasOneUse()) {
3381       unsigned Opc = NewLHS.getOpcode();
3382       if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(Opc))
3383         ShouldFoldNeg = false;
3384       if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL)
3385         ShouldFoldNeg = false;
3386     }
3387 
3388     if (ShouldFoldNeg) {
3389       if (LHS.getOpcode() == ISD::FNEG)
3390         NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3391       else if (CRHS->isNegative())
3392         return SDValue();
3393 
3394       if (Inv)
3395         std::swap(NewLHS, NewRHS);
3396 
3397       SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT,
3398                                       Cond, NewLHS, NewRHS);
3399       DCI.AddToWorklist(NewSelect.getNode());
3400       return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
3401     }
3402   }
3403 
3404   return SDValue();
3405 }
3406 
3407 
3408 SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
3409                                                    DAGCombinerInfo &DCI) const {
3410   if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0)))
3411     return Folded;
3412 
3413   SDValue Cond = N->getOperand(0);
3414   if (Cond.getOpcode() != ISD::SETCC)
3415     return SDValue();
3416 
3417   EVT VT = N->getValueType(0);
3418   SDValue LHS = Cond.getOperand(0);
3419   SDValue RHS = Cond.getOperand(1);
3420   SDValue CC = Cond.getOperand(2);
3421 
3422   SDValue True = N->getOperand(1);
3423   SDValue False = N->getOperand(2);
3424 
3425   if (Cond.hasOneUse()) { // TODO: Look for multiple select uses.
3426     SelectionDAG &DAG = DCI.DAG;
    if (DAG.isConstantValueOfAnyType(True) &&
        !DAG.isConstantValueOfAnyType(False)) {
3431       // Swap cmp + select pair to move constant to false input.
3432       // This will allow using VOPC cndmasks more often.
      // select (setcc x, y, cc), k, x -> select (setcc x, y, !cc), x, k
3434 
3435       SDLoc SL(N);
3436       ISD::CondCode NewCC = getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
3437                                             LHS.getValueType().isInteger());
3438 
3439       SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC);
3440       return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True);
3441     }
3442 
3443     if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
3444       SDValue MinMax
3445         = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
3446       // Revisit this node so we can catch min3/max3/med3 patterns.
3447       //DCI.AddToWorklist(MinMax.getNode());
3448       return MinMax;
3449     }
3450   }
3451 
3452   // There's no reason to not do this if the condition has other uses.
3453   return performCtlz_CttzCombine(SDLoc(N), Cond, True, False, DCI);
3454 }
3455 
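// Match a (splat of) positive floating-point zero; -0.0 does not qualify.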
3456 static bool isConstantFPZero(SDValue N) {
3457   if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N))
3458     return C->isZero() && !C->isNegative();
3459   return false;
3460 }
3461 
3462 static unsigned inverseMinMax(unsigned Opc) {
3463   switch (Opc) {
3464   case ISD::FMAXNUM:
3465     return ISD::FMINNUM;
3466   case ISD::FMINNUM:
3467     return ISD::FMAXNUM;
3468   case AMDGPUISD::FMAX_LEGACY:
3469     return AMDGPUISD::FMIN_LEGACY;
3470   case AMDGPUISD::FMIN_LEGACY:
3471     return  AMDGPUISD::FMAX_LEGACY;
3472   default:
3473     llvm_unreachable("invalid min/max opcode");
3474   }
3475 }
3476 
3477 SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
3478                                                  DAGCombinerInfo &DCI) const {
3479   SelectionDAG &DAG = DCI.DAG;
3480   SDValue N0 = N->getOperand(0);
3481   EVT VT = N->getValueType(0);
3482 
3483   unsigned Opc = N0.getOpcode();
3484 
3485   // If the input has multiple uses and we can either fold the negate down, or
3486   // the other uses cannot, give up. This both prevents unprofitable
3487   // transformations and infinite loops: we won't repeatedly try to fold around
3488   // a negate that has no 'good' form.
3489   if (N0.hasOneUse()) {
3490     // This may be able to fold into the source, but at a code size cost. Don't
3491     // fold if the fold into the user is free.
3492     if (allUsesHaveSourceMods(N, 0))
3493       return SDValue();
3494   } else {
3495     if (fnegFoldsIntoOp(Opc) &&
3496         (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode())))
3497       return SDValue();
3498   }
3499 
3500   SDLoc SL(N);
3501   switch (Opc) {
3502   case ISD::FADD: {
3503     if (!mayIgnoreSignedZero(N0))
3504       return SDValue();
3505 
3506     // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y))
3507     SDValue LHS = N0.getOperand(0);
3508     SDValue RHS = N0.getOperand(1);
3509 
3510     if (LHS.getOpcode() != ISD::FNEG)
3511       LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
3512     else
3513       LHS = LHS.getOperand(0);
3514 
3515     if (RHS.getOpcode() != ISD::FNEG)
3516       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3517     else
3518       RHS = RHS.getOperand(0);
3519 
3520     SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags());
3521     if (!N0.hasOneUse())
3522       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3523     return Res;
3524   }
3525   case ISD::FMUL:
3526   case AMDGPUISD::FMUL_LEGACY: {
3527     // (fneg (fmul x, y)) -> (fmul x, (fneg y))
3528     // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y))
3529     SDValue LHS = N0.getOperand(0);
3530     SDValue RHS = N0.getOperand(1);
3531 
3532     if (LHS.getOpcode() == ISD::FNEG)
3533       LHS = LHS.getOperand(0);
3534     else if (RHS.getOpcode() == ISD::FNEG)
3535       RHS = RHS.getOperand(0);
3536     else
3537       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3538 
3539     SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags());
3540     if (!N0.hasOneUse())
3541       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3542     return Res;
3543   }
3544   case ISD::FMA:
3545   case ISD::FMAD: {
3546     if (!mayIgnoreSignedZero(N0))
3547       return SDValue();
3548 
3549     // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z))
3550     SDValue LHS = N0.getOperand(0);
3551     SDValue MHS = N0.getOperand(1);
3552     SDValue RHS = N0.getOperand(2);
3553 
3554     if (LHS.getOpcode() == ISD::FNEG)
3555       LHS = LHS.getOperand(0);
3556     else if (MHS.getOpcode() == ISD::FNEG)
3557       MHS = MHS.getOperand(0);
3558     else
3559       MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS);
3560 
3561     if (RHS.getOpcode() != ISD::FNEG)
3562       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3563     else
3564       RHS = RHS.getOperand(0);
3565 
3566     SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS);
3567     if (!N0.hasOneUse())
3568       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3569     return Res;
3570   }
3571   case ISD::FMAXNUM:
3572   case ISD::FMINNUM:
3573   case AMDGPUISD::FMAX_LEGACY:
3574   case AMDGPUISD::FMIN_LEGACY: {
3575     // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y)
3576     // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y)
3577     // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y)
3578     // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y)
3579 
3580     SDValue LHS = N0.getOperand(0);
3581     SDValue RHS = N0.getOperand(1);
3582 
3583     // 0 doesn't have a negated inline immediate.
3584     // TODO: Shouldn't fold 1/2pi either, and should be generalized to other
3585     // operations.
3586     if (isConstantFPZero(RHS))
3587       return SDValue();
3588 
3589     SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
3590     SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3591     unsigned Opposite = inverseMinMax(Opc);
3592 
3593     SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags());
3594     if (!N0.hasOneUse())
3595       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3596     return Res;
3597   }
3598   case ISD::FP_EXTEND:
3599   case ISD::FTRUNC:
3600   case ISD::FRINT:
3601   case ISD::FNEARBYINT: // XXX - Should fround be handled?
3602   case ISD::FSIN:
3603   case AMDGPUISD::RCP:
3604   case AMDGPUISD::RCP_LEGACY:
3605   case AMDGPUISD::RCP_IFLAG:
3606   case AMDGPUISD::SIN_HW: {
3607     SDValue CvtSrc = N0.getOperand(0);
3608     if (CvtSrc.getOpcode() == ISD::FNEG) {
3609       // (fneg (fp_extend (fneg x))) -> (fp_extend x)
3610       // (fneg (rcp (fneg x))) -> (rcp x)
3611       return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0));
3612     }
3613 
3614     if (!N0.hasOneUse())
3615       return SDValue();
3616 
3617     // (fneg (fp_extend x)) -> (fp_extend (fneg x))
3618     // (fneg (rcp x)) -> (rcp (fneg x))
3619     SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
3620     return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags());
3621   }
3622   case ISD::FP_ROUND: {
3623     SDValue CvtSrc = N0.getOperand(0);
3624 
3625     if (CvtSrc.getOpcode() == ISD::FNEG) {
3626       // (fneg (fp_round (fneg x))) -> (fp_round x)
3627       return DAG.getNode(ISD::FP_ROUND, SL, VT,
3628                          CvtSrc.getOperand(0), N0.getOperand(1));
3629     }
3630 
3631     if (!N0.hasOneUse())
3632       return SDValue();
3633 
3634     // (fneg (fp_round x)) -> (fp_round (fneg x))
3635     SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
3636     return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1));
3637   }
3638   case ISD::FP16_TO_FP: {
3639     // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal
3640     // f16, but legalization of f16 fneg ends up pulling it out of the source.
3641     // Put the fneg back as a legal source operation that can be matched later.
3642     SDLoc SL(N);
3643 
3644     SDValue Src = N0.getOperand(0);
3645     EVT SrcVT = Src.getValueType();
3646 
3647     // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000)
3648     SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src,
3649                                   DAG.getConstant(0x8000, SL, SrcVT));
3650     return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg);
3651   }
3652   default:
3653     return SDValue();
3654   }
3655 }
3656 
3657 SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N,
3658                                                  DAGCombinerInfo &DCI) const {
3659   SelectionDAG &DAG = DCI.DAG;
3660   SDValue N0 = N->getOperand(0);
3661 
3662   if (!N0.hasOneUse())
3663     return SDValue();
3664 
3665   switch (N0.getOpcode()) {
3666   case ISD::FP16_TO_FP: {
3667     assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal");
3668     SDLoc SL(N);
3669     SDValue Src = N0.getOperand(0);
3670     EVT SrcVT = Src.getValueType();
3671 
3672     // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff)
3673     SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src,
3674                                   DAG.getConstant(0x7fff, SL, SrcVT));
3675     return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs);
3676   }
3677   default:
3678     return SDValue();
3679   }
3680 }
3681 
3682 SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N,
3683                                                 DAGCombinerInfo &DCI) const {
3684   const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
3685   if (!CFP)
3686     return SDValue();
3687 
3688   // XXX - Should this flush denormals?
3689   const APFloat &Val = CFP->getValueAPF();
3690   APFloat One(Val.getSemantics(), "1.0");
3691   return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
3692 }
3693 
3694 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
3695                                                 DAGCombinerInfo &DCI) const {
3696   SelectionDAG &DAG = DCI.DAG;
3697   SDLoc DL(N);
3698 
3699   switch(N->getOpcode()) {
3700   default:
3701     break;
3702   case ISD::BITCAST: {
3703     EVT DestVT = N->getValueType(0);
3704 
3705     // Push casts through vector builds. This helps avoid emitting a large
3706     // number of copies when materializing floating point vector constants.
3707     //
3708     // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) =>
    //   vNt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y))
3710     if (DestVT.isVector()) {
3711       SDValue Src = N->getOperand(0);
3712       if (Src.getOpcode() == ISD::BUILD_VECTOR) {
3713         EVT SrcVT = Src.getValueType();
3714         unsigned NElts = DestVT.getVectorNumElements();
3715 
3716         if (SrcVT.getVectorNumElements() == NElts) {
3717           EVT DestEltVT = DestVT.getVectorElementType();
3718 
3719           SmallVector<SDValue, 8> CastedElts;
3720           SDLoc SL(N);
3721           for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) {
3722             SDValue Elt = Src.getOperand(I);
3723             CastedElts.push_back(DAG.getNode(ISD::BITCAST, DL, DestEltVT, Elt));
3724           }
3725 
3726           return DAG.getBuildVector(DestVT, SL, CastedElts);
3727         }
3728       }
3729     }
3730 
3731     if (DestVT.getSizeInBits() != 64 && !DestVT.isVector())
3732       break;
3733 
3734     // Fold bitcasts of constants.
3735     //
3736     // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
3737     // TODO: Generalize and move to DAGCombiner
3738     SDValue Src = N->getOperand(0);
3739     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
3740       if (Src.getValueType() == MVT::i64) {
3741         SDLoc SL(N);
3742         uint64_t CVal = C->getZExtValue();
3743         return DAG.getNode(ISD::BUILD_VECTOR, SL, DestVT,
3744                            DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
3745                            DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
3746       }
3747     }
3748 
3749     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
3750       const APInt &Val = C->getValueAPF().bitcastToAPInt();
3751       SDLoc SL(N);
3752       uint64_t CVal = Val.getZExtValue();
3753       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
3754                                 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
3755                                 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
3756 
3757       return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
3758     }
3759 
3760     break;
3761   }
3762   case ISD::SHL: {
3763     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3764       break;
3765 
3766     return performShlCombine(N, DCI);
3767   }
3768   case ISD::SRL: {
3769     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3770       break;
3771 
3772     return performSrlCombine(N, DCI);
3773   }
3774   case ISD::SRA: {
3775     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3776       break;
3777 
3778     return performSraCombine(N, DCI);
3779   }
3780   case ISD::TRUNCATE:
3781     return performTruncateCombine(N, DCI);
3782   case ISD::MUL:
3783     return performMulCombine(N, DCI);
3784   case ISD::MULHS:
3785     return performMulhsCombine(N, DCI);
3786   case ISD::MULHU:
3787     return performMulhuCombine(N, DCI);
3788   case AMDGPUISD::MUL_I24:
3789   case AMDGPUISD::MUL_U24:
3790   case AMDGPUISD::MULHI_I24:
3791   case AMDGPUISD::MULHI_U24: {
    // If the first call to simplify is successful, then N may end up being
3793     // deleted, so we shouldn't call simplifyI24 again.
3794     simplifyI24(N, 0, DCI) || simplifyI24(N, 1, DCI);
3795     return SDValue();
3796   }
3797   case AMDGPUISD::MUL_LOHI_I24:
3798   case AMDGPUISD::MUL_LOHI_U24:
3799     return performMulLoHi24Combine(N, DCI);
3800   case ISD::SELECT:
3801     return performSelectCombine(N, DCI);
3802   case ISD::FNEG:
3803     return performFNegCombine(N, DCI);
3804   case ISD::FABS:
3805     return performFAbsCombine(N, DCI);
3806   case AMDGPUISD::BFE_I32:
3807   case AMDGPUISD::BFE_U32: {
3808     assert(!N->getValueType(0).isVector() &&
3809            "Vector handling of BFE not implemented");
3810     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
3811     if (!Width)
3812       break;
3813 
3814     uint32_t WidthVal = Width->getZExtValue() & 0x1f;
3815     if (WidthVal == 0)
3816       return DAG.getConstant(0, DL, MVT::i32);
3817 
3818     ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
3819     if (!Offset)
3820       break;
3821 
3822     SDValue BitsFrom = N->getOperand(0);
3823     uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
3824 
3825     bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
3826 
3827     if (OffsetVal == 0) {
3828       // This is already sign / zero extended, so try to fold away extra BFEs.
3829       unsigned SignBits =  Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
3830 
3831       unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
3832       if (OpSignBits >= SignBits)
3833         return BitsFrom;
3834 
3835       EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
3836       if (Signed) {
3837         // This is a sign_extend_inreg. Replace it to take advantage of existing
3838         // DAG Combines. If not eliminated, we will match back to BFE during
3839         // selection.
3840 
        // TODO: The sext_inreg of extended types ends up expanded, although we
        // could handle them in a single BFE.
3843         return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
3844                            DAG.getValueType(SmallVT));
3845       }
3846 
3847       return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
3848     }
3849 
3850     if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
3851       if (Signed) {
3852         return constantFoldBFE<int32_t>(DAG,
3853                                         CVal->getSExtValue(),
3854                                         OffsetVal,
3855                                         WidthVal,
3856                                         DL);
3857       }
3858 
3859       return constantFoldBFE<uint32_t>(DAG,
3860                                        CVal->getZExtValue(),
3861                                        OffsetVal,
3862                                        WidthVal,
3863                                        DL);
3864     }
3865 
3866     if ((OffsetVal + WidthVal) >= 32 &&
3867         !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
3868       SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
3869       return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
3870                          BitsFrom, ShiftVal);
3871     }
3872 
3873     if (BitsFrom.hasOneUse()) {
3874       APInt Demanded = APInt::getBitsSet(32,
3875                                          OffsetVal,
3876                                          OffsetVal + WidthVal);
3877 
3878       KnownBits Known;
3879       TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
3880                                             !DCI.isBeforeLegalizeOps());
3881       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3882       if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) ||
3883           TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) {
3884         DCI.CommitTargetLoweringOpt(TLO);
3885       }
3886     }
3887 
3888     break;
3889   }
3890   case ISD::LOAD:
3891     return performLoadCombine(N, DCI);
3892   case ISD::STORE:
3893     return performStoreCombine(N, DCI);
3894   case AMDGPUISD::RCP:
3895   case AMDGPUISD::RCP_IFLAG:
3896     return performRcpCombine(N, DCI);
3897   case ISD::AssertZext:
3898   case ISD::AssertSext:
3899     return performAssertSZExtCombine(N, DCI);
3900   }
3901   return SDValue();
3902 }
3903 
3904 //===----------------------------------------------------------------------===//
3905 // Helper functions
3906 //===----------------------------------------------------------------------===//
3907 
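// Return the value of the physical register \p Reg at function entry, adding
// it as a live-in virtual register if it is not already one. With \p RawReg,
// return the register itself rather than a copy from it in the entry block.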
3908 SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
3909                                                    const TargetRegisterClass *RC,
3910                                                    unsigned Reg, EVT VT,
3911                                                    const SDLoc &SL,
3912                                                    bool RawReg) const {
3913   MachineFunction &MF = DAG.getMachineFunction();
3914   MachineRegisterInfo &MRI = MF.getRegInfo();
3915   unsigned VReg;
3916 
3917   if (!MRI.isLiveIn(Reg)) {
3918     VReg = MRI.createVirtualRegister(RC);
3919     MRI.addLiveIn(Reg, VReg);
3920   } else {
3921     VReg = MRI.getLiveInVirtReg(Reg);
3922   }
3923 
3924   if (RawReg)
3925     return DAG.getRegister(VReg, VT);
3926 
3927   return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT);
3928 }
3929 
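// Load an argument that was passed on the stack, \p Offset bytes into the
// incoming argument area.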
3930 SDValue AMDGPUTargetLowering::loadStackInputValue(SelectionDAG &DAG,
3931                                                   EVT VT,
3932                                                   const SDLoc &SL,
3933                                                   int64_t Offset) const {
3934   MachineFunction &MF = DAG.getMachineFunction();
3935   MachineFrameInfo &MFI = MF.getFrameInfo();
3936 
3937   int FI = MFI.CreateFixedObject(VT.getStoreSize(), Offset, true);
3938   auto SrcPtrInfo = MachinePointerInfo::getStack(MF, Offset);
3939   SDValue Ptr = DAG.getFrameIndex(FI, MVT::i32);
3940 
3941   return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, 4,
3942                      MachineMemOperand::MODereferenceable |
3943                      MachineMemOperand::MOInvariant);
3944 }
3945 
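// Store the outgoing stack argument \p ArgVal at \p Offset bytes from
// \p StackPtr.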
3946 SDValue AMDGPUTargetLowering::storeStackInputValue(SelectionDAG &DAG,
3947                                                    const SDLoc &SL,
3948                                                    SDValue Chain,
3949                                                    SDValue StackPtr,
3950                                                    SDValue ArgVal,
3951                                                    int64_t Offset) const {
3952   MachineFunction &MF = DAG.getMachineFunction();
3953   MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset);
3954 
3955   SDValue Ptr = DAG.getObjectPtrOffset(SL, StackPtr, Offset);
3956   SDValue Store = DAG.getStore(Chain, SL, ArgVal, Ptr, DstInfo, 4,
3957                                MachineMemOperand::MODereferenceable);
3958   return Store;
3959 }
3960 
3961 SDValue AMDGPUTargetLowering::loadInputValue(SelectionDAG &DAG,
3962                                              const TargetRegisterClass *RC,
3963                                              EVT VT, const SDLoc &SL,
3964                                              const ArgDescriptor &Arg) const {
3965   assert(Arg && "Attempting to load missing argument");
3966 
3967   if (Arg.isRegister())
3968     return CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL);
3969   return loadStackInputValue(DAG, VT, SL, Arg.getStackOffset());
3970 }
3971 
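// Implicit kernel parameters are laid out immediately after the explicit
// kernel arguments, aligned to the implicit argument pointer alignment.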
3972 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
3973     const MachineFunction &MF, const ImplicitParameter Param) const {
3974   const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
3975   const AMDGPUSubtarget &ST =
3976       AMDGPUSubtarget::get(getTargetMachine(), MF.getFunction());
3977   unsigned ExplicitArgOffset = ST.getExplicitKernelArgOffset(MF.getFunction());
3978   unsigned Alignment = ST.getAlignmentForImplicitArgPtr();
3979   uint64_t ArgOffset = alignTo(MFI->getExplicitKernArgSize(), Alignment) +
3980                        ExplicitArgOffset;
3981   switch (Param) {
3982   case GRID_DIM:
3983     return ArgOffset;
3984   case GRID_OFFSET:
3985     return ArgOffset + 4;
3986   }
3987   llvm_unreachable("unexpected implicit parameter type");
3988 }
3989 
3990 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
3991 
3992 const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
3993   switch ((AMDGPUISD::NodeType)Opcode) {
3994   case AMDGPUISD::FIRST_NUMBER: break;
3995   // AMDIL DAG nodes
3996   NODE_NAME_CASE(UMUL);
3997   NODE_NAME_CASE(BRANCH_COND);
3998 
3999   // AMDGPU DAG nodes
4000   NODE_NAME_CASE(IF)
4001   NODE_NAME_CASE(ELSE)
4002   NODE_NAME_CASE(LOOP)
4003   NODE_NAME_CASE(CALL)
4004   NODE_NAME_CASE(TC_RETURN)
4005   NODE_NAME_CASE(TRAP)
4006   NODE_NAME_CASE(RET_FLAG)
4007   NODE_NAME_CASE(RETURN_TO_EPILOG)
4008   NODE_NAME_CASE(ENDPGM)
4009   NODE_NAME_CASE(DWORDADDR)
4010   NODE_NAME_CASE(FRACT)
4011   NODE_NAME_CASE(SETCC)
4012   NODE_NAME_CASE(SETREG)
4013   NODE_NAME_CASE(FMA_W_CHAIN)
4014   NODE_NAME_CASE(FMUL_W_CHAIN)
4015   NODE_NAME_CASE(CLAMP)
4016   NODE_NAME_CASE(COS_HW)
4017   NODE_NAME_CASE(SIN_HW)
4018   NODE_NAME_CASE(FMAX_LEGACY)
4019   NODE_NAME_CASE(FMIN_LEGACY)
4020   NODE_NAME_CASE(FMAX3)
4021   NODE_NAME_CASE(SMAX3)
4022   NODE_NAME_CASE(UMAX3)
4023   NODE_NAME_CASE(FMIN3)
4024   NODE_NAME_CASE(SMIN3)
4025   NODE_NAME_CASE(UMIN3)
4026   NODE_NAME_CASE(FMED3)
4027   NODE_NAME_CASE(SMED3)
4028   NODE_NAME_CASE(UMED3)
4029   NODE_NAME_CASE(FDOT2)
4030   NODE_NAME_CASE(URECIP)
4031   NODE_NAME_CASE(DIV_SCALE)
4032   NODE_NAME_CASE(DIV_FMAS)
4033   NODE_NAME_CASE(DIV_FIXUP)
4034   NODE_NAME_CASE(FMAD_FTZ)
4035   NODE_NAME_CASE(TRIG_PREOP)
4036   NODE_NAME_CASE(RCP)
4037   NODE_NAME_CASE(RSQ)
4038   NODE_NAME_CASE(RCP_LEGACY)
4039   NODE_NAME_CASE(RSQ_LEGACY)
4040   NODE_NAME_CASE(RCP_IFLAG)
4041   NODE_NAME_CASE(FMUL_LEGACY)
4042   NODE_NAME_CASE(RSQ_CLAMP)
4043   NODE_NAME_CASE(LDEXP)
4044   NODE_NAME_CASE(FP_CLASS)
4045   NODE_NAME_CASE(DOT4)
4046   NODE_NAME_CASE(CARRY)
4047   NODE_NAME_CASE(BORROW)
4048   NODE_NAME_CASE(BFE_U32)
4049   NODE_NAME_CASE(BFE_I32)
4050   NODE_NAME_CASE(BFI)
4051   NODE_NAME_CASE(BFM)
4052   NODE_NAME_CASE(FFBH_U32)
4053   NODE_NAME_CASE(FFBH_I32)
4054   NODE_NAME_CASE(FFBL_B32)
4055   NODE_NAME_CASE(MUL_U24)
4056   NODE_NAME_CASE(MUL_I24)
4057   NODE_NAME_CASE(MULHI_U24)
4058   NODE_NAME_CASE(MULHI_I24)
4059   NODE_NAME_CASE(MUL_LOHI_U24)
4060   NODE_NAME_CASE(MUL_LOHI_I24)
4061   NODE_NAME_CASE(MAD_U24)
4062   NODE_NAME_CASE(MAD_I24)
4063   NODE_NAME_CASE(MAD_I64_I32)
4064   NODE_NAME_CASE(MAD_U64_U32)
4065   NODE_NAME_CASE(PERM)
4066   NODE_NAME_CASE(TEXTURE_FETCH)
4067   NODE_NAME_CASE(EXPORT)
4068   NODE_NAME_CASE(EXPORT_DONE)
4069   NODE_NAME_CASE(R600_EXPORT)
4070   NODE_NAME_CASE(CONST_ADDRESS)
4071   NODE_NAME_CASE(REGISTER_LOAD)
4072   NODE_NAME_CASE(REGISTER_STORE)
4073   NODE_NAME_CASE(SAMPLE)
4074   NODE_NAME_CASE(SAMPLEB)
4075   NODE_NAME_CASE(SAMPLED)
4076   NODE_NAME_CASE(SAMPLEL)
4077   NODE_NAME_CASE(CVT_F32_UBYTE0)
4078   NODE_NAME_CASE(CVT_F32_UBYTE1)
4079   NODE_NAME_CASE(CVT_F32_UBYTE2)
4080   NODE_NAME_CASE(CVT_F32_UBYTE3)
4081   NODE_NAME_CASE(CVT_PKRTZ_F16_F32)
4082   NODE_NAME_CASE(CVT_PKNORM_I16_F32)
4083   NODE_NAME_CASE(CVT_PKNORM_U16_F32)
4084   NODE_NAME_CASE(CVT_PK_I16_I32)
4085   NODE_NAME_CASE(CVT_PK_U16_U32)
4086   NODE_NAME_CASE(FP_TO_FP16)
4087   NODE_NAME_CASE(FP16_ZEXT)
4088   NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
4089   NODE_NAME_CASE(CONST_DATA_PTR)
4090   NODE_NAME_CASE(PC_ADD_REL_OFFSET)
4091   NODE_NAME_CASE(KILL)
4092   NODE_NAME_CASE(DUMMY_CHAIN)
4093   case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
4094   NODE_NAME_CASE(INIT_EXEC)
4095   NODE_NAME_CASE(INIT_EXEC_FROM_INPUT)
4096   NODE_NAME_CASE(SENDMSG)
4097   NODE_NAME_CASE(SENDMSGHALT)
4098   NODE_NAME_CASE(INTERP_MOV)
4099   NODE_NAME_CASE(INTERP_P1)
4100   NODE_NAME_CASE(INTERP_P2)
4101   NODE_NAME_CASE(STORE_MSKOR)
4102   NODE_NAME_CASE(LOAD_CONSTANT)
4103   NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
4104   NODE_NAME_CASE(TBUFFER_STORE_FORMAT_X3)
4105   NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16)
4106   NODE_NAME_CASE(TBUFFER_LOAD_FORMAT)
4107   NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16)
4108   NODE_NAME_CASE(ATOMIC_CMP_SWAP)
4109   NODE_NAME_CASE(ATOMIC_INC)
4110   NODE_NAME_CASE(ATOMIC_DEC)
4111   NODE_NAME_CASE(ATOMIC_LOAD_FADD)
4112   NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
4113   NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
4114   NODE_NAME_CASE(BUFFER_LOAD)
4115   NODE_NAME_CASE(BUFFER_LOAD_FORMAT)
4116   NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16)
4117   NODE_NAME_CASE(BUFFER_STORE)
4118   NODE_NAME_CASE(BUFFER_STORE_FORMAT)
4119   NODE_NAME_CASE(BUFFER_STORE_FORMAT_D16)
4120   NODE_NAME_CASE(BUFFER_ATOMIC_SWAP)
4121   NODE_NAME_CASE(BUFFER_ATOMIC_ADD)
4122   NODE_NAME_CASE(BUFFER_ATOMIC_SUB)
4123   NODE_NAME_CASE(BUFFER_ATOMIC_SMIN)
4124   NODE_NAME_CASE(BUFFER_ATOMIC_UMIN)
4125   NODE_NAME_CASE(BUFFER_ATOMIC_SMAX)
4126   NODE_NAME_CASE(BUFFER_ATOMIC_UMAX)
4127   NODE_NAME_CASE(BUFFER_ATOMIC_AND)
4128   NODE_NAME_CASE(BUFFER_ATOMIC_OR)
4129   NODE_NAME_CASE(BUFFER_ATOMIC_XOR)
4130   NODE_NAME_CASE(BUFFER_ATOMIC_CMPSWAP)
4131 
4132   case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
4133   }
4134   return nullptr;
4135 }
4136 
SDValue AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand,
                                              SelectionDAG &DAG, int Enabled,
                                              int &RefinementSteps,
                                              bool &UseOneConstNR,
                                              bool Reciprocal) const {
  EVT VT = Operand.getValueType();

  if (VT == MVT::f32) {
    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also an f64 rsq instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}

SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
                                               SelectionDAG &DAG, int Enabled,
                                               int &RefinementSteps) const {
  EVT VT = Operand.getValueType();

  if (VT == MVT::f32) {
    // Reciprocal, < 1 ulp error.
    //
    // This reciprocal approximation converges to < 0.5 ulp error with one
    // Newton-Raphson iteration performed with two fused multiply-adds (FMAs).
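    //
    // As a sketch of that refinement step (not emitted here, since
    // RefinementSteps is left at 0), given an estimate x0 of 1/d:
    //   e  = fma(-d, x0, 1.0)   // residual error term
    //   x1 = fma(e,  x0, x0)    // refined estimate
    // which is the usual Newton-Raphson update x1 = x0 * (2 - d * x0).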

    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also an f64 rcp instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}

void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known,
    const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {

  Known.resetAll(); // Don't know anything.

  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  default:
    break;
  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW: {
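    // CARRY and BORROW produce either 0 or 1, so everything above the low
    // bit is known to be zero.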
    Known.Zero = APInt::getHighBitsSet(32, 31);
    break;
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CWidth)
      return;

    uint32_t Width = CWidth->getZExtValue() & 0x1f;

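    // The unsigned form zero-fills above the extracted field, so a field of
    // Width bits leaves the top 32 - Width bits known zero (e.g. an 8-bit
    // extract clears bits [31:8]).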
    if (Opc == AMDGPUISD::BFE_U32)
      Known.Zero = APInt::getHighBitsSet(32, 32 - Width);

    break;
  }
  case AMDGPUISD::FP_TO_FP16:
  case AMDGPUISD::FP16_ZEXT: {
    unsigned BitWidth = Known.getBitWidth();

    // High bits are zero.
    Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
    break;
  }
  case AMDGPUISD::MUL_U24:
  case AMDGPUISD::MUL_I24: {
    KnownBits LHSKnown, RHSKnown;
    DAG.computeKnownBits(Op.getOperand(0), LHSKnown, Depth + 1);
    DAG.computeKnownBits(Op.getOperand(1), RHSKnown, Depth + 1);

    unsigned TrailZ = LHSKnown.countMinTrailingZeros() +
                      RHSKnown.countMinTrailingZeros();
    Known.Zero.setLowBits(std::min(TrailZ, 32u));

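    // Each operand contributes at most 24 value bits (the remaining high bits
    // are sign or zero extension), so the product has at most
    // LHSValBits + RHSValBits significant bits; everything above that matches
    // the sign of the result.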
    unsigned LHSValBits = 32 - std::max(LHSKnown.countMinSignBits(), 8u);
    unsigned RHSValBits = 32 - std::max(RHSKnown.countMinSignBits(), 8u);
    unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
    if (MaxValBits >= 32)
      break;
    bool Negative = false;
    if (Opc == AMDGPUISD::MUL_I24) {
      bool LHSNegative = !!(LHSKnown.One  & (1 << 23));
      bool LHSPositive = !!(LHSKnown.Zero & (1 << 23));
      bool RHSNegative = !!(RHSKnown.One  & (1 << 23));
      bool RHSPositive = !!(RHSKnown.Zero & (1 << 23));
      if ((!LHSNegative && !LHSPositive) || (!RHSNegative && !RHSPositive))
        break;
      Negative = (LHSNegative && RHSPositive) || (LHSPositive && RHSNegative);
    }
    if (Negative)
      Known.One.setHighBits(32 - MaxValBits);
    else
      Known.Zero.setHighBits(32 - MaxValBits);
    break;
  }
  case AMDGPUISD::PERM: {
    ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CMask)
      return;

    KnownBits LHSKnown, RHSKnown;
    DAG.computeKnownBits(Op.getOperand(0), LHSKnown, Depth + 1);
    DAG.computeKnownBits(Op.getOperand(1), RHSKnown, Depth + 1);
    unsigned Sel = CMask->getZExtValue();

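    // Each byte of the selector picks one byte of the result: values 0-3 take
    // a byte from the second operand, 4-6 take a byte from the first, 0x0c
    // yields 0x00 and anything above 0x0c yields 0xff; other selector values
    // leave the byte unknown.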
    for (unsigned I = 0; I < 32; I += 8) {
      unsigned SelBits = Sel & 0xff;
      if (SelBits < 4) {
        SelBits *= 8;
        Known.One |= ((RHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
        Known.Zero |= ((RHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
      } else if (SelBits < 7) {
        SelBits = (SelBits & 3) * 8;
        Known.One |= ((LHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
        Known.Zero |= ((LHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
      } else if (SelBits == 0x0c) {
        Known.Zero |= 0xffull << I;
      } else if (SelBits > 0x0c) {
        Known.One |= 0xffull << I;
      }
      Sel >>= 8;
    }
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (IID) {
    case Intrinsic::amdgcn_mbcnt_lo:
    case Intrinsic::amdgcn_mbcnt_hi: {
      const GCNSubtarget &ST =
          DAG.getMachineFunction().getSubtarget<GCNSubtarget>();
      // These return at most the wavefront size - 1.
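      // E.g. for a 64-wide wavefront the result fits in 6 bits, so bits
      // [31:6] are known zero.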
      unsigned Size = Op.getValueType().getSizeInBits();
      Known.Zero.setHighBits(Size - ST.getWavefrontSizeLog2());
      break;
    }
    default:
      break;
    }
  }
  }
}

unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  switch (Op.getOpcode()) {
  case AMDGPUISD::BFE_I32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!Width)
      return 1;

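    // A signed field of Width bits, sign-extended into 32 bits, has at least
    // 32 - Width + 1 copies of the sign bit.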
    unsigned SignBits = 32 - Width->getZExtValue() + 1;
    if (!isNullConstant(Op.getOperand(1)))
      return SignBits;

    // TODO: Could probably figure something out with non-0 offsets.
    unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    return std::max(SignBits, Op0SignBits);
  }

  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
  }

  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW:
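    // The result is 0 or 1, so the top 31 bits are all copies of the (zero)
    // sign bit.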
    return 31;
  case AMDGPUISD::FP_TO_FP16:
  case AMDGPUISD::FP16_ZEXT:
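    // The 16-bit result is zero-extended into the 32-bit register, so the
    // upper half is known zero and at least 16 sign bits are available.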
    return 16;
  default:
    return 1;
  }
}
