1 //===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This is the parent TargetLowering class for hardware code gen
11 /// targets.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "AMDGPUISelLowering.h"
16 #include "AMDGPU.h"
17 #include "AMDGPUCallLowering.h"
18 #include "AMDGPUFrameLowering.h"
19 #include "AMDGPUSubtarget.h"
20 #include "AMDGPUTargetMachine.h"
21 #include "Utils/AMDGPUBaseInfo.h"
22 #include "R600MachineFunctionInfo.h"
23 #include "SIInstrInfo.h"
24 #include "SIMachineFunctionInfo.h"
25 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
26 #include "llvm/CodeGen/Analysis.h"
27 #include "llvm/CodeGen/CallingConvLower.h"
28 #include "llvm/CodeGen/MachineFunction.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/SelectionDAG.h"
31 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/DiagnosticInfo.h"
34 #include "llvm/Support/KnownBits.h"
35 #include "llvm/Support/MathExtras.h"
36 using namespace llvm;
37 
38 #include "AMDGPUGenCallingConv.inc"
39 
40 // Find a larger type to do a load / store of a vector with.
41 EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
42   unsigned StoreSize = VT.getStoreSizeInBits();
43   if (StoreSize <= 32)
44     return EVT::getIntegerVT(Ctx, StoreSize);
45 
46   assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
47   return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
48 }
49 
50 unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
51   EVT VT = Op.getValueType();
52   KnownBits Known = DAG.computeKnownBits(Op);
53   return VT.getSizeInBits() - Known.countMinLeadingZeros();
54 }
55 
56 unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
57   EVT VT = Op.getValueType();
58 
59   // In order for this to be a signed 24-bit value, bit 23, must
60   // be a sign bit.
61   return VT.getSizeInBits() - DAG.ComputeNumSignBits(Op);
62 }
63 
64 AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
65                                            const AMDGPUSubtarget &STI)
66     : TargetLowering(TM), Subtarget(&STI) {
67   // Lower floating point store/load to integer store/load to reduce the number
68   // of patterns in tablegen.
69   setOperationAction(ISD::LOAD, MVT::f32, Promote);
70   AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);
71 
72   setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
73   AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);
74 
75   setOperationAction(ISD::LOAD, MVT::v3f32, Promote);
76   AddPromotedToType(ISD::LOAD, MVT::v3f32, MVT::v3i32);
77 
78   setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
79   AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
80 
81   setOperationAction(ISD::LOAD, MVT::v5f32, Promote);
82   AddPromotedToType(ISD::LOAD, MVT::v5f32, MVT::v5i32);
83 
84   setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
85   AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);
86 
87   setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
88   AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);
89 
90   setOperationAction(ISD::LOAD, MVT::v32f32, Promote);
91   AddPromotedToType(ISD::LOAD, MVT::v32f32, MVT::v32i32);
92 
93   setOperationAction(ISD::LOAD, MVT::i64, Promote);
94   AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
95 
96   setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
97   AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);
98 
99   setOperationAction(ISD::LOAD, MVT::f64, Promote);
100   AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);
101 
102   setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
103   AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);
104 
105   // There are no 64-bit extloads. These should be done as a 32-bit extload and
106   // an extension to 64-bit.
107   for (MVT VT : MVT::integer_valuetypes()) {
108     setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
109     setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
110     setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
111   }
112 
113   for (MVT VT : MVT::integer_valuetypes()) {
114     if (VT == MVT::i64)
115       continue;
116 
117     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
118     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
119     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
120     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
121 
122     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
123     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
124     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
125     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);
126 
127     setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
128     setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
129     setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
130     setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
131   }
132 
133   for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
134     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
135     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
136     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
137     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
138     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
139     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
140     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
141     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
142     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
143     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v3i16, Expand);
144     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v3i16, Expand);
145     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v3i16, Expand);
146     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
147     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
148     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
149   }
150 
151   setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
152   setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
153   setLoadExtAction(ISD::EXTLOAD, MVT::v3f32, MVT::v3f16, Expand);
154   setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
155   setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
156   setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Expand);
157   setLoadExtAction(ISD::EXTLOAD, MVT::v32f32, MVT::v32f16, Expand);
158 
159   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
160   setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
161   setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
162   setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);
163 
164   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
165   setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
166   setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
167   setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);
168 
169   setOperationAction(ISD::STORE, MVT::f32, Promote);
170   AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);
171 
172   setOperationAction(ISD::STORE, MVT::v2f32, Promote);
173   AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);
174 
175   setOperationAction(ISD::STORE, MVT::v3f32, Promote);
176   AddPromotedToType(ISD::STORE, MVT::v3f32, MVT::v3i32);
177 
178   setOperationAction(ISD::STORE, MVT::v4f32, Promote);
179   AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);
180 
181   setOperationAction(ISD::STORE, MVT::v5f32, Promote);
182   AddPromotedToType(ISD::STORE, MVT::v5f32, MVT::v5i32);
183 
184   setOperationAction(ISD::STORE, MVT::v8f32, Promote);
185   AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);
186 
187   setOperationAction(ISD::STORE, MVT::v16f32, Promote);
188   AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);
189 
190   setOperationAction(ISD::STORE, MVT::v32f32, Promote);
191   AddPromotedToType(ISD::STORE, MVT::v32f32, MVT::v32i32);
192 
193   setOperationAction(ISD::STORE, MVT::i64, Promote);
194   AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
195 
196   setOperationAction(ISD::STORE, MVT::v2i64, Promote);
197   AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);
198 
199   setOperationAction(ISD::STORE, MVT::f64, Promote);
200   AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);
201 
202   setOperationAction(ISD::STORE, MVT::v2f64, Promote);
203   AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);
204 
205   setTruncStoreAction(MVT::i64, MVT::i1, Expand);
206   setTruncStoreAction(MVT::i64, MVT::i8, Expand);
207   setTruncStoreAction(MVT::i64, MVT::i16, Expand);
208   setTruncStoreAction(MVT::i64, MVT::i32, Expand);
209 
210   setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
211   setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
212   setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
213   setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);
214 
215   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
216   setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
217   setTruncStoreAction(MVT::v3f32, MVT::v3f16, Expand);
218   setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
219   setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);
220   setTruncStoreAction(MVT::v16f32, MVT::v16f16, Expand);
221   setTruncStoreAction(MVT::v32f32, MVT::v32f16, Expand);
222 
223   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
224   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
225 
226   setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
227   setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);
228 
229   setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
230   setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);
231 
232   setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
233   setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);
234 
235 
236   setOperationAction(ISD::Constant, MVT::i32, Legal);
237   setOperationAction(ISD::Constant, MVT::i64, Legal);
238   setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
239   setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
240 
241   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
242   setOperationAction(ISD::BRIND, MVT::Other, Expand);
243 
244   // This is totally unsupported, just custom lower to produce an error.
245   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
246 
247   // Library functions.  These default to Expand, but we have instructions
248   // for them.
249   setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
250   setOperationAction(ISD::FEXP2,  MVT::f32, Legal);
251   setOperationAction(ISD::FPOW,   MVT::f32, Legal);
252   setOperationAction(ISD::FLOG2,  MVT::f32, Legal);
253   setOperationAction(ISD::FABS,   MVT::f32, Legal);
254   setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
255   setOperationAction(ISD::FRINT,  MVT::f32, Legal);
256   setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
257   setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
258   setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
259 
260   setOperationAction(ISD::FROUND, MVT::f32, Custom);
261   setOperationAction(ISD::FROUND, MVT::f64, Custom);
262 
263   setOperationAction(ISD::FLOG, MVT::f32, Custom);
264   setOperationAction(ISD::FLOG10, MVT::f32, Custom);
265   setOperationAction(ISD::FEXP, MVT::f32, Custom);
266 
267 
268   setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
269   setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);
270 
271   setOperationAction(ISD::FREM, MVT::f32, Custom);
272   setOperationAction(ISD::FREM, MVT::f64, Custom);
273 
274   // Expand to fneg + fadd.
275   setOperationAction(ISD::FSUB, MVT::f64, Expand);
276 
277   setOperationAction(ISD::CONCAT_VECTORS, MVT::v3i32, Custom);
278   setOperationAction(ISD::CONCAT_VECTORS, MVT::v3f32, Custom);
279   setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
280   setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
281   setOperationAction(ISD::CONCAT_VECTORS, MVT::v5i32, Custom);
282   setOperationAction(ISD::CONCAT_VECTORS, MVT::v5f32, Custom);
283   setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
284   setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
285   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
286   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
287   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3f32, Custom);
288   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3i32, Custom);
289   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
290   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
291   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v5f32, Custom);
292   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v5i32, Custom);
293   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
294   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);
295   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16f32, Custom);
296   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16i32, Custom);
297   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32f32, Custom);
298   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32i32, Custom);
299 
300   setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
301   setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom);
302   setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);
303 
304   const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
305   for (MVT VT : ScalarIntVTs) {
306     // These should use [SU]DIVREM, so set them to expand
307     setOperationAction(ISD::SDIV, VT, Expand);
308     setOperationAction(ISD::UDIV, VT, Expand);
309     setOperationAction(ISD::SREM, VT, Expand);
310     setOperationAction(ISD::UREM, VT, Expand);
311 
312     // GPU does not have divrem function for signed or unsigned.
313     setOperationAction(ISD::SDIVREM, VT, Custom);
314     setOperationAction(ISD::UDIVREM, VT, Custom);
315 
316     // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
317     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
318     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
319 
320     setOperationAction(ISD::BSWAP, VT, Expand);
321     setOperationAction(ISD::CTTZ, VT, Expand);
322     setOperationAction(ISD::CTLZ, VT, Expand);
323 
324     // AMDGPU uses ADDC/SUBC/ADDE/SUBE
325     setOperationAction(ISD::ADDC, VT, Legal);
326     setOperationAction(ISD::SUBC, VT, Legal);
327     setOperationAction(ISD::ADDE, VT, Legal);
328     setOperationAction(ISD::SUBE, VT, Legal);
329   }
330 
331   // The hardware supports 32-bit ROTR, but not ROTL.
332   setOperationAction(ISD::ROTL, MVT::i32, Expand);
333   setOperationAction(ISD::ROTL, MVT::i64, Expand);
334   setOperationAction(ISD::ROTR, MVT::i64, Expand);
335 
336   setOperationAction(ISD::MUL, MVT::i64, Expand);
337   setOperationAction(ISD::MULHU, MVT::i64, Expand);
338   setOperationAction(ISD::MULHS, MVT::i64, Expand);
339   setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
340   setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
341   setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
342   setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
343   setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
344 
345   setOperationAction(ISD::SMIN, MVT::i32, Legal);
346   setOperationAction(ISD::UMIN, MVT::i32, Legal);
347   setOperationAction(ISD::SMAX, MVT::i32, Legal);
348   setOperationAction(ISD::UMAX, MVT::i32, Legal);
349 
350   setOperationAction(ISD::CTTZ, MVT::i64, Custom);
351   setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
352   setOperationAction(ISD::CTLZ, MVT::i64, Custom);
353   setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
354 
355   static const MVT::SimpleValueType VectorIntTypes[] = {
356     MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32
357   };
358 
359   for (MVT VT : VectorIntTypes) {
360     // Expand the following operations for the current type by default.
361     setOperationAction(ISD::ADD,  VT, Expand);
362     setOperationAction(ISD::AND,  VT, Expand);
363     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
364     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
365     setOperationAction(ISD::MUL,  VT, Expand);
366     setOperationAction(ISD::MULHU, VT, Expand);
367     setOperationAction(ISD::MULHS, VT, Expand);
368     setOperationAction(ISD::OR,   VT, Expand);
369     setOperationAction(ISD::SHL,  VT, Expand);
370     setOperationAction(ISD::SRA,  VT, Expand);
371     setOperationAction(ISD::SRL,  VT, Expand);
372     setOperationAction(ISD::ROTL, VT, Expand);
373     setOperationAction(ISD::ROTR, VT, Expand);
374     setOperationAction(ISD::SUB,  VT, Expand);
375     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
376     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
377     setOperationAction(ISD::SDIV, VT, Expand);
378     setOperationAction(ISD::UDIV, VT, Expand);
379     setOperationAction(ISD::SREM, VT, Expand);
380     setOperationAction(ISD::UREM, VT, Expand);
381     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
382     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
383     setOperationAction(ISD::SDIVREM, VT, Custom);
384     setOperationAction(ISD::UDIVREM, VT, Expand);
385     setOperationAction(ISD::SELECT, VT, Expand);
386     setOperationAction(ISD::VSELECT, VT, Expand);
387     setOperationAction(ISD::SELECT_CC, VT, Expand);
388     setOperationAction(ISD::XOR,  VT, Expand);
389     setOperationAction(ISD::BSWAP, VT, Expand);
390     setOperationAction(ISD::CTPOP, VT, Expand);
391     setOperationAction(ISD::CTTZ, VT, Expand);
392     setOperationAction(ISD::CTLZ, VT, Expand);
393     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
394     setOperationAction(ISD::SETCC, VT, Expand);
395   }
396 
397   static const MVT::SimpleValueType FloatVectorTypes[] = {
398      MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32
399   };
400 
401   for (MVT VT : FloatVectorTypes) {
402     setOperationAction(ISD::FABS, VT, Expand);
403     setOperationAction(ISD::FMINNUM, VT, Expand);
404     setOperationAction(ISD::FMAXNUM, VT, Expand);
405     setOperationAction(ISD::FADD, VT, Expand);
406     setOperationAction(ISD::FCEIL, VT, Expand);
407     setOperationAction(ISD::FCOS, VT, Expand);
408     setOperationAction(ISD::FDIV, VT, Expand);
409     setOperationAction(ISD::FEXP2, VT, Expand);
410     setOperationAction(ISD::FEXP, VT, Expand);
411     setOperationAction(ISD::FLOG2, VT, Expand);
412     setOperationAction(ISD::FREM, VT, Expand);
413     setOperationAction(ISD::FLOG, VT, Expand);
414     setOperationAction(ISD::FLOG10, VT, Expand);
415     setOperationAction(ISD::FPOW, VT, Expand);
416     setOperationAction(ISD::FFLOOR, VT, Expand);
417     setOperationAction(ISD::FTRUNC, VT, Expand);
418     setOperationAction(ISD::FMUL, VT, Expand);
419     setOperationAction(ISD::FMA, VT, Expand);
420     setOperationAction(ISD::FRINT, VT, Expand);
421     setOperationAction(ISD::FNEARBYINT, VT, Expand);
422     setOperationAction(ISD::FSQRT, VT, Expand);
423     setOperationAction(ISD::FSIN, VT, Expand);
424     setOperationAction(ISD::FSUB, VT, Expand);
425     setOperationAction(ISD::FNEG, VT, Expand);
426     setOperationAction(ISD::VSELECT, VT, Expand);
427     setOperationAction(ISD::SELECT_CC, VT, Expand);
428     setOperationAction(ISD::FCOPYSIGN, VT, Expand);
429     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
430     setOperationAction(ISD::SETCC, VT, Expand);
431     setOperationAction(ISD::FCANONICALIZE, VT, Expand);
432   }
433 
434   // This causes using an unrolled select operation rather than expansion with
435   // bit operations. This is in general better, but the alternative using BFI
436   // instructions may be better if the select sources are SGPRs.
437   setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
438   AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);
439 
440   setOperationAction(ISD::SELECT, MVT::v3f32, Promote);
441   AddPromotedToType(ISD::SELECT, MVT::v3f32, MVT::v3i32);
442 
443   setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
444   AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);
445 
446   setOperationAction(ISD::SELECT, MVT::v5f32, Promote);
447   AddPromotedToType(ISD::SELECT, MVT::v5f32, MVT::v5i32);
448 
449   // There are no libcalls of any kind.
450   for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
451     setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);
452 
453   setSchedulingPreference(Sched::RegPressure);
454   setJumpIsExpensive(true);
455 
456   // FIXME: This is only partially true. If we have to do vector compares, any
457   // SGPR pair can be a condition register. If we have a uniform condition, we
458   // are better off doing SALU operations, where there is only one SCC. For now,
459   // we don't have a way of knowing during instruction selection if a condition
460   // will be uniform and we always use vector compares. Assume we are using
461   // vector compares until that is fixed.
462   setHasMultipleConditionRegisters(true);
463 
464   setMinCmpXchgSizeInBits(32);
465   setSupportsUnalignedAtomics(false);
466 
467   PredictableSelectIsExpensive = false;
468 
469   // We want to find all load dependencies for long chains of stores to enable
470   // merging into very wide vectors. The problem is with vectors with > 4
471   // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
472   // vectors are a legal type, even though we have to split the loads
473   // usually. When we can more precisely specify load legality per address
474   // space, we should be able to make FindBetterChain/MergeConsecutiveStores
475   // smarter so that they can figure out what to do in 2 iterations without all
476   // N > 4 stores on the same chain.
477   GatherAllAliasesMaxDepth = 16;
478 
479   // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to worry
480   // about these during lowering.
481   MaxStoresPerMemcpy  = 0xffffffff;
482   MaxStoresPerMemmove = 0xffffffff;
483   MaxStoresPerMemset  = 0xffffffff;
484 
485   setTargetDAGCombine(ISD::BITCAST);
486   setTargetDAGCombine(ISD::SHL);
487   setTargetDAGCombine(ISD::SRA);
488   setTargetDAGCombine(ISD::SRL);
489   setTargetDAGCombine(ISD::TRUNCATE);
490   setTargetDAGCombine(ISD::MUL);
491   setTargetDAGCombine(ISD::MULHU);
492   setTargetDAGCombine(ISD::MULHS);
493   setTargetDAGCombine(ISD::SELECT);
494   setTargetDAGCombine(ISD::SELECT_CC);
495   setTargetDAGCombine(ISD::STORE);
496   setTargetDAGCombine(ISD::FADD);
497   setTargetDAGCombine(ISD::FSUB);
498   setTargetDAGCombine(ISD::FNEG);
499   setTargetDAGCombine(ISD::FABS);
500   setTargetDAGCombine(ISD::AssertZext);
501   setTargetDAGCombine(ISD::AssertSext);
502   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
503 }
504 
505 //===----------------------------------------------------------------------===//
506 // Target Information
507 //===----------------------------------------------------------------------===//
508 
509 LLVM_READNONE
510 static bool fnegFoldsIntoOp(unsigned Opc) {
511   switch (Opc) {
512   case ISD::FADD:
513   case ISD::FSUB:
514   case ISD::FMUL:
515   case ISD::FMA:
516   case ISD::FMAD:
517   case ISD::FMINNUM:
518   case ISD::FMAXNUM:
519   case ISD::FMINNUM_IEEE:
520   case ISD::FMAXNUM_IEEE:
521   case ISD::FSIN:
522   case ISD::FTRUNC:
523   case ISD::FRINT:
524   case ISD::FNEARBYINT:
525   case ISD::FCANONICALIZE:
526   case AMDGPUISD::RCP:
527   case AMDGPUISD::RCP_LEGACY:
528   case AMDGPUISD::RCP_IFLAG:
529   case AMDGPUISD::SIN_HW:
530   case AMDGPUISD::FMUL_LEGACY:
531   case AMDGPUISD::FMIN_LEGACY:
532   case AMDGPUISD::FMAX_LEGACY:
533   case AMDGPUISD::FMED3:
534     return true;
535   default:
536     return false;
537   }
538 }
539 
540 /// \p returns true if the operation will definitely need to use a 64-bit
541 /// encoding, and thus will use a VOP3 encoding regardless of the source
542 /// modifiers.
543 LLVM_READONLY
544 static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
545   return N->getNumOperands() > 2 || VT == MVT::f64;
546 }
547 
548 // Most FP instructions support source modifiers, but this could be refined
549 // slightly.
550 LLVM_READONLY
551 static bool hasSourceMods(const SDNode *N) {
552   if (isa<MemSDNode>(N))
553     return false;
554 
555   switch (N->getOpcode()) {
556   case ISD::CopyToReg:
557   case ISD::SELECT:
558   case ISD::FDIV:
559   case ISD::FREM:
560   case ISD::INLINEASM:
561   case ISD::INLINEASM_BR:
562   case AMDGPUISD::DIV_SCALE:
563   case ISD::INTRINSIC_W_CHAIN:
564 
565   // TODO: Should really be looking at the users of the bitcast. These are
566   // problematic because bitcasts are used to legalize all stores to integer
567   // types.
568   case ISD::BITCAST:
569     return false;
570   case ISD::INTRINSIC_WO_CHAIN: {
571     switch (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue()) {
572     case Intrinsic::amdgcn_interp_p1:
573     case Intrinsic::amdgcn_interp_p2:
574     case Intrinsic::amdgcn_interp_mov:
575     case Intrinsic::amdgcn_interp_p1_f16:
576     case Intrinsic::amdgcn_interp_p2_f16:
577       return false;
578     default:
579       return true;
580     }
581   }
582   default:
583     return true;
584   }
585 }
586 
587 bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
588                                                  unsigned CostThreshold) {
589   // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
590   // it is truly free to use a source modifier in all cases. If there are
591   // multiple users but for each one will necessitate using VOP3, there will be
592   // a code size increase. Try to avoid increasing code size unless we know it
593   // will save on the instruction count.
594   unsigned NumMayIncreaseSize = 0;
595   MVT VT = N->getValueType(0).getScalarType().getSimpleVT();
596 
597   // XXX - Should this limit number of uses to check?
598   for (const SDNode *U : N->uses()) {
599     if (!hasSourceMods(U))
600       return false;
601 
602     if (!opMustUseVOP3Encoding(U, VT)) {
603       if (++NumMayIncreaseSize > CostThreshold)
604         return false;
605     }
606   }
607 
608   return true;
609 }
610 
611 MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
612   return MVT::i32;
613 }
614 
615 bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
616   return true;
617 }
618 
619 // The backend supports 32 and 64 bit floating point immediates.
620 // FIXME: Why are we reporting vectors of FP immediates as legal?
621 bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
622                                         bool ForCodeSize) const {
623   EVT ScalarVT = VT.getScalarType();
624   return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
625          (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
626 }
627 
628 // We don't want to shrink f64 / f32 constants.
629 bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
630   EVT ScalarVT = VT.getScalarType();
631   return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
632 }
633 
634 bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
635                                                  ISD::LoadExtType ExtTy,
636                                                  EVT NewVT) const {
637   // TODO: This may be worth removing. Check regression tests for diffs.
638   if (!TargetLoweringBase::shouldReduceLoadWidth(N, ExtTy, NewVT))
639     return false;
640 
641   unsigned NewSize = NewVT.getStoreSizeInBits();
642 
643   // If we are reducing to a 32-bit load or a smaller multi-dword load,
644   // this is always better.
645   if (NewSize >= 32)
646     return true;
647 
648   EVT OldVT = N->getValueType(0);
649   unsigned OldSize = OldVT.getStoreSizeInBits();
650 
651   MemSDNode *MN = cast<MemSDNode>(N);
652   unsigned AS = MN->getAddressSpace();
653   // Do not shrink an aligned scalar load to sub-dword.
654   // Scalar engine cannot do sub-dword loads.
655   if (OldSize >= 32 && NewSize < 32 && MN->getAlignment() >= 4 &&
656       (AS == AMDGPUAS::CONSTANT_ADDRESS ||
657        AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
658        (isa<LoadSDNode>(N) &&
659         AS == AMDGPUAS::GLOBAL_ADDRESS && MN->isInvariant())) &&
660       AMDGPUInstrInfo::isUniformMMO(MN->getMemOperand()))
661     return false;
662 
663   // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
664   // extloads, so doing one requires using a buffer_load. In cases where we
665   // still couldn't use a scalar load, using the wider load shouldn't really
666   // hurt anything.
667 
668   // If the old size already had to be an extload, there's no harm in continuing
669   // to reduce the width.
670   return (OldSize < 32);
671 }
672 
673 bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, EVT CastTy,
674                                                    const SelectionDAG &DAG,
675                                                    const MachineMemOperand &MMO) const {
676 
677   assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());
678 
679   if (LoadTy.getScalarType() == MVT::i32)
680     return false;
681 
682   unsigned LScalarSize = LoadTy.getScalarSizeInBits();
683   unsigned CastScalarSize = CastTy.getScalarSizeInBits();
684 
685   if ((LScalarSize >= CastScalarSize) && (CastScalarSize < 32))
686     return false;
687 
688   bool Fast = false;
689   return allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
690                                         CastTy, MMO, &Fast) &&
691          Fast;
692 }
693 
694 // SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
695 // profitable with the expansion for 64-bit since it's generally good to
696 // speculate things.
697 // FIXME: These should really have the size as a parameter.
698 bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
699   return true;
700 }
701 
702 bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
703   return true;
704 }
705 
706 bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode * N) const {
707   switch (N->getOpcode()) {
708     default:
709     return false;
710     case ISD::EntryToken:
711     case ISD::TokenFactor:
712       return true;
713     case ISD::INTRINSIC_WO_CHAIN:
714     {
715       unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
716       switch (IntrID) {
717         default:
718         return false;
719         case Intrinsic::amdgcn_readfirstlane:
720         case Intrinsic::amdgcn_readlane:
721           return true;
722       }
723     }
724     break;
725     case ISD::LOAD:
726     {
727       if (cast<LoadSDNode>(N)->getMemOperand()->getAddrSpace() ==
728           AMDGPUAS::CONSTANT_ADDRESS_32BIT)
729         return true;
730       return false;
731     }
732     break;
733   }
734 }
735 
736 char AMDGPUTargetLowering::isNegatibleForFree(SDValue Op, SelectionDAG &DAG,
737                                               bool LegalOperations,
738                                               bool ForCodeSize,
739                                               unsigned Depth) const {
740   switch (Op.getOpcode()) {
741     case ISD::FMA:
742     case ISD::FMAD: {
743       // Negating a fma is not free if it has users without source mods.
744       if (!allUsesHaveSourceMods(Op.getNode()))
745         return 0;
746       break;
747     }
748     default:
749       break;
750   }
751 
752   return TargetLowering::isNegatibleForFree(Op, DAG, LegalOperations,
753                                             ForCodeSize, Depth);
754 }
755 
756 //===---------------------------------------------------------------------===//
757 // Target Properties
758 //===---------------------------------------------------------------------===//
759 
760 bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
761   assert(VT.isFloatingPoint());
762 
763   // Packed operations do not have a fabs modifier.
764   return VT == MVT::f32 || VT == MVT::f64 ||
765          (Subtarget->has16BitInsts() && VT == MVT::f16);
766 }
767 
768 bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
769   assert(VT.isFloatingPoint());
770   return VT == MVT::f32 || VT == MVT::f64 ||
771          (Subtarget->has16BitInsts() && VT == MVT::f16) ||
772          (Subtarget->hasVOP3PInsts() && VT == MVT::v2f16);
773 }
774 
775 bool AMDGPUTargetLowering:: storeOfVectorConstantIsCheap(EVT MemVT,
776                                                          unsigned NumElem,
777                                                          unsigned AS) const {
778   return true;
779 }
780 
781 bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
782   // There are few operations which truly have vector input operands. Any vector
783   // operation is going to involve operations on each component, and a
784   // build_vector will be a copy per element, so it always makes sense to use a
785   // build_vector input in place of the extracted element to avoid a copy into a
786   // super register.
787   //
788   // We should probably only do this if all users are extracts only, but this
789   // should be the common case.
790   return true;
791 }
792 
793 bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
794   // Truncate is just accessing a subregister.
795 
796   unsigned SrcSize = Source.getSizeInBits();
797   unsigned DestSize = Dest.getSizeInBits();
798 
799   return DestSize < SrcSize && DestSize % 32 == 0 ;
800 }
801 
802 bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
803   // Truncate is just accessing a subregister.
804 
805   unsigned SrcSize = Source->getScalarSizeInBits();
806   unsigned DestSize = Dest->getScalarSizeInBits();
807 
808   if (DestSize== 16 && Subtarget->has16BitInsts())
809     return SrcSize >= 32;
810 
811   return DestSize < SrcSize && DestSize % 32 == 0;
812 }
813 
814 bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
815   unsigned SrcSize = Src->getScalarSizeInBits();
816   unsigned DestSize = Dest->getScalarSizeInBits();
817 
818   if (SrcSize == 16 && Subtarget->has16BitInsts())
819     return DestSize >= 32;
820 
821   return SrcSize == 32 && DestSize == 64;
822 }
823 
824 bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
825   // Any register load of a 64-bit value really requires 2 32-bit moves. For all
826   // practical purposes, the extra mov 0 to load a 64-bit is free.  As used,
827   // this will enable reducing 64-bit operations the 32-bit, which is always
828   // good.
829 
830   if (Src == MVT::i16)
831     return Dest == MVT::i32 ||Dest == MVT::i64 ;
832 
833   return Src == MVT::i32 && Dest == MVT::i64;
834 }
835 
836 bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
837   return isZExtFree(Val.getValueType(), VT2);
838 }
839 
840 bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
841   // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
842   // limited number of native 64-bit operations. Shrinking an operation to fit
843   // in a single 32-bit register should always be helpful. As currently used,
844   // this is much less general than the name suggests, and is only used in
845   // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
846   // not profitable, and may actually be harmful.
847   return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
848 }
849 
850 //===---------------------------------------------------------------------===//
851 // TargetLowering Callbacks
852 //===---------------------------------------------------------------------===//
853 
854 CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
855                                                   bool IsVarArg) {
856   switch (CC) {
857   case CallingConv::AMDGPU_VS:
858   case CallingConv::AMDGPU_GS:
859   case CallingConv::AMDGPU_PS:
860   case CallingConv::AMDGPU_CS:
861   case CallingConv::AMDGPU_HS:
862   case CallingConv::AMDGPU_ES:
863   case CallingConv::AMDGPU_LS:
864     return CC_AMDGPU;
865   case CallingConv::C:
866   case CallingConv::Fast:
867   case CallingConv::Cold:
868     return CC_AMDGPU_Func;
869   case CallingConv::AMDGPU_KERNEL:
870   case CallingConv::SPIR_KERNEL:
871   default:
872     report_fatal_error("Unsupported calling convention for call");
873   }
874 }
875 
876 CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
877                                                     bool IsVarArg) {
878   switch (CC) {
879   case CallingConv::AMDGPU_KERNEL:
880   case CallingConv::SPIR_KERNEL:
881     llvm_unreachable("kernels should not be handled here");
882   case CallingConv::AMDGPU_VS:
883   case CallingConv::AMDGPU_GS:
884   case CallingConv::AMDGPU_PS:
885   case CallingConv::AMDGPU_CS:
886   case CallingConv::AMDGPU_HS:
887   case CallingConv::AMDGPU_ES:
888   case CallingConv::AMDGPU_LS:
889     return RetCC_SI_Shader;
890   case CallingConv::C:
891   case CallingConv::Fast:
892   case CallingConv::Cold:
893     return RetCC_AMDGPU_Func;
894   default:
895     report_fatal_error("Unsupported calling convention.");
896   }
897 }
898 
899 /// The SelectionDAGBuilder will automatically promote function arguments
900 /// with illegal types.  However, this does not work for the AMDGPU targets
901 /// since the function arguments are stored in memory as these illegal types.
902 /// In order to handle this properly we need to get the original types sizes
903 /// from the LLVM IR Function and fixup the ISD:InputArg values before
904 /// passing them to AnalyzeFormalArguments()
905 
906 /// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
907 /// input values across multiple registers.  Each item in the Ins array
908 /// represents a single value that will be stored in registers.  Ins[x].VT is
909 /// the value type of the value that will be stored in the register, so
910 /// whatever SDNode we lower the argument to needs to be this type.
911 ///
912 /// In order to correctly lower the arguments we need to know the size of each
913 /// argument.  Since Ins[x].VT gives us the size of the register that will
914 /// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
915 /// for the orignal function argument so that we can deduce the correct memory
916 /// type to use for Ins[x].  In most cases the correct memory type will be
917 /// Ins[x].ArgVT.  However, this will not always be the case.  If, for example,
918 /// we have a kernel argument of type v8i8, this argument will be split into
919 /// 8 parts and each part will be represented by its own item in the Ins array.
920 /// For each part the Ins[x].ArgVT will be the v8i8, which is the full type of
921 /// the argument before it was split.  From this, we deduce that the memory type
922 /// for each individual part is i8.  We pass the memory type as LocVT to the
923 /// calling convention analysis function and the register type (Ins[x].VT) as
924 /// the ValVT.
925 void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
926   CCState &State,
927   const SmallVectorImpl<ISD::InputArg> &Ins) const {
928   const MachineFunction &MF = State.getMachineFunction();
929   const Function &Fn = MF.getFunction();
930   LLVMContext &Ctx = Fn.getParent()->getContext();
931   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
932   const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset(Fn);
933   CallingConv::ID CC = Fn.getCallingConv();
934 
935   unsigned MaxAlign = 1;
936   uint64_t ExplicitArgOffset = 0;
937   const DataLayout &DL = Fn.getParent()->getDataLayout();
938 
939   unsigned InIndex = 0;
940 
941   for (const Argument &Arg : Fn.args()) {
942     Type *BaseArgTy = Arg.getType();
943     unsigned Align = DL.getABITypeAlignment(BaseArgTy);
944     MaxAlign = std::max(Align, MaxAlign);
945     unsigned AllocSize = DL.getTypeAllocSize(BaseArgTy);
946 
947     uint64_t ArgOffset = alignTo(ExplicitArgOffset, Align) + ExplicitOffset;
948     ExplicitArgOffset = alignTo(ExplicitArgOffset, Align) + AllocSize;
949 
950     // We're basically throwing away everything passed into us and starting over
951     // to get accurate in-memory offsets. The "PartOffset" is completely useless
952     // to us as computed in Ins.
953     //
954     // We also need to figure out what type legalization is trying to do to get
955     // the correct memory offsets.
956 
957     SmallVector<EVT, 16> ValueVTs;
958     SmallVector<uint64_t, 16> Offsets;
959     ComputeValueVTs(*this, DL, BaseArgTy, ValueVTs, &Offsets, ArgOffset);
960 
961     for (unsigned Value = 0, NumValues = ValueVTs.size();
962          Value != NumValues; ++Value) {
963       uint64_t BasePartOffset = Offsets[Value];
964 
965       EVT ArgVT = ValueVTs[Value];
966       EVT MemVT = ArgVT;
967       MVT RegisterVT = getRegisterTypeForCallingConv(Ctx, CC, ArgVT);
968       unsigned NumRegs = getNumRegistersForCallingConv(Ctx, CC, ArgVT);
969 
970       if (NumRegs == 1) {
971         // This argument is not split, so the IR type is the memory type.
972         if (ArgVT.isExtended()) {
973           // We have an extended type, like i24, so we should just use the
974           // register type.
975           MemVT = RegisterVT;
976         } else {
977           MemVT = ArgVT;
978         }
979       } else if (ArgVT.isVector() && RegisterVT.isVector() &&
980                  ArgVT.getScalarType() == RegisterVT.getScalarType()) {
981         assert(ArgVT.getVectorNumElements() > RegisterVT.getVectorNumElements());
982         // We have a vector value which has been split into a vector with
983         // the same scalar type, but fewer elements.  This should handle
984         // all the floating-point vector types.
985         MemVT = RegisterVT;
986       } else if (ArgVT.isVector() &&
987                  ArgVT.getVectorNumElements() == NumRegs) {
988         // This arg has been split so that each element is stored in a separate
989         // register.
990         MemVT = ArgVT.getScalarType();
991       } else if (ArgVT.isExtended()) {
992         // We have an extended type, like i65.
993         MemVT = RegisterVT;
994       } else {
995         unsigned MemoryBits = ArgVT.getStoreSizeInBits() / NumRegs;
996         assert(ArgVT.getStoreSizeInBits() % NumRegs == 0);
997         if (RegisterVT.isInteger()) {
998           MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
999         } else if (RegisterVT.isVector()) {
1000           assert(!RegisterVT.getScalarType().isFloatingPoint());
1001           unsigned NumElements = RegisterVT.getVectorNumElements();
1002           assert(MemoryBits % NumElements == 0);
1003           // This vector type has been split into another vector type with
1004           // a different elements size.
1005           EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
1006                                            MemoryBits / NumElements);
1007           MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
1008         } else {
1009           llvm_unreachable("cannot deduce memory type.");
1010         }
1011       }
1012 
1013       // Convert one element vectors to scalar.
1014       if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
1015         MemVT = MemVT.getScalarType();
1016 
1017       // Round up vec3/vec5 argument.
1018       if (MemVT.isVector() && !MemVT.isPow2VectorType()) {
1019         assert(MemVT.getVectorNumElements() == 3 ||
1020                MemVT.getVectorNumElements() == 5);
1021         MemVT = MemVT.getPow2VectorType(State.getContext());
1022       }
1023 
1024       unsigned PartOffset = 0;
1025       for (unsigned i = 0; i != NumRegs; ++i) {
1026         State.addLoc(CCValAssign::getCustomMem(InIndex++, RegisterVT,
1027                                                BasePartOffset + PartOffset,
1028                                                MemVT.getSimpleVT(),
1029                                                CCValAssign::Full));
1030         PartOffset += MemVT.getStoreSize();
1031       }
1032     }
1033   }
1034 }
1035 
1036 SDValue AMDGPUTargetLowering::LowerReturn(
1037   SDValue Chain, CallingConv::ID CallConv,
1038   bool isVarArg,
1039   const SmallVectorImpl<ISD::OutputArg> &Outs,
1040   const SmallVectorImpl<SDValue> &OutVals,
1041   const SDLoc &DL, SelectionDAG &DAG) const {
1042   // FIXME: Fails for r600 tests
1043   //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
1044   // "wave terminate should not have return values");
1045   return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
1046 }
1047 
1048 //===---------------------------------------------------------------------===//
1049 // Target specific lowering
1050 //===---------------------------------------------------------------------===//
1051 
1052 /// Selects the correct CCAssignFn for a given CallingConvention value.
1053 CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
1054                                                     bool IsVarArg) {
1055   return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
1056 }
1057 
1058 CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
1059                                                       bool IsVarArg) {
1060   return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
1061 }
1062 
1063 SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain,
1064                                                   SelectionDAG &DAG,
1065                                                   MachineFrameInfo &MFI,
1066                                                   int ClobberedFI) const {
1067   SmallVector<SDValue, 8> ArgChains;
1068   int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
1069   int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
1070 
1071   // Include the original chain at the beginning of the list. When this is
1072   // used by target LowerCall hooks, this helps legalize find the
1073   // CALLSEQ_BEGIN node.
1074   ArgChains.push_back(Chain);
1075 
1076   // Add a chain value for each stack argument corresponding
1077   for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
1078                             UE = DAG.getEntryNode().getNode()->use_end();
1079        U != UE; ++U) {
1080     if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) {
1081       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
1082         if (FI->getIndex() < 0) {
1083           int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
1084           int64_t InLastByte = InFirstByte;
1085           InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;
1086 
1087           if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
1088               (FirstByte <= InFirstByte && InFirstByte <= LastByte))
1089             ArgChains.push_back(SDValue(L, 1));
1090         }
1091       }
1092     }
1093   }
1094 
1095   // Build a tokenfactor for all the chains.
1096   return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
1097 }
1098 
1099 SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
1100                                                  SmallVectorImpl<SDValue> &InVals,
1101                                                  StringRef Reason) const {
1102   SDValue Callee = CLI.Callee;
1103   SelectionDAG &DAG = CLI.DAG;
1104 
1105   const Function &Fn = DAG.getMachineFunction().getFunction();
1106 
1107   StringRef FuncName("<unknown>");
1108 
1109   if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
1110     FuncName = G->getSymbol();
1111   else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1112     FuncName = G->getGlobal()->getName();
1113 
1114   DiagnosticInfoUnsupported NoCalls(
1115     Fn, Reason + FuncName, CLI.DL.getDebugLoc());
1116   DAG.getContext()->diagnose(NoCalls);
1117 
1118   if (!CLI.IsTailCall) {
1119     for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
1120       InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
1121   }
1122 
1123   return DAG.getEntryNode();
1124 }
1125 
1126 SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
1127                                         SmallVectorImpl<SDValue> &InVals) const {
1128   return lowerUnhandledCall(CLI, InVals, "unsupported call to function ");
1129 }
1130 
1131 SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
1132                                                       SelectionDAG &DAG) const {
1133   const Function &Fn = DAG.getMachineFunction().getFunction();
1134 
1135   DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
1136                                             SDLoc(Op).getDebugLoc());
1137   DAG.getContext()->diagnose(NoDynamicAlloca);
1138   auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
1139   return DAG.getMergeValues(Ops, SDLoc());
1140 }
1141 
1142 SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
1143                                              SelectionDAG &DAG) const {
1144   switch (Op.getOpcode()) {
1145   default:
1146     Op->print(errs(), &DAG);
1147     llvm_unreachable("Custom lowering code for this"
1148                      "instruction is not implemented yet!");
1149     break;
1150   case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
1151   case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
1152   case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
1153   case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
1154   case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
1155   case ISD::FREM: return LowerFREM(Op, DAG);
1156   case ISD::FCEIL: return LowerFCEIL(Op, DAG);
1157   case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
1158   case ISD::FRINT: return LowerFRINT(Op, DAG);
1159   case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
1160   case ISD::FROUND: return LowerFROUND(Op, DAG);
1161   case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
1162   case ISD::FLOG:
1163     return LowerFLOG(Op, DAG, 1.0F / numbers::log2ef);
1164   case ISD::FLOG10:
1165     return LowerFLOG(Op, DAG, numbers::ln2f / numbers::ln10f);
1166   case ISD::FEXP:
1167     return lowerFEXP(Op, DAG);
1168   case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
1169   case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
1170   case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
1171   case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
1172   case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
1173   case ISD::CTTZ:
1174   case ISD::CTTZ_ZERO_UNDEF:
1175   case ISD::CTLZ:
1176   case ISD::CTLZ_ZERO_UNDEF:
1177     return LowerCTLZ_CTTZ(Op, DAG);
1178   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
1179   }
1180   return Op;
1181 }
1182 
1183 void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
1184                                               SmallVectorImpl<SDValue> &Results,
1185                                               SelectionDAG &DAG) const {
1186   switch (N->getOpcode()) {
1187   case ISD::SIGN_EXTEND_INREG:
1188     // Different parts of legalization seem to interpret which type of
1189     // sign_extend_inreg is the one to check for custom lowering. The extended
1190     // from type is what really matters, but some places check for custom
1191     // lowering of the result type. This results in trying to use
1192     // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
1193     // nothing here and let the illegal result integer be handled normally.
1194     return;
1195   default:
1196     return;
1197   }
1198 }
1199 
1200 bool AMDGPUTargetLowering::hasDefinedInitializer(const GlobalValue *GV) {
1201   const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
1202   if (!GVar || !GVar->hasInitializer())
1203     return false;
1204 
1205   return !isa<UndefValue>(GVar->getInitializer());
1206 }
1207 
1208 SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
1209                                                  SDValue Op,
1210                                                  SelectionDAG &DAG) const {
1211 
1212   const DataLayout &DL = DAG.getDataLayout();
1213   GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
1214   const GlobalValue *GV = G->getGlobal();
1215 
1216   if (G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
1217       G->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) {
1218     if (!MFI->isEntryFunction()) {
1219       const Function &Fn = DAG.getMachineFunction().getFunction();
1220       DiagnosticInfoUnsupported BadLDSDecl(
1221         Fn, "local memory global used by non-kernel function", SDLoc(Op).getDebugLoc());
1222       DAG.getContext()->diagnose(BadLDSDecl);
1223     }
1224 
1225     // XXX: What does the value of G->getOffset() mean?
1226     assert(G->getOffset() == 0 &&
1227          "Do not know what to do with an non-zero offset");
1228 
1229     // TODO: We could emit code to handle the initialization somewhere.
1230     if (!hasDefinedInitializer(GV)) {
1231       unsigned Offset = MFI->allocateLDSGlobal(DL, *GV);
1232       return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
1233     }
1234   }
1235 
1236   const Function &Fn = DAG.getMachineFunction().getFunction();
1237   DiagnosticInfoUnsupported BadInit(
1238       Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc());
1239   DAG.getContext()->diagnose(BadInit);
1240   return SDValue();
1241 }
1242 
1243 SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
1244                                                   SelectionDAG &DAG) const {
1245   SmallVector<SDValue, 8> Args;
1246 
1247   EVT VT = Op.getValueType();
1248   if (VT == MVT::v4i16 || VT == MVT::v4f16) {
1249     SDLoc SL(Op);
1250     SDValue Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(0));
1251     SDValue Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(1));
1252 
1253     SDValue BV = DAG.getBuildVector(MVT::v2i32, SL, { Lo, Hi });
1254     return DAG.getNode(ISD::BITCAST, SL, VT, BV);
1255   }
1256 
1257   for (const SDUse &U : Op->ops())
1258     DAG.ExtractVectorElements(U.get(), Args);
1259 
1260   return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
1261 }
1262 
1263 SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
1264                                                      SelectionDAG &DAG) const {
1265 
1266   SmallVector<SDValue, 8> Args;
1267   unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1268   EVT VT = Op.getValueType();
1269   DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
1270                             VT.getVectorNumElements());
1271 
1272   return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
1273 }
1274 
1275 /// Generate Min/Max node
1276 SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
1277                                                    SDValue LHS, SDValue RHS,
1278                                                    SDValue True, SDValue False,
1279                                                    SDValue CC,
1280                                                    DAGCombinerInfo &DCI) const {
1281   if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
1282     return SDValue();
1283 
1284   SelectionDAG &DAG = DCI.DAG;
1285   ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
1286   switch (CCOpcode) {
1287   case ISD::SETOEQ:
1288   case ISD::SETONE:
1289   case ISD::SETUNE:
1290   case ISD::SETNE:
1291   case ISD::SETUEQ:
1292   case ISD::SETEQ:
1293   case ISD::SETFALSE:
1294   case ISD::SETFALSE2:
1295   case ISD::SETTRUE:
1296   case ISD::SETTRUE2:
1297   case ISD::SETUO:
1298   case ISD::SETO:
1299     break;
1300   case ISD::SETULE:
1301   case ISD::SETULT: {
1302     if (LHS == True)
1303       return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
1304     return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
1305   }
1306   case ISD::SETOLE:
1307   case ISD::SETOLT:
1308   case ISD::SETLE:
1309   case ISD::SETLT: {
1310     // Ordered. Assume ordered for undefined.
1311 
1312     // Only do this after legalization to avoid interfering with other combines
1313     // which might occur.
1314     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
1315         !DCI.isCalledByLegalizer())
1316       return SDValue();
1317 
1318     // We need to permute the operands to get the correct NaN behavior. The
1319     // selected operand is the second one based on the failing compare with NaN,
1320     // so permute it based on the compare type the hardware uses.
1321     if (LHS == True)
1322       return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
1323     return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
1324   }
1325   case ISD::SETUGE:
1326   case ISD::SETUGT: {
1327     if (LHS == True)
1328       return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
1329     return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
1330   }
1331   case ISD::SETGT:
1332   case ISD::SETGE:
1333   case ISD::SETOGE:
1334   case ISD::SETOGT: {
1335     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
1336         !DCI.isCalledByLegalizer())
1337       return SDValue();
1338 
1339     if (LHS == True)
1340       return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
1341     return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
1342   }
1343   case ISD::SETCC_INVALID:
1344     llvm_unreachable("Invalid setcc condcode!");
1345   }
1346   return SDValue();
1347 }
1348 
1349 std::pair<SDValue, SDValue>
1350 AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
1351   SDLoc SL(Op);
1352 
1353   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1354 
1355   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1356   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1357 
1358   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1359   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1360 
1361   return std::make_pair(Lo, Hi);
1362 }
1363 
1364 SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
1365   SDLoc SL(Op);
1366 
1367   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1368   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1369   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1370 }
1371 
1372 SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
1373   SDLoc SL(Op);
1374 
1375   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1376   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1377   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1378 }
1379 
1380 // Split a vector type into two parts. The first part is a power of two vector.
1381 // The second part is whatever is left over, and is a scalar if it would
1382 // otherwise be a 1-vector.
1383 std::pair<EVT, EVT>
1384 AMDGPUTargetLowering::getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const {
1385   EVT LoVT, HiVT;
1386   EVT EltVT = VT.getVectorElementType();
1387   unsigned NumElts = VT.getVectorNumElements();
1388   unsigned LoNumElts = PowerOf2Ceil((NumElts + 1) / 2);
1389   LoVT = EVT::getVectorVT(*DAG.getContext(), EltVT, LoNumElts);
1390   HiVT = NumElts - LoNumElts == 1
1391              ? EltVT
1392              : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts);
1393   return std::make_pair(LoVT, HiVT);
1394 }
1395 
1396 // Split a vector value into two parts of types LoVT and HiVT. HiVT could be
1397 // scalar.
1398 std::pair<SDValue, SDValue>
1399 AMDGPUTargetLowering::splitVector(const SDValue &N, const SDLoc &DL,
1400                                   const EVT &LoVT, const EVT &HiVT,
1401                                   SelectionDAG &DAG) const {
1402   assert(LoVT.getVectorNumElements() +
1403                  (HiVT.isVector() ? HiVT.getVectorNumElements() : 1) <=
1404              N.getValueType().getVectorNumElements() &&
1405          "More vector elements requested than available!");
1406   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
1407                            DAG.getVectorIdxConstant(0, DL));
1408   SDValue Hi = DAG.getNode(
1409       HiVT.isVector() ? ISD::EXTRACT_SUBVECTOR : ISD::EXTRACT_VECTOR_ELT, DL,
1410       HiVT, N, DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), DL));
1411   return std::make_pair(Lo, Hi);
1412 }
1413 
1414 SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
1415                                               SelectionDAG &DAG) const {
1416   LoadSDNode *Load = cast<LoadSDNode>(Op);
1417   EVT VT = Op.getValueType();
1418   SDLoc SL(Op);
1419 
1420 
1421   // If this is a 2 element vector, we really want to scalarize and not create
1422   // weird 1 element vectors.
1423   if (VT.getVectorNumElements() == 2) {
1424     SDValue Ops[2];
1425     std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
1426     return DAG.getMergeValues(Ops, SL);
1427   }
1428 
1429   SDValue BasePtr = Load->getBasePtr();
1430   EVT MemVT = Load->getMemoryVT();
1431 
1432   const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1433 
1434   EVT LoVT, HiVT;
1435   EVT LoMemVT, HiMemVT;
1436   SDValue Lo, Hi;
1437 
1438   std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
1439   std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
1440   std::tie(Lo, Hi) = splitVector(Op, SL, LoVT, HiVT, DAG);
1441 
1442   unsigned Size = LoMemVT.getStoreSize();
1443   unsigned BaseAlign = Load->getAlignment();
1444   unsigned HiAlign = MinAlign(BaseAlign, Size);
1445 
1446   SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
1447                                   Load->getChain(), BasePtr, SrcValue, LoMemVT,
1448                                   BaseAlign, Load->getMemOperand()->getFlags());
1449   SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, Size);
1450   SDValue HiLoad =
1451       DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
1452                      HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
1453                      HiMemVT, HiAlign, Load->getMemOperand()->getFlags());
1454 
1455   SDValue Join;
1456   if (LoVT == HiVT) {
1457     // This is the case that the vector is power of two so was evenly split.
1458     Join = DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad);
1459   } else {
1460     Join = DAG.getNode(ISD::INSERT_SUBVECTOR, SL, VT, DAG.getUNDEF(VT), LoLoad,
1461                        DAG.getVectorIdxConstant(0, SL));
1462     Join = DAG.getNode(
1463         HiVT.isVector() ? ISD::INSERT_SUBVECTOR : ISD::INSERT_VECTOR_ELT, SL,
1464         VT, Join, HiLoad,
1465         DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), SL));
1466   }
1467 
1468   SDValue Ops[] = {Join, DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
1469                                      LoLoad.getValue(1), HiLoad.getValue(1))};
1470 
1471   return DAG.getMergeValues(Ops, SL);
1472 }
1473 
1474 // Widen a vector load from vec3 to vec4.
1475 SDValue AMDGPUTargetLowering::WidenVectorLoad(SDValue Op,
1476                                               SelectionDAG &DAG) const {
1477   LoadSDNode *Load = cast<LoadSDNode>(Op);
1478   EVT VT = Op.getValueType();
1479   assert(VT.getVectorNumElements() == 3);
1480   SDValue BasePtr = Load->getBasePtr();
1481   EVT MemVT = Load->getMemoryVT();
1482   SDLoc SL(Op);
1483   const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1484   unsigned BaseAlign = Load->getAlignment();
1485 
1486   EVT WideVT =
1487       EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
1488   EVT WideMemVT =
1489       EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), 4);
1490   SDValue WideLoad = DAG.getExtLoad(
1491       Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue,
1492       WideMemVT, BaseAlign, Load->getMemOperand()->getFlags());
1493   return DAG.getMergeValues(
1494       {DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, VT, WideLoad,
1495                    DAG.getVectorIdxConstant(0, SL)),
1496        WideLoad.getValue(1)},
1497       SL);
1498 }
1499 
1500 SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
1501                                                SelectionDAG &DAG) const {
1502   StoreSDNode *Store = cast<StoreSDNode>(Op);
1503   SDValue Val = Store->getValue();
1504   EVT VT = Val.getValueType();
1505 
1506   // If this is a 2 element vector, we really want to scalarize and not create
1507   // weird 1 element vectors.
1508   if (VT.getVectorNumElements() == 2)
1509     return scalarizeVectorStore(Store, DAG);
1510 
1511   EVT MemVT = Store->getMemoryVT();
1512   SDValue Chain = Store->getChain();
1513   SDValue BasePtr = Store->getBasePtr();
1514   SDLoc SL(Op);
1515 
1516   EVT LoVT, HiVT;
1517   EVT LoMemVT, HiMemVT;
1518   SDValue Lo, Hi;
1519 
1520   std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
1521   std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
1522   std::tie(Lo, Hi) = splitVector(Val, SL, LoVT, HiVT, DAG);
1523 
1524   SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize());
1525 
1526   const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
1527   unsigned BaseAlign = Store->getAlignment();
1528   unsigned Size = LoMemVT.getStoreSize();
1529   unsigned HiAlign = MinAlign(BaseAlign, Size);
1530 
1531   SDValue LoStore =
1532       DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
1533                         Store->getMemOperand()->getFlags());
1534   SDValue HiStore =
1535       DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
1536                         HiMemVT, HiAlign, Store->getMemOperand()->getFlags());
1537 
1538   return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
1539 }
1540 
1541 // This is a shortcut for integer division because we have fast i32<->f32
1542 // conversions, and fast f32 reciprocal instructions. The fractional part of a
1543 // float is enough to accurately represent up to a 24-bit signed integer.
1544 SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
1545                                             bool Sign) const {
1546   SDLoc DL(Op);
1547   EVT VT = Op.getValueType();
1548   SDValue LHS = Op.getOperand(0);
1549   SDValue RHS = Op.getOperand(1);
1550   MVT IntVT = MVT::i32;
1551   MVT FltVT = MVT::f32;
1552 
1553   unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
1554   if (LHSSignBits < 9)
1555     return SDValue();
1556 
1557   unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
1558   if (RHSSignBits < 9)
1559     return SDValue();
1560 
1561   unsigned BitSize = VT.getSizeInBits();
1562   unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
1563   unsigned DivBits = BitSize - SignBits;
1564   if (Sign)
1565     ++DivBits;
1566 
1567   ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
1568   ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
1569 
1570   SDValue jq = DAG.getConstant(1, DL, IntVT);
1571 
1572   if (Sign) {
1573     // char|short jq = ia ^ ib;
1574     jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
1575 
1576     // jq = jq >> (bitsize - 2)
1577     jq = DAG.getNode(ISD::SRA, DL, VT, jq,
1578                      DAG.getConstant(BitSize - 2, DL, VT));
1579 
1580     // jq = jq | 0x1
1581     jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
1582   }
1583 
1584   // int ia = (int)LHS;
1585   SDValue ia = LHS;
1586 
1587   // int ib, (int)RHS;
1588   SDValue ib = RHS;
1589 
1590   // float fa = (float)ia;
1591   SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);
1592 
1593   // float fb = (float)ib;
1594   SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);
1595 
1596   SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
1597                            fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));
1598 
1599   // fq = trunc(fq);
1600   fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);
1601 
1602   // float fqneg = -fq;
1603   SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);
1604 
1605   MachineFunction &MF = DAG.getMachineFunction();
1606   const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
1607 
1608   // float fr = mad(fqneg, fb, fa);
1609   unsigned OpCode = MFI->getMode().allFP32Denormals() ?
1610                     (unsigned)AMDGPUISD::FMAD_FTZ :
1611                     (unsigned)ISD::FMAD;
1612   SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa);
1613 
1614   // int iq = (int)fq;
1615   SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);
1616 
1617   // fr = fabs(fr);
1618   fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);
1619 
1620   // fb = fabs(fb);
1621   fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);
1622 
1623   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
1624 
1625   // int cv = fr >= fb;
1626   SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);
1627 
1628   // jq = (cv ? jq : 0);
1629   jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
1630 
1631   // dst = iq + jq;
1632   SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);
1633 
1634   // Rem needs compensation, it's easier to recompute it
1635   SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
1636   Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
1637 
1638   // Truncate to number of bits this divide really is.
1639   if (Sign) {
1640     SDValue InRegSize
1641       = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits));
1642     Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize);
1643     Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize);
1644   } else {
1645     SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
1646     Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask);
1647     Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask);
1648   }
1649 
1650   return DAG.getMergeValues({ Div, Rem }, DL);
1651 }
1652 
1653 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
1654                                       SelectionDAG &DAG,
1655                                       SmallVectorImpl<SDValue> &Results) const {
1656   SDLoc DL(Op);
1657   EVT VT = Op.getValueType();
1658 
1659   assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64");
1660 
1661   EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1662 
1663   SDValue One = DAG.getConstant(1, DL, HalfVT);
1664   SDValue Zero = DAG.getConstant(0, DL, HalfVT);
1665 
1666   //HiLo split
1667   SDValue LHS = Op.getOperand(0);
1668   SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
1669   SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, One);
1670 
1671   SDValue RHS = Op.getOperand(1);
1672   SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
1673   SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, One);
1674 
1675   if (DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
1676       DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {
1677 
1678     SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1679                               LHS_Lo, RHS_Lo);
1680 
1681     SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), Zero});
1682     SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), Zero});
1683 
1684     Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
1685     Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
1686     return;
1687   }
1688 
1689   if (isTypeLegal(MVT::i64)) {
1690     MachineFunction &MF = DAG.getMachineFunction();
1691     const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1692 
1693     // Compute denominator reciprocal.
1694     unsigned FMAD = MFI->getMode().allFP32Denormals() ?
1695                     (unsigned)AMDGPUISD::FMAD_FTZ :
1696                     (unsigned)ISD::FMAD;
1697 
1698     SDValue Cvt_Lo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Lo);
1699     SDValue Cvt_Hi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Hi);
1700     SDValue Mad1 = DAG.getNode(FMAD, DL, MVT::f32, Cvt_Hi,
1701       DAG.getConstantFP(APInt(32, 0x4f800000).bitsToFloat(), DL, MVT::f32),
1702       Cvt_Lo);
1703     SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, DL, MVT::f32, Mad1);
1704     SDValue Mul1 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Rcp,
1705       DAG.getConstantFP(APInt(32, 0x5f7ffffc).bitsToFloat(), DL, MVT::f32));
1706     SDValue Mul2 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Mul1,
1707       DAG.getConstantFP(APInt(32, 0x2f800000).bitsToFloat(), DL, MVT::f32));
1708     SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, MVT::f32, Mul2);
1709     SDValue Mad2 = DAG.getNode(FMAD, DL, MVT::f32, Trunc,
1710       DAG.getConstantFP(APInt(32, 0xcf800000).bitsToFloat(), DL, MVT::f32),
1711       Mul1);
1712     SDValue Rcp_Lo = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Mad2);
1713     SDValue Rcp_Hi = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Trunc);
1714     SDValue Rcp64 = DAG.getBitcast(VT,
1715                         DAG.getBuildVector(MVT::v2i32, DL, {Rcp_Lo, Rcp_Hi}));
1716 
1717     SDValue Zero64 = DAG.getConstant(0, DL, VT);
1718     SDValue One64  = DAG.getConstant(1, DL, VT);
1719     SDValue Zero1 = DAG.getConstant(0, DL, MVT::i1);
1720     SDVTList HalfCarryVT = DAG.getVTList(HalfVT, MVT::i1);
1721 
1722     SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS);
1723     SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64);
1724     SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1);
1725     SDValue Mulhi1_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
1726                                     Zero);
1727     SDValue Mulhi1_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
1728                                     One);
1729 
1730     SDValue Add1_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Lo,
1731                                   Mulhi1_Lo, Zero1);
1732     SDValue Add1_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Hi,
1733                                   Mulhi1_Hi, Add1_Lo.getValue(1));
1734     SDValue Add1_HiNc = DAG.getNode(ISD::ADD, DL, HalfVT, Rcp_Hi, Mulhi1_Hi);
1735     SDValue Add1 = DAG.getBitcast(VT,
1736                         DAG.getBuildVector(MVT::v2i32, DL, {Add1_Lo, Add1_Hi}));
1737 
1738     SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1);
1739     SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2);
1740     SDValue Mulhi2_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
1741                                     Zero);
1742     SDValue Mulhi2_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
1743                                     One);
1744 
1745     SDValue Add2_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_Lo,
1746                                   Mulhi2_Lo, Zero1);
1747     SDValue Add2_HiC = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_HiNc,
1748                                    Mulhi2_Hi, Add1_Lo.getValue(1));
1749     SDValue Add2_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add2_HiC,
1750                                   Zero, Add2_Lo.getValue(1));
1751     SDValue Add2 = DAG.getBitcast(VT,
1752                         DAG.getBuildVector(MVT::v2i32, DL, {Add2_Lo, Add2_Hi}));
1753     SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2);
1754 
1755     SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3);
1756 
1757     SDValue Mul3_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, Zero);
1758     SDValue Mul3_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, One);
1759     SDValue Sub1_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Lo,
1760                                   Mul3_Lo, Zero1);
1761     SDValue Sub1_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Hi,
1762                                   Mul3_Hi, Sub1_Lo.getValue(1));
1763     SDValue Sub1_Mi = DAG.getNode(ISD::SUB, DL, HalfVT, LHS_Hi, Mul3_Hi);
1764     SDValue Sub1 = DAG.getBitcast(VT,
1765                         DAG.getBuildVector(MVT::v2i32, DL, {Sub1_Lo, Sub1_Hi}));
1766 
1767     SDValue MinusOne = DAG.getConstant(0xffffffffu, DL, HalfVT);
1768     SDValue C1 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, MinusOne, Zero,
1769                                  ISD::SETUGE);
1770     SDValue C2 = DAG.getSelectCC(DL, Sub1_Lo, RHS_Lo, MinusOne, Zero,
1771                                  ISD::SETUGE);
1772     SDValue C3 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, C2, C1, ISD::SETEQ);
1773 
1774     // TODO: Here and below portions of the code can be enclosed into if/endif.
1775     // Currently control flow is unconditional and we have 4 selects after
1776     // potential endif to substitute PHIs.
1777 
1778     // if C3 != 0 ...
1779     SDValue Sub2_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Lo,
1780                                   RHS_Lo, Zero1);
1781     SDValue Sub2_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Mi,
1782                                   RHS_Hi, Sub1_Lo.getValue(1));
1783     SDValue Sub2_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1784                                   Zero, Sub2_Lo.getValue(1));
1785     SDValue Sub2 = DAG.getBitcast(VT,
1786                         DAG.getBuildVector(MVT::v2i32, DL, {Sub2_Lo, Sub2_Hi}));
1787 
1788     SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64);
1789 
1790     SDValue C4 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, MinusOne, Zero,
1791                                  ISD::SETUGE);
1792     SDValue C5 = DAG.getSelectCC(DL, Sub2_Lo, RHS_Lo, MinusOne, Zero,
1793                                  ISD::SETUGE);
1794     SDValue C6 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, C5, C4, ISD::SETEQ);
1795 
1796     // if (C6 != 0)
1797     SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64);
1798 
1799     SDValue Sub3_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Lo,
1800                                   RHS_Lo, Zero1);
1801     SDValue Sub3_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1802                                   RHS_Hi, Sub2_Lo.getValue(1));
1803     SDValue Sub3_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub3_Mi,
1804                                   Zero, Sub3_Lo.getValue(1));
1805     SDValue Sub3 = DAG.getBitcast(VT,
1806                         DAG.getBuildVector(MVT::v2i32, DL, {Sub3_Lo, Sub3_Hi}));
1807 
1808     // endif C6
1809     // endif C3
1810 
1811     SDValue Sel1 = DAG.getSelectCC(DL, C6, Zero, Add4, Add3, ISD::SETNE);
1812     SDValue Div  = DAG.getSelectCC(DL, C3, Zero, Sel1, Mulhi3, ISD::SETNE);
1813 
1814     SDValue Sel2 = DAG.getSelectCC(DL, C6, Zero, Sub3, Sub2, ISD::SETNE);
1815     SDValue Rem  = DAG.getSelectCC(DL, C3, Zero, Sel2, Sub1, ISD::SETNE);
1816 
1817     Results.push_back(Div);
1818     Results.push_back(Rem);
1819 
1820     return;
1821   }
1822 
1823   // r600 expandion.
1824   // Get Speculative values
1825   SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
1826   SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
1827 
1828   SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, Zero, REM_Part, LHS_Hi, ISD::SETEQ);
1829   SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, Zero});
1830   REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
1831 
1832   SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, Zero, DIV_Part, Zero, ISD::SETEQ);
1833   SDValue DIV_Lo = Zero;
1834 
1835   const unsigned halfBitWidth = HalfVT.getSizeInBits();
1836 
1837   for (unsigned i = 0; i < halfBitWidth; ++i) {
1838     const unsigned bitPos = halfBitWidth - i - 1;
1839     SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
1840     // Get value of high bit
1841     SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
1842     HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, One);
1843     HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
1844 
1845     // Shift
1846     REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
1847     // Add LHS high bit
1848     REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
1849 
1850     SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
1851     SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, Zero, ISD::SETUGE);
1852 
1853     DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
1854 
1855     // Update REM
1856     SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
1857     REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
1858   }
1859 
1860   SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
1861   DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
1862   Results.push_back(DIV);
1863   Results.push_back(REM);
1864 }
1865 
1866 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
1867                                            SelectionDAG &DAG) const {
1868   SDLoc DL(Op);
1869   EVT VT = Op.getValueType();
1870 
1871   if (VT == MVT::i64) {
1872     SmallVector<SDValue, 2> Results;
1873     LowerUDIVREM64(Op, DAG, Results);
1874     return DAG.getMergeValues(Results, DL);
1875   }
1876 
1877   if (VT == MVT::i32) {
1878     if (SDValue Res = LowerDIVREM24(Op, DAG, false))
1879       return Res;
1880   }
1881 
1882   SDValue Num = Op.getOperand(0);
1883   SDValue Den = Op.getOperand(1);
1884 
1885   // RCP =  URECIP(Den) = 2^32 / Den + e
1886   // e is rounding error.
1887   SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);
1888 
1889   // RCP_LO = mul(RCP, Den) */
1890   SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den);
1891 
1892   // RCP_HI = mulhu (RCP, Den) */
1893   SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);
1894 
1895   // NEG_RCP_LO = -RCP_LO
1896   SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
1897                                                      RCP_LO);
1898 
1899   // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
1900   SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
1901                                            NEG_RCP_LO, RCP_LO,
1902                                            ISD::SETEQ);
1903   // Calculate the rounding error from the URECIP instruction
1904   // E = mulhu(ABS_RCP_LO, RCP)
1905   SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);
1906 
1907   // RCP_A_E = RCP + E
1908   SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);
1909 
1910   // RCP_S_E = RCP - E
1911   SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);
1912 
1913   // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_SUB_E)
1914   SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
1915                                      RCP_A_E, RCP_S_E,
1916                                      ISD::SETEQ);
1917   // Quotient = mulhu(Tmp0, Num)
1918   SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);
1919 
1920   // Num_S_Remainder = Quotient * Den
1921   SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den);
1922 
1923   // Remainder = Num - Num_S_Remainder
1924   SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);
1925 
1926   // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
1927   SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
1928                                                  DAG.getConstant(-1, DL, VT),
1929                                                  DAG.getConstant(0, DL, VT),
1930                                                  ISD::SETUGE);
1931   // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
1932   SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
1933                                                   Num_S_Remainder,
1934                                                   DAG.getConstant(-1, DL, VT),
1935                                                   DAG.getConstant(0, DL, VT),
1936                                                   ISD::SETUGE);
1937   // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
1938   SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
1939                                                Remainder_GE_Zero);
1940 
1941   // Calculate Division result:
1942 
1943   // Quotient_A_One = Quotient + 1
1944   SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
1945                                        DAG.getConstant(1, DL, VT));
1946 
1947   // Quotient_S_One = Quotient - 1
1948   SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
1949                                        DAG.getConstant(1, DL, VT));
1950 
1951   // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
1952   SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
1953                                      Quotient, Quotient_A_One, ISD::SETEQ);
1954 
1955   // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
1956   Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
1957                             Quotient_S_One, Div, ISD::SETEQ);
1958 
1959   // Calculate Rem result:
1960 
1961   // Remainder_S_Den = Remainder - Den
1962   SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);
1963 
1964   // Remainder_A_Den = Remainder + Den
1965   SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);
1966 
1967   // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
1968   SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
1969                                     Remainder, Remainder_S_Den, ISD::SETEQ);
1970 
1971   // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
1972   Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
1973                             Remainder_A_Den, Rem, ISD::SETEQ);
1974   SDValue Ops[2] = {
1975     Div,
1976     Rem
1977   };
1978   return DAG.getMergeValues(Ops, DL);
1979 }
1980 
1981 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
1982                                            SelectionDAG &DAG) const {
1983   SDLoc DL(Op);
1984   EVT VT = Op.getValueType();
1985 
1986   SDValue LHS = Op.getOperand(0);
1987   SDValue RHS = Op.getOperand(1);
1988 
1989   SDValue Zero = DAG.getConstant(0, DL, VT);
1990   SDValue NegOne = DAG.getConstant(-1, DL, VT);
1991 
1992   if (VT == MVT::i32) {
1993     if (SDValue Res = LowerDIVREM24(Op, DAG, true))
1994       return Res;
1995   }
1996 
1997   if (VT == MVT::i64 &&
1998       DAG.ComputeNumSignBits(LHS) > 32 &&
1999       DAG.ComputeNumSignBits(RHS) > 32) {
2000     EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
2001 
2002     //HiLo split
2003     SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
2004     SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
2005     SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
2006                                  LHS_Lo, RHS_Lo);
2007     SDValue Res[2] = {
2008       DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
2009       DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
2010     };
2011     return DAG.getMergeValues(Res, DL);
2012   }
2013 
2014   SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
2015   SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
2016   SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
2017   SDValue RSign = LHSign; // Remainder sign is the same as LHS
2018 
2019   LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
2020   RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
2021 
2022   LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
2023   RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
2024 
2025   SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
2026   SDValue Rem = Div.getValue(1);
2027 
2028   Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
2029   Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
2030 
2031   Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
2032   Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
2033 
2034   SDValue Res[2] = {
2035     Div,
2036     Rem
2037   };
2038   return DAG.getMergeValues(Res, DL);
2039 }
2040 
2041 // (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y))
2042 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
2043   SDLoc SL(Op);
2044   EVT VT = Op.getValueType();
2045   SDValue X = Op.getOperand(0);
2046   SDValue Y = Op.getOperand(1);
2047 
2048   // TODO: Should this propagate fast-math-flags?
2049 
2050   SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y);
2051   SDValue Floor = DAG.getNode(ISD::FTRUNC, SL, VT, Div);
2052   SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Floor, Y);
2053 
2054   return DAG.getNode(ISD::FSUB, SL, VT, X, Mul);
2055 }
2056 
2057 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
2058   SDLoc SL(Op);
2059   SDValue Src = Op.getOperand(0);
2060 
2061   // result = trunc(src)
2062   // if (src > 0.0 && src != result)
2063   //   result += 1.0
2064 
2065   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2066 
2067   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2068   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
2069 
2070   EVT SetCCVT =
2071       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2072 
2073   SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
2074   SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2075   SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2076 
2077   SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
2078   // TODO: Should this propagate fast-math-flags?
2079   return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2080 }
2081 
2082 static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
2083                                   SelectionDAG &DAG) {
2084   const unsigned FractBits = 52;
2085   const unsigned ExpBits = 11;
2086 
2087   SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
2088                                 Hi,
2089                                 DAG.getConstant(FractBits - 32, SL, MVT::i32),
2090                                 DAG.getConstant(ExpBits, SL, MVT::i32));
2091   SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
2092                             DAG.getConstant(1023, SL, MVT::i32));
2093 
2094   return Exp;
2095 }
2096 
2097 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
2098   SDLoc SL(Op);
2099   SDValue Src = Op.getOperand(0);
2100 
2101   assert(Op.getValueType() == MVT::f64);
2102 
2103   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2104   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2105 
2106   SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2107 
2108   // Extract the upper half, since this is where we will find the sign and
2109   // exponent.
2110   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);
2111 
2112   SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2113 
2114   const unsigned FractBits = 52;
2115 
2116   // Extract the sign bit.
2117   const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
2118   SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
2119 
2120   // Extend back to 64-bits.
2121   SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
2122   SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
2123 
2124   SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
2125   const SDValue FractMask
2126     = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
2127 
2128   SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
2129   SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
2130   SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
2131 
2132   EVT SetCCVT =
2133       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
2134 
2135   const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
2136 
2137   SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
2138   SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
2139 
2140   SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
2141   SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
2142 
2143   return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
2144 }
2145 
2146 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
2147   SDLoc SL(Op);
2148   SDValue Src = Op.getOperand(0);
2149 
2150   assert(Op.getValueType() == MVT::f64);
2151 
2152   APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
2153   SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
2154   SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
2155 
2156   // TODO: Should this propagate fast-math-flags?
2157 
2158   SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
2159   SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
2160 
2161   SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
2162 
2163   APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");
2164   SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);
2165 
2166   EVT SetCCVT =
2167       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2168   SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
2169 
2170   return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
2171 }
2172 
2173 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
2174   // FNEARBYINT and FRINT are the same, except in their handling of FP
2175   // exceptions. Those aren't really meaningful for us, and OpenCL only has
2176   // rint, so just treat them as equivalent.
2177   return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
2178 }
2179 
2180 // XXX - May require not supporting f32 denormals?
2181 
2182 // Don't handle v2f16. The extra instructions to scalarize and repack around the
2183 // compare and vselect end up producing worse code than scalarizing the whole
2184 // operation.
2185 SDValue AMDGPUTargetLowering::LowerFROUND_LegalFTRUNC(SDValue Op,
2186                                                       SelectionDAG &DAG) const {
2187   SDLoc SL(Op);
2188   SDValue X = Op.getOperand(0);
2189   EVT VT = Op.getValueType();
2190 
2191   SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X);
2192 
2193   // TODO: Should this propagate fast-math-flags?
2194 
2195   SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T);
2196 
2197   SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff);
2198 
2199   const SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
2200   const SDValue One = DAG.getConstantFP(1.0, SL, VT);
2201   const SDValue Half = DAG.getConstantFP(0.5, SL, VT);
2202 
2203   SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, VT, One, X);
2204 
2205   EVT SetCCVT =
2206       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2207 
2208   SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
2209 
2210   SDValue Sel = DAG.getNode(ISD::SELECT, SL, VT, Cmp, SignOne, Zero);
2211 
2212   return DAG.getNode(ISD::FADD, SL, VT, T, Sel);
2213 }
2214 
2215 SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const {
2216   SDLoc SL(Op);
2217   SDValue X = Op.getOperand(0);
2218 
2219   SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X);
2220 
2221   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2222   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2223   const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32);
2224   const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32);
2225   EVT SetCCVT =
2226       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
2227 
2228   SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
2229 
2230   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One);
2231 
2232   SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2233 
2234   const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL,
2235                                        MVT::i64);
2236 
2237   SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp);
2238   SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64,
2239                           DAG.getConstant(INT64_C(0x0008000000000000), SL,
2240                                           MVT::i64),
2241                           Exp);
2242 
2243   SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M);
2244   SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT,
2245                               DAG.getConstant(0, SL, MVT::i64), Tmp0,
2246                               ISD::SETNE);
2247 
2248   SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1,
2249                              D, DAG.getConstant(0, SL, MVT::i64));
2250   SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2);
2251 
2252   K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64));
2253   K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K);
2254 
2255   SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
2256   SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
2257   SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ);
2258 
2259   SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64,
2260                             ExpEqNegOne,
2261                             DAG.getConstantFP(1.0, SL, MVT::f64),
2262                             DAG.getConstantFP(0.0, SL, MVT::f64));
2263 
2264   SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X);
2265 
2266   K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K);
2267   K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K);
2268 
2269   return K;
2270 }
2271 
2272 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2273   EVT VT = Op.getValueType();
2274 
2275   if (isOperationLegal(ISD::FTRUNC, VT))
2276     return LowerFROUND_LegalFTRUNC(Op, DAG);
2277 
2278   if (VT == MVT::f64)
2279     return LowerFROUND64(Op, DAG);
2280 
2281   llvm_unreachable("unhandled type");
2282 }
2283 
2284 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
2285   SDLoc SL(Op);
2286   SDValue Src = Op.getOperand(0);
2287 
2288   // result = trunc(src);
2289   // if (src < 0.0 && src != result)
2290   //   result += -1.0.
2291 
2292   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2293 
2294   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2295   const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
2296 
2297   EVT SetCCVT =
2298       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2299 
2300   SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
2301   SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2302   SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2303 
2304   SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
2305   // TODO: Should this propagate fast-math-flags?
2306   return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2307 }
2308 
2309 SDValue AMDGPUTargetLowering::LowerFLOG(SDValue Op, SelectionDAG &DAG,
2310                                         double Log2BaseInverted) const {
2311   EVT VT = Op.getValueType();
2312 
2313   SDLoc SL(Op);
2314   SDValue Operand = Op.getOperand(0);
2315   SDValue Log2Operand = DAG.getNode(ISD::FLOG2, SL, VT, Operand);
2316   SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT);
2317 
2318   return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand);
2319 }
2320 
2321 // exp2(M_LOG2E_F * f);
2322 SDValue AMDGPUTargetLowering::lowerFEXP(SDValue Op, SelectionDAG &DAG) const {
2323   EVT VT = Op.getValueType();
2324   SDLoc SL(Op);
2325   SDValue Src = Op.getOperand(0);
2326 
2327   const SDValue K = DAG.getConstantFP(numbers::log2e, SL, VT);
2328   SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Src, K, Op->getFlags());
2329   return DAG.getNode(ISD::FEXP2, SL, VT, Mul, Op->getFlags());
2330 }
2331 
2332 static bool isCtlzOpc(unsigned Opc) {
2333   return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
2334 }
2335 
2336 static bool isCttzOpc(unsigned Opc) {
2337   return Opc == ISD::CTTZ || Opc == ISD::CTTZ_ZERO_UNDEF;
2338 }
2339 
2340 SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const {
2341   SDLoc SL(Op);
2342   SDValue Src = Op.getOperand(0);
2343   bool ZeroUndef = Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
2344                    Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF;
2345 
2346   unsigned ISDOpc, NewOpc;
2347   if (isCtlzOpc(Op.getOpcode())) {
2348     ISDOpc = ISD::CTLZ_ZERO_UNDEF;
2349     NewOpc = AMDGPUISD::FFBH_U32;
2350   } else if (isCttzOpc(Op.getOpcode())) {
2351     ISDOpc = ISD::CTTZ_ZERO_UNDEF;
2352     NewOpc = AMDGPUISD::FFBL_B32;
2353   } else
2354     llvm_unreachable("Unexpected OPCode!!!");
2355 
2356 
2357   if (ZeroUndef && Src.getValueType() == MVT::i32)
2358     return DAG.getNode(NewOpc, SL, MVT::i32, Src);
2359 
2360   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2361 
2362   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2363   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2364 
2365   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
2366   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
2367 
2368   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
2369                                    *DAG.getContext(), MVT::i32);
2370 
2371   SDValue HiOrLo = isCtlzOpc(Op.getOpcode()) ? Hi : Lo;
2372   SDValue Hi0orLo0 = DAG.getSetCC(SL, SetCCVT, HiOrLo, Zero, ISD::SETEQ);
2373 
2374   SDValue OprLo = DAG.getNode(ISDOpc, SL, MVT::i32, Lo);
2375   SDValue OprHi = DAG.getNode(ISDOpc, SL, MVT::i32, Hi);
2376 
2377   const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32);
2378   SDValue Add, NewOpr;
2379   if (isCtlzOpc(Op.getOpcode())) {
2380     Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprLo, Bits32);
2381     // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x))
2382     NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprHi);
2383   } else {
2384     Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprHi, Bits32);
2385     // cttz(x) = lo_32(x) == 0 ? cttz(hi_32(x)) + 32 : cttz(lo_32(x))
2386     NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprLo);
2387   }
2388 
2389   if (!ZeroUndef) {
2390     // Test if the full 64-bit input is zero.
2391 
2392     // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32,
2393     // which we probably don't want.
2394     SDValue LoOrHi = isCtlzOpc(Op.getOpcode()) ? Lo : Hi;
2395     SDValue Lo0OrHi0 = DAG.getSetCC(SL, SetCCVT, LoOrHi, Zero, ISD::SETEQ);
2396     SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0OrHi0, Hi0orLo0);
2397 
2398     // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction
2399     // with the same cycles, otherwise it is slower.
2400     // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src,
2401     // DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ);
2402 
2403     const SDValue Bits32 = DAG.getConstant(64, SL, MVT::i32);
2404 
2405     // The instruction returns -1 for 0 input, but the defined intrinsic
2406     // behavior is to return the number of bits.
2407     NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32,
2408                          SrcIsZero, Bits32, NewOpr);
2409   }
2410 
2411   return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr);
2412 }
2413 
2414 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
2415                                                bool Signed) const {
2416   // Unsigned
2417   // cul2f(ulong u)
2418   //{
2419   //  uint lz = clz(u);
2420   //  uint e = (u != 0) ? 127U + 63U - lz : 0;
2421   //  u = (u << lz) & 0x7fffffffffffffffUL;
2422   //  ulong t = u & 0xffffffffffUL;
2423   //  uint v = (e << 23) | (uint)(u >> 40);
2424   //  uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
2425   //  return as_float(v + r);
2426   //}
2427   // Signed
2428   // cl2f(long l)
2429   //{
2430   //  long s = l >> 63;
2431   //  float r = cul2f((l + s) ^ s);
2432   //  return s ? -r : r;
2433   //}
2434 
2435   SDLoc SL(Op);
2436   SDValue Src = Op.getOperand(0);
2437   SDValue L = Src;
2438 
2439   SDValue S;
2440   if (Signed) {
2441     const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64);
2442     S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit);
2443 
2444     SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S);
2445     L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S);
2446   }
2447 
2448   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
2449                                    *DAG.getContext(), MVT::f32);
2450 
2451 
2452   SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32);
2453   SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64);
2454   SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L);
2455   LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ);
2456 
2457   SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32);
2458   SDValue E = DAG.getSelect(SL, MVT::i32,
2459     DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE),
2460     DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ),
2461     ZeroI32);
2462 
2463   SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64,
2464     DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ),
2465     DAG.getConstant((-1ULL) >> 1, SL, MVT::i64));
2466 
2467   SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U,
2468                           DAG.getConstant(0xffffffffffULL, SL, MVT::i64));
2469 
2470   SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64,
2471                              U, DAG.getConstant(40, SL, MVT::i64));
2472 
2473   SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32,
2474     DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)),
2475     DAG.getNode(ISD::TRUNCATE, SL, MVT::i32,  UShl));
2476 
2477   SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64);
2478   SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT);
2479   SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ);
2480 
2481   SDValue One = DAG.getConstant(1, SL, MVT::i32);
2482 
2483   SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One);
2484 
2485   SDValue R = DAG.getSelect(SL, MVT::i32,
2486     RCmp,
2487     One,
2488     DAG.getSelect(SL, MVT::i32, TCmp, VTrunc1, ZeroI32));
2489   R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R);
2490   R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R);
2491 
2492   if (!Signed)
2493     return R;
2494 
2495   SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R);
2496   return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R);
2497 }
2498 
2499 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
2500                                                bool Signed) const {
2501   SDLoc SL(Op);
2502   SDValue Src = Op.getOperand(0);
2503 
2504   SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2505 
2506   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
2507                            DAG.getConstant(0, SL, MVT::i32));
2508   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
2509                            DAG.getConstant(1, SL, MVT::i32));
2510 
2511   SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
2512                               SL, MVT::f64, Hi);
2513 
2514   SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
2515 
2516   SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
2517                               DAG.getConstant(32, SL, MVT::i32));
2518   // TODO: Should this propagate fast-math-flags?
2519   return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
2520 }
2521 
2522 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
2523                                                SelectionDAG &DAG) const {
2524   // TODO: Factor out code common with LowerSINT_TO_FP.
2525   EVT DestVT = Op.getValueType();
2526   SDValue Src = Op.getOperand(0);
2527   EVT SrcVT = Src.getValueType();
2528 
2529   if (SrcVT == MVT::i16) {
2530     if (DestVT == MVT::f16)
2531       return Op;
2532     SDLoc DL(Op);
2533 
2534     // Promote src to i32
2535     SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Src);
2536     return DAG.getNode(ISD::UINT_TO_FP, DL, DestVT, Ext);
2537   }
2538 
2539   assert(SrcVT == MVT::i64 && "operation should be legal");
2540 
2541   if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2542     SDLoc DL(Op);
2543 
2544     SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2545     SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op));
2546     SDValue FPRound =
2547         DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2548 
2549     return FPRound;
2550   }
2551 
2552   if (DestVT == MVT::f32)
2553     return LowerINT_TO_FP32(Op, DAG, false);
2554 
2555   assert(DestVT == MVT::f64);
2556   return LowerINT_TO_FP64(Op, DAG, false);
2557 }
2558 
2559 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
2560                                               SelectionDAG &DAG) const {
2561   EVT DestVT = Op.getValueType();
2562 
2563   SDValue Src = Op.getOperand(0);
2564   EVT SrcVT = Src.getValueType();
2565 
2566   if (SrcVT == MVT::i16) {
2567     if (DestVT == MVT::f16)
2568       return Op;
2569 
2570     SDLoc DL(Op);
2571     // Promote src to i32
2572     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32, Src);
2573     return DAG.getNode(ISD::SINT_TO_FP, DL, DestVT, Ext);
2574   }
2575 
2576   assert(SrcVT == MVT::i64 && "operation should be legal");
2577 
2578   // TODO: Factor out code common with LowerUINT_TO_FP.
2579 
2580   if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2581     SDLoc DL(Op);
2582     SDValue Src = Op.getOperand(0);
2583 
2584     SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2585     SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op));
2586     SDValue FPRound =
2587         DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2588 
2589     return FPRound;
2590   }
2591 
2592   if (DestVT == MVT::f32)
2593     return LowerINT_TO_FP32(Op, DAG, true);
2594 
2595   assert(DestVT == MVT::f64);
2596   return LowerINT_TO_FP64(Op, DAG, true);
2597 }
2598 
2599 SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
2600                                                bool Signed) const {
2601   SDLoc SL(Op);
2602 
2603   SDValue Src = Op.getOperand(0);
2604 
2605   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2606 
2607   SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL,
2608                                  MVT::f64);
2609   SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL,
2610                                  MVT::f64);
2611   // TODO: Should this propagate fast-math-flags?
2612   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0);
2613 
2614   SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul);
2615 
2616 
2617   SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc);
2618 
2619   SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL,
2620                            MVT::i32, FloorMul);
2621   SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
2622 
2623   SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi});
2624 
2625   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result);
2626 }
2627 
2628 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const {
2629   SDLoc DL(Op);
2630   SDValue N0 = Op.getOperand(0);
2631 
2632   // Convert to target node to get known bits
2633   if (N0.getValueType() == MVT::f32)
2634     return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0);
2635 
2636   if (getTargetMachine().Options.UnsafeFPMath) {
2637     // There is a generic expand for FP_TO_FP16 with unsafe fast math.
2638     return SDValue();
2639   }
2640 
2641   assert(N0.getSimpleValueType() == MVT::f64);
2642 
2643   // f64 -> f16 conversion using round-to-nearest-even rounding mode.
2644   const unsigned ExpMask = 0x7ff;
2645   const unsigned ExpBiasf64 = 1023;
2646   const unsigned ExpBiasf16 = 15;
2647   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
2648   SDValue One = DAG.getConstant(1, DL, MVT::i32);
2649   SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0);
2650   SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U,
2651                            DAG.getConstant(32, DL, MVT::i64));
2652   UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32);
2653   U = DAG.getZExtOrTrunc(U, DL, MVT::i32);
2654   SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2655                           DAG.getConstant(20, DL, MVT::i64));
2656   E = DAG.getNode(ISD::AND, DL, MVT::i32, E,
2657                   DAG.getConstant(ExpMask, DL, MVT::i32));
2658   // Subtract the fp64 exponent bias (1023) to get the real exponent and
2659   // add the f16 bias (15) to get the biased exponent for the f16 format.
2660   E = DAG.getNode(ISD::ADD, DL, MVT::i32, E,
2661                   DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32));
2662 
2663   SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2664                           DAG.getConstant(8, DL, MVT::i32));
2665   M = DAG.getNode(ISD::AND, DL, MVT::i32, M,
2666                   DAG.getConstant(0xffe, DL, MVT::i32));
2667 
2668   SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH,
2669                                   DAG.getConstant(0x1ff, DL, MVT::i32));
2670   MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U);
2671 
2672   SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ);
2673   M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set);
2674 
2675   // (M != 0 ? 0x0200 : 0) | 0x7c00;
2676   SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32,
2677       DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32),
2678                       Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32));
2679 
2680   // N = M | (E << 12);
2681   SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2682       DAG.getNode(ISD::SHL, DL, MVT::i32, E,
2683                   DAG.getConstant(12, DL, MVT::i32)));
2684 
2685   // B = clamp(1-E, 0, 13);
2686   SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32,
2687                                   One, E);
2688   SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero);
2689   B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B,
2690                   DAG.getConstant(13, DL, MVT::i32));
2691 
2692   SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2693                                    DAG.getConstant(0x1000, DL, MVT::i32));
2694 
2695   SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B);
2696   SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B);
2697   SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE);
2698   D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1);
2699 
2700   SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT);
2701   SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V,
2702                               DAG.getConstant(0x7, DL, MVT::i32));
2703   V = DAG.getNode(ISD::SRL, DL, MVT::i32, V,
2704                   DAG.getConstant(2, DL, MVT::i32));
2705   SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32),
2706                                One, Zero, ISD::SETEQ);
2707   SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32),
2708                                One, Zero, ISD::SETGT);
2709   V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1);
2710   V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1);
2711 
2712   V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32),
2713                       DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT);
2714   V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32),
2715                       I, V, ISD::SETEQ);
2716 
2717   // Extract the sign bit.
2718   SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2719                             DAG.getConstant(16, DL, MVT::i32));
2720   Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign,
2721                      DAG.getConstant(0x8000, DL, MVT::i32));
2722 
2723   V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V);
2724   return DAG.getZExtOrTrunc(V, DL, Op.getValueType());
2725 }
2726 
2727 SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op,
2728                                               SelectionDAG &DAG) const {
2729   SDValue Src = Op.getOperand(0);
2730 
2731   // TODO: Factor out code common with LowerFP_TO_UINT.
2732 
2733   EVT SrcVT = Src.getValueType();
2734   if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) {
2735     SDLoc DL(Op);
2736 
2737     SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
2738     SDValue FpToInt32 =
2739         DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend);
2740 
2741     return FpToInt32;
2742   }
2743 
2744   if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
2745     return LowerFP64_TO_INT(Op, DAG, true);
2746 
2747   return SDValue();
2748 }
2749 
2750 SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op,
2751                                               SelectionDAG &DAG) const {
2752   SDValue Src = Op.getOperand(0);
2753 
2754   // TODO: Factor out code common with LowerFP_TO_SINT.
2755 
2756   EVT SrcVT = Src.getValueType();
2757   if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) {
2758     SDLoc DL(Op);
2759 
2760     SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
2761     SDValue FpToInt32 =
2762         DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend);
2763 
2764     return FpToInt32;
2765   }
2766 
2767   if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
2768     return LowerFP64_TO_INT(Op, DAG, false);
2769 
2770   return SDValue();
2771 }
2772 
2773 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
2774                                                      SelectionDAG &DAG) const {
2775   EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2776   MVT VT = Op.getSimpleValueType();
2777   MVT ScalarVT = VT.getScalarType();
2778 
2779   assert(VT.isVector());
2780 
2781   SDValue Src = Op.getOperand(0);
2782   SDLoc DL(Op);
2783 
2784   // TODO: Don't scalarize on Evergreen?
2785   unsigned NElts = VT.getVectorNumElements();
2786   SmallVector<SDValue, 8> Args;
2787   DAG.ExtractVectorElements(Src, Args, 0, NElts);
2788 
2789   SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
2790   for (unsigned I = 0; I < NElts; ++I)
2791     Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
2792 
2793   return DAG.getBuildVector(VT, DL, Args);
2794 }
2795 
2796 //===----------------------------------------------------------------------===//
2797 // Custom DAG optimizations
2798 //===----------------------------------------------------------------------===//
2799 
2800 static bool isU24(SDValue Op, SelectionDAG &DAG) {
2801   return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24;
2802 }
2803 
2804 static bool isI24(SDValue Op, SelectionDAG &DAG) {
2805   EVT VT = Op.getValueType();
2806   return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
2807                                      // as unsigned 24-bit values.
2808     AMDGPUTargetLowering::numBitsSigned(Op, DAG) < 24;
2809 }
2810 
2811 static SDValue simplifyI24(SDNode *Node24,
2812                            TargetLowering::DAGCombinerInfo &DCI) {
2813   SelectionDAG &DAG = DCI.DAG;
2814   bool IsIntrin = Node24->getOpcode() == ISD::INTRINSIC_WO_CHAIN;
2815 
2816   SDValue LHS = IsIntrin ? Node24->getOperand(1) : Node24->getOperand(0);
2817   SDValue RHS = IsIntrin ? Node24->getOperand(2) : Node24->getOperand(1);
2818   unsigned NewOpcode = Node24->getOpcode();
2819   if (IsIntrin) {
2820     unsigned IID = cast<ConstantSDNode>(Node24->getOperand(0))->getZExtValue();
2821     NewOpcode = IID == Intrinsic::amdgcn_mul_i24 ?
2822       AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
2823   }
2824 
2825   APInt Demanded = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 24);
2826 
2827   // First try to simplify using GetDemandedBits which allows the operands to
2828   // have other uses, but will only perform simplifications that involve
2829   // bypassing some nodes for this user.
2830   SDValue DemandedLHS = DAG.GetDemandedBits(LHS, Demanded);
2831   SDValue DemandedRHS = DAG.GetDemandedBits(RHS, Demanded);
2832   if (DemandedLHS || DemandedRHS)
2833     return DAG.getNode(NewOpcode, SDLoc(Node24), Node24->getVTList(),
2834                        DemandedLHS ? DemandedLHS : LHS,
2835                        DemandedRHS ? DemandedRHS : RHS);
2836 
2837   // Now try SimplifyDemandedBits which can simplify the nodes used by our
2838   // operands if this node is the only user.
2839   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2840   if (TLI.SimplifyDemandedBits(LHS, Demanded, DCI))
2841     return SDValue(Node24, 0);
2842   if (TLI.SimplifyDemandedBits(RHS, Demanded, DCI))
2843     return SDValue(Node24, 0);
2844 
2845   return SDValue();
2846 }
2847 
2848 template <typename IntTy>
2849 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
2850                                uint32_t Width, const SDLoc &DL) {
2851   if (Width + Offset < 32) {
2852     uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
2853     IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
2854     return DAG.getConstant(Result, DL, MVT::i32);
2855   }
2856 
2857   return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
2858 }
2859 
2860 static bool hasVolatileUser(SDNode *Val) {
2861   for (SDNode *U : Val->uses()) {
2862     if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
2863       if (M->isVolatile())
2864         return true;
2865     }
2866   }
2867 
2868   return false;
2869 }
2870 
2871 bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const {
2872   // i32 vectors are the canonical memory type.
2873   if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT))
2874     return false;
2875 
2876   if (!VT.isByteSized())
2877     return false;
2878 
2879   unsigned Size = VT.getStoreSize();
2880 
2881   if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector())
2882     return false;
2883 
2884   if (Size == 3 || (Size > 4 && (Size % 4 != 0)))
2885     return false;
2886 
2887   return true;
2888 }
2889 
2890 // Replace load of an illegal type with a store of a bitcast to a friendlier
2891 // type.
2892 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
2893                                                  DAGCombinerInfo &DCI) const {
2894   if (!DCI.isBeforeLegalize())
2895     return SDValue();
2896 
2897   LoadSDNode *LN = cast<LoadSDNode>(N);
2898   if (LN->isVolatile() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN))
2899     return SDValue();
2900 
2901   SDLoc SL(N);
2902   SelectionDAG &DAG = DCI.DAG;
2903   EVT VT = LN->getMemoryVT();
2904 
2905   unsigned Size = VT.getStoreSize();
2906   unsigned Align = LN->getAlignment();
2907   if (Align < Size && isTypeLegal(VT)) {
2908     bool IsFast;
2909     unsigned AS = LN->getAddressSpace();
2910 
2911     // Expand unaligned loads earlier than legalization. Due to visitation order
2912     // problems during legalization, the emitted instructions to pack and unpack
2913     // the bytes again are not eliminated in the case of an unaligned copy.
2914     if (!allowsMisalignedMemoryAccesses(
2915             VT, AS, Align, LN->getMemOperand()->getFlags(), &IsFast)) {
2916       SDValue Ops[2];
2917 
2918       if (VT.isVector())
2919         std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(LN, DAG);
2920       else
2921         std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
2922 
2923       return DAG.getMergeValues(Ops, SDLoc(N));
2924     }
2925 
2926     if (!IsFast)
2927       return SDValue();
2928   }
2929 
2930   if (!shouldCombineMemoryType(VT))
2931     return SDValue();
2932 
2933   EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2934 
2935   SDValue NewLoad
2936     = DAG.getLoad(NewVT, SL, LN->getChain(),
2937                   LN->getBasePtr(), LN->getMemOperand());
2938 
2939   SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad);
2940   DCI.CombineTo(N, BC, NewLoad.getValue(1));
2941   return SDValue(N, 0);
2942 }
2943 
2944 // Replace store of an illegal type with a store of a bitcast to a friendlier
2945 // type.
2946 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
2947                                                   DAGCombinerInfo &DCI) const {
2948   if (!DCI.isBeforeLegalize())
2949     return SDValue();
2950 
2951   StoreSDNode *SN = cast<StoreSDNode>(N);
2952   if (SN->isVolatile() || !ISD::isNormalStore(SN))
2953     return SDValue();
2954 
2955   EVT VT = SN->getMemoryVT();
2956   unsigned Size = VT.getStoreSize();
2957 
2958   SDLoc SL(N);
2959   SelectionDAG &DAG = DCI.DAG;
2960   unsigned Align = SN->getAlignment();
2961   if (Align < Size && isTypeLegal(VT)) {
2962     bool IsFast;
2963     unsigned AS = SN->getAddressSpace();
2964 
2965     // Expand unaligned stores earlier than legalization. Due to visitation
2966     // order problems during legalization, the emitted instructions to pack and
2967     // unpack the bytes again are not eliminated in the case of an unaligned
2968     // copy.
2969     if (!allowsMisalignedMemoryAccesses(
2970             VT, AS, Align, SN->getMemOperand()->getFlags(), &IsFast)) {
2971       if (VT.isVector())
2972         return scalarizeVectorStore(SN, DAG);
2973 
2974       return expandUnalignedStore(SN, DAG);
2975     }
2976 
2977     if (!IsFast)
2978       return SDValue();
2979   }
2980 
2981   if (!shouldCombineMemoryType(VT))
2982     return SDValue();
2983 
2984   EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2985   SDValue Val = SN->getValue();
2986 
2987   //DCI.AddToWorklist(Val.getNode());
2988 
2989   bool OtherUses = !Val.hasOneUse();
2990   SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val);
2991   if (OtherUses) {
2992     SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal);
2993     DAG.ReplaceAllUsesOfValueWith(Val, CastBack);
2994   }
2995 
2996   return DAG.getStore(SN->getChain(), SL, CastVal,
2997                       SN->getBasePtr(), SN->getMemOperand());
2998 }
2999 
3000 // FIXME: This should go in generic DAG combiner with an isTruncateFree check,
3001 // but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU
3002 // issues.
3003 SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N,
3004                                                         DAGCombinerInfo &DCI) const {
3005   SelectionDAG &DAG = DCI.DAG;
3006   SDValue N0 = N->getOperand(0);
3007 
3008   // (vt2 (assertzext (truncate vt0:x), vt1)) ->
3009   //     (vt2 (truncate (assertzext vt0:x, vt1)))
3010   if (N0.getOpcode() == ISD::TRUNCATE) {
3011     SDValue N1 = N->getOperand(1);
3012     EVT ExtVT = cast<VTSDNode>(N1)->getVT();
3013     SDLoc SL(N);
3014 
3015     SDValue Src = N0.getOperand(0);
3016     EVT SrcVT = Src.getValueType();
3017     if (SrcVT.bitsGE(ExtVT)) {
3018       SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1);
3019       return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg);
3020     }
3021   }
3022 
3023   return SDValue();
3024 }
3025 
3026 SDValue AMDGPUTargetLowering::performIntrinsicWOChainCombine(
3027   SDNode *N, DAGCombinerInfo &DCI) const {
3028   unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3029   switch (IID) {
3030   case Intrinsic::amdgcn_mul_i24:
3031   case Intrinsic::amdgcn_mul_u24:
3032     return simplifyI24(N, DCI);
3033   default:
3034     return SDValue();
3035   }
3036 }
3037 
3038 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the
3039 /// binary operation \p Opc to it with the corresponding constant operands.
3040 SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
3041   DAGCombinerInfo &DCI, const SDLoc &SL,
3042   unsigned Opc, SDValue LHS,
3043   uint32_t ValLo, uint32_t ValHi) const {
3044   SelectionDAG &DAG = DCI.DAG;
3045   SDValue Lo, Hi;
3046   std::tie(Lo, Hi) = split64BitValue(LHS, DAG);
3047 
3048   SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32);
3049   SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32);
3050 
3051   SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS);
3052   SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS);
3053 
3054   // Re-visit the ands. It's possible we eliminated one of them and it could
3055   // simplify the vector.
3056   DCI.AddToWorklist(Lo.getNode());
3057   DCI.AddToWorklist(Hi.getNode());
3058 
3059   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
3060   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3061 }
3062 
3063 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
3064                                                 DAGCombinerInfo &DCI) const {
3065   EVT VT = N->getValueType(0);
3066 
3067   ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3068   if (!RHS)
3069     return SDValue();
3070 
3071   SDValue LHS = N->getOperand(0);
3072   unsigned RHSVal = RHS->getZExtValue();
3073   if (!RHSVal)
3074     return LHS;
3075 
3076   SDLoc SL(N);
3077   SelectionDAG &DAG = DCI.DAG;
3078 
3079   switch (LHS->getOpcode()) {
3080   default:
3081     break;
3082   case ISD::ZERO_EXTEND:
3083   case ISD::SIGN_EXTEND:
3084   case ISD::ANY_EXTEND: {
3085     SDValue X = LHS->getOperand(0);
3086 
3087     if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
3088         isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) {
3089       // Prefer build_vector as the canonical form if packed types are legal.
3090       // (shl ([asz]ext i16:x), 16 -> build_vector 0, x
3091       SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL,
3092        { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
3093       return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
3094     }
3095 
3096     // shl (ext x) => zext (shl x), if shift does not overflow int
3097     if (VT != MVT::i64)
3098       break;
3099     KnownBits Known = DAG.computeKnownBits(X);
3100     unsigned LZ = Known.countMinLeadingZeros();
3101     if (LZ < RHSVal)
3102       break;
3103     EVT XVT = X.getValueType();
3104     SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
3105     return DAG.getZExtOrTrunc(Shl, SL, VT);
3106   }
3107   }
3108 
3109   if (VT != MVT::i64)
3110     return SDValue();
3111 
3112   // i64 (shl x, C) -> (build_pair 0, (shl x, C -32))
3113 
3114   // On some subtargets, 64-bit shift is a quarter rate instruction. In the
3115   // common case, splitting this into a move and a 32-bit shift is faster and
3116   // the same code size.
3117   if (RHSVal < 32)
3118     return SDValue();
3119 
3120   SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
3121 
3122   SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
3123   SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
3124 
3125   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3126 
3127   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
3128   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3129 }
3130 
3131 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
3132                                                 DAGCombinerInfo &DCI) const {
3133   if (N->getValueType(0) != MVT::i64)
3134     return SDValue();
3135 
3136   const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3137   if (!RHS)
3138     return SDValue();
3139 
3140   SelectionDAG &DAG = DCI.DAG;
3141   SDLoc SL(N);
3142   unsigned RHSVal = RHS->getZExtValue();
3143 
3144   // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31)
3145   if (RHSVal == 32) {
3146     SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3147     SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3148                                    DAG.getConstant(31, SL, MVT::i32));
3149 
3150     SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
3151     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3152   }
3153 
3154   // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
3155   if (RHSVal == 63) {
3156     SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3157     SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3158                                    DAG.getConstant(31, SL, MVT::i32));
3159     SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
3160     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3161   }
3162 
3163   return SDValue();
3164 }
3165 
3166 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
3167                                                 DAGCombinerInfo &DCI) const {
3168   auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3169   if (!RHS)
3170     return SDValue();
3171 
3172   EVT VT = N->getValueType(0);
3173   SDValue LHS = N->getOperand(0);
3174   unsigned ShiftAmt = RHS->getZExtValue();
3175   SelectionDAG &DAG = DCI.DAG;
3176   SDLoc SL(N);
3177 
3178   // fold (srl (and x, c1 << c2), c2) -> (and (srl(x, c2), c1)
3179   // this improves the ability to match BFE patterns in isel.
3180   if (LHS.getOpcode() == ISD::AND) {
3181     if (auto *Mask = dyn_cast<ConstantSDNode>(LHS.getOperand(1))) {
3182       if (Mask->getAPIntValue().isShiftedMask() &&
3183           Mask->getAPIntValue().countTrailingZeros() == ShiftAmt) {
3184         return DAG.getNode(
3185             ISD::AND, SL, VT,
3186             DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0), N->getOperand(1)),
3187             DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1), N->getOperand(1)));
3188       }
3189     }
3190   }
3191 
3192   if (VT != MVT::i64)
3193     return SDValue();
3194 
3195   if (ShiftAmt < 32)
3196     return SDValue();
3197 
3198   // srl i64:x, C for C >= 32
3199   // =>
3200   //   build_pair (srl hi_32(x), C - 32), 0
3201   SDValue One = DAG.getConstant(1, SL, MVT::i32);
3202   SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3203 
3204   SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, LHS);
3205   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecOp, One);
3206 
3207   SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
3208   SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
3209 
3210   SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
3211 
3212   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
3213 }
3214 
3215 SDValue AMDGPUTargetLowering::performTruncateCombine(
3216   SDNode *N, DAGCombinerInfo &DCI) const {
3217   SDLoc SL(N);
3218   SelectionDAG &DAG = DCI.DAG;
3219   EVT VT = N->getValueType(0);
3220   SDValue Src = N->getOperand(0);
3221 
3222   // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x)
3223   if (Src.getOpcode() == ISD::BITCAST && !VT.isVector()) {
3224     SDValue Vec = Src.getOperand(0);
3225     if (Vec.getOpcode() == ISD::BUILD_VECTOR) {
3226       SDValue Elt0 = Vec.getOperand(0);
3227       EVT EltVT = Elt0.getValueType();
3228       if (VT.getSizeInBits() <= EltVT.getSizeInBits()) {
3229         if (EltVT.isFloatingPoint()) {
3230           Elt0 = DAG.getNode(ISD::BITCAST, SL,
3231                              EltVT.changeTypeToInteger(), Elt0);
3232         }
3233 
3234         return DAG.getNode(ISD::TRUNCATE, SL, VT, Elt0);
3235       }
3236     }
3237   }
3238 
3239   // Equivalent of above for accessing the high element of a vector as an
3240   // integer operation.
3241   // trunc (srl (bitcast (build_vector x, y))), 16 -> trunc (bitcast y)
3242   if (Src.getOpcode() == ISD::SRL && !VT.isVector()) {
3243     if (auto K = isConstOrConstSplat(Src.getOperand(1))) {
3244       if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) {
3245         SDValue BV = stripBitcast(Src.getOperand(0));
3246         if (BV.getOpcode() == ISD::BUILD_VECTOR &&
3247             BV.getValueType().getVectorNumElements() == 2) {
3248           SDValue SrcElt = BV.getOperand(1);
3249           EVT SrcEltVT = SrcElt.getValueType();
3250           if (SrcEltVT.isFloatingPoint()) {
3251             SrcElt = DAG.getNode(ISD::BITCAST, SL,
3252                                  SrcEltVT.changeTypeToInteger(), SrcElt);
3253           }
3254 
3255           return DAG.getNode(ISD::TRUNCATE, SL, VT, SrcElt);
3256         }
3257       }
3258     }
3259   }
3260 
3261   // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit.
3262   //
3263   // i16 (trunc (srl i64:x, K)), K <= 16 ->
3264   //     i16 (trunc (srl (i32 (trunc x), K)))
3265   if (VT.getScalarSizeInBits() < 32) {
3266     EVT SrcVT = Src.getValueType();
3267     if (SrcVT.getScalarSizeInBits() > 32 &&
3268         (Src.getOpcode() == ISD::SRL ||
3269          Src.getOpcode() == ISD::SRA ||
3270          Src.getOpcode() == ISD::SHL)) {
3271       SDValue Amt = Src.getOperand(1);
3272       KnownBits Known = DAG.computeKnownBits(Amt);
3273       unsigned Size = VT.getScalarSizeInBits();
3274       if ((Known.isConstant() && Known.getConstant().ule(Size)) ||
3275           (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size))) {
3276         EVT MidVT = VT.isVector() ?
3277           EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3278                            VT.getVectorNumElements()) : MVT::i32;
3279 
3280         EVT NewShiftVT = getShiftAmountTy(MidVT, DAG.getDataLayout());
3281         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MidVT,
3282                                     Src.getOperand(0));
3283         DCI.AddToWorklist(Trunc.getNode());
3284 
3285         if (Amt.getValueType() != NewShiftVT) {
3286           Amt = DAG.getZExtOrTrunc(Amt, SL, NewShiftVT);
3287           DCI.AddToWorklist(Amt.getNode());
3288         }
3289 
3290         SDValue ShrunkShift = DAG.getNode(Src.getOpcode(), SL, MidVT,
3291                                           Trunc, Amt);
3292         return DAG.getNode(ISD::TRUNCATE, SL, VT, ShrunkShift);
3293       }
3294     }
3295   }
3296 
3297   return SDValue();
3298 }
3299 
3300 // We need to specifically handle i64 mul here to avoid unnecessary conversion
3301 // instructions. If we only match on the legalized i64 mul expansion,
3302 // SimplifyDemandedBits will be unable to remove them because there will be
3303 // multiple uses due to the separate mul + mulh[su].
3304 static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
3305                         SDValue N0, SDValue N1, unsigned Size, bool Signed) {
3306   if (Size <= 32) {
3307     unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3308     return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
3309   }
3310 
3311   // Because we want to eliminate extension instructions before the
3312   // operation, we need to create a single user here (i.e. not the separate
3313   // mul_lo + mul_hi) so that SimplifyDemandedBits will deal with it.
3314 
3315   unsigned MulOpc = Signed ? AMDGPUISD::MUL_LOHI_I24 : AMDGPUISD::MUL_LOHI_U24;
3316 
3317   SDValue Mul = DAG.getNode(MulOpc, SL,
3318                             DAG.getVTList(MVT::i32, MVT::i32), N0, N1);
3319 
3320   return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64,
3321                      Mul.getValue(0), Mul.getValue(1));
3322 }
3323 
3324 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
3325                                                 DAGCombinerInfo &DCI) const {
3326   EVT VT = N->getValueType(0);
3327 
3328   unsigned Size = VT.getSizeInBits();
3329   if (VT.isVector() || Size > 64)
3330     return SDValue();
3331 
3332   // There are i16 integer mul/mad.
3333   if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16))
3334     return SDValue();
3335 
3336   SelectionDAG &DAG = DCI.DAG;
3337   SDLoc DL(N);
3338 
3339   SDValue N0 = N->getOperand(0);
3340   SDValue N1 = N->getOperand(1);
3341 
3342   // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
3343   // in the source into any_extends if the result of the mul is truncated. Since
3344   // we can assume the high bits are whatever we want, use the underlying value
3345   // to avoid the unknown high bits from interfering.
3346   if (N0.getOpcode() == ISD::ANY_EXTEND)
3347     N0 = N0.getOperand(0);
3348 
3349   if (N1.getOpcode() == ISD::ANY_EXTEND)
3350     N1 = N1.getOperand(0);
3351 
3352   SDValue Mul;
3353 
3354   if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
3355     N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3356     N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3357     Mul = getMul24(DAG, DL, N0, N1, Size, false);
3358   } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
3359     N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3360     N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3361     Mul = getMul24(DAG, DL, N0, N1, Size, true);
3362   } else {
3363     return SDValue();
3364   }
3365 
3366   // We need to use sext even for MUL_U24, because MUL_U24 is used
3367   // for signed multiply of 8 and 16-bit types.
3368   return DAG.getSExtOrTrunc(Mul, DL, VT);
3369 }
3370 
3371 SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
3372                                                   DAGCombinerInfo &DCI) const {
3373   EVT VT = N->getValueType(0);
3374 
3375   if (!Subtarget->hasMulI24() || VT.isVector())
3376     return SDValue();
3377 
3378   SelectionDAG &DAG = DCI.DAG;
3379   SDLoc DL(N);
3380 
3381   SDValue N0 = N->getOperand(0);
3382   SDValue N1 = N->getOperand(1);
3383 
3384   if (!isI24(N0, DAG) || !isI24(N1, DAG))
3385     return SDValue();
3386 
3387   N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3388   N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3389 
3390   SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1);
3391   DCI.AddToWorklist(Mulhi.getNode());
3392   return DAG.getSExtOrTrunc(Mulhi, DL, VT);
3393 }
3394 
3395 SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N,
3396                                                   DAGCombinerInfo &DCI) const {
3397   EVT VT = N->getValueType(0);
3398 
3399   if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
3400     return SDValue();
3401 
3402   SelectionDAG &DAG = DCI.DAG;
3403   SDLoc DL(N);
3404 
3405   SDValue N0 = N->getOperand(0);
3406   SDValue N1 = N->getOperand(1);
3407 
3408   if (!isU24(N0, DAG) || !isU24(N1, DAG))
3409     return SDValue();
3410 
3411   N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3412   N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3413 
3414   SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1);
3415   DCI.AddToWorklist(Mulhi.getNode());
3416   return DAG.getZExtOrTrunc(Mulhi, DL, VT);
3417 }
3418 
3419 SDValue AMDGPUTargetLowering::performMulLoHi24Combine(
3420   SDNode *N, DAGCombinerInfo &DCI) const {
3421   SelectionDAG &DAG = DCI.DAG;
3422 
3423   // Simplify demanded bits before splitting into multiple users.
3424   if (SDValue V = simplifyI24(N, DCI))
3425     return V;
3426 
3427   SDValue N0 = N->getOperand(0);
3428   SDValue N1 = N->getOperand(1);
3429 
3430   bool Signed = (N->getOpcode() == AMDGPUISD::MUL_LOHI_I24);
3431 
3432   unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3433   unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
3434 
3435   SDLoc SL(N);
3436 
3437   SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
3438   SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
3439   return DAG.getMergeValues({ MulLo, MulHi }, SL);
3440 }
3441 
3442 static bool isNegativeOne(SDValue Val) {
3443   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val))
3444     return C->isAllOnesValue();
3445   return false;
3446 }
3447 
3448 SDValue AMDGPUTargetLowering::getFFBX_U32(SelectionDAG &DAG,
3449                                           SDValue Op,
3450                                           const SDLoc &DL,
3451                                           unsigned Opc) const {
3452   EVT VT = Op.getValueType();
3453   EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT);
3454   if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() &&
3455                               LegalVT != MVT::i16))
3456     return SDValue();
3457 
3458   if (VT != MVT::i32)
3459     Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op);
3460 
3461   SDValue FFBX = DAG.getNode(Opc, DL, MVT::i32, Op);
3462   if (VT != MVT::i32)
3463     FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX);
3464 
3465   return FFBX;
3466 }
3467 
3468 // The native instructions return -1 on 0 input. Optimize out a select that
3469 // produces -1 on 0.
3470 //
3471 // TODO: If zero is not undef, we could also do this if the output is compared
3472 // against the bitwidth.
3473 //
3474 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
3475 SDValue AMDGPUTargetLowering::performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond,
3476                                                  SDValue LHS, SDValue RHS,
3477                                                  DAGCombinerInfo &DCI) const {
3478   ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
3479   if (!CmpRhs || !CmpRhs->isNullValue())
3480     return SDValue();
3481 
3482   SelectionDAG &DAG = DCI.DAG;
3483   ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
3484   SDValue CmpLHS = Cond.getOperand(0);
3485 
3486   unsigned Opc = isCttzOpc(RHS.getOpcode()) ? AMDGPUISD::FFBL_B32 :
3487                                            AMDGPUISD::FFBH_U32;
3488 
3489   // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
3490   // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_u32 x
3491   if (CCOpcode == ISD::SETEQ &&
3492       (isCtlzOpc(RHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
3493       RHS.getOperand(0) == CmpLHS &&
3494       isNegativeOne(LHS)) {
3495     return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3496   }
3497 
3498   // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
3499   // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_u32 x
3500   if (CCOpcode == ISD::SETNE &&
3501       (isCtlzOpc(LHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
3502       LHS.getOperand(0) == CmpLHS &&
3503       isNegativeOne(RHS)) {
3504     return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3505   }
3506 
3507   return SDValue();
3508 }
3509 
3510 static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI,
3511                                          unsigned Op,
3512                                          const SDLoc &SL,
3513                                          SDValue Cond,
3514                                          SDValue N1,
3515                                          SDValue N2) {
3516   SelectionDAG &DAG = DCI.DAG;
3517   EVT VT = N1.getValueType();
3518 
3519   SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond,
3520                                   N1.getOperand(0), N2.getOperand(0));
3521   DCI.AddToWorklist(NewSelect.getNode());
3522   return DAG.getNode(Op, SL, VT, NewSelect);
3523 }
3524 
3525 // Pull a free FP operation out of a select so it may fold into uses.
3526 //
3527 // select c, (fneg x), (fneg y) -> fneg (select c, x, y)
3528 // select c, (fneg x), k -> fneg (select c, x, (fneg k))
3529 //
3530 // select c, (fabs x), (fabs y) -> fabs (select c, x, y)
3531 // select c, (fabs x), +k -> fabs (select c, x, k)
3532 static SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
3533                                     SDValue N) {
3534   SelectionDAG &DAG = DCI.DAG;
3535   SDValue Cond = N.getOperand(0);
3536   SDValue LHS = N.getOperand(1);
3537   SDValue RHS = N.getOperand(2);
3538 
3539   EVT VT = N.getValueType();
3540   if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) ||
3541       (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) {
3542     return distributeOpThroughSelect(DCI, LHS.getOpcode(),
3543                                      SDLoc(N), Cond, LHS, RHS);
3544   }
3545 
3546   bool Inv = false;
3547   if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) {
3548     std::swap(LHS, RHS);
3549     Inv = true;
3550   }
3551 
3552   // TODO: Support vector constants.
3553   ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
3554   if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS) {
3555     SDLoc SL(N);
3556     // If one side is an fneg/fabs and the other is a constant, we can push the
3557     // fneg/fabs down. If it's an fabs, the constant needs to be non-negative.
3558     SDValue NewLHS = LHS.getOperand(0);
3559     SDValue NewRHS = RHS;
3560 
3561     // Careful: if the neg can be folded up, don't try to pull it back down.
3562     bool ShouldFoldNeg = true;
3563 
3564     if (NewLHS.hasOneUse()) {
3565       unsigned Opc = NewLHS.getOpcode();
3566       if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(Opc))
3567         ShouldFoldNeg = false;
3568       if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL)
3569         ShouldFoldNeg = false;
3570     }
3571 
3572     if (ShouldFoldNeg) {
3573       if (LHS.getOpcode() == ISD::FNEG)
3574         NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3575       else if (CRHS->isNegative())
3576         return SDValue();
3577 
3578       if (Inv)
3579         std::swap(NewLHS, NewRHS);
3580 
3581       SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT,
3582                                       Cond, NewLHS, NewRHS);
3583       DCI.AddToWorklist(NewSelect.getNode());
3584       return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
3585     }
3586   }
3587 
3588   return SDValue();
3589 }
3590 
3591 
3592 SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
3593                                                    DAGCombinerInfo &DCI) const {
3594   if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0)))
3595     return Folded;
3596 
3597   SDValue Cond = N->getOperand(0);
3598   if (Cond.getOpcode() != ISD::SETCC)
3599     return SDValue();
3600 
3601   EVT VT = N->getValueType(0);
3602   SDValue LHS = Cond.getOperand(0);
3603   SDValue RHS = Cond.getOperand(1);
3604   SDValue CC = Cond.getOperand(2);
3605 
3606   SDValue True = N->getOperand(1);
3607   SDValue False = N->getOperand(2);
3608 
3609   if (Cond.hasOneUse()) { // TODO: Look for multiple select uses.
3610     SelectionDAG &DAG = DCI.DAG;
3611     if (DAG.isConstantValueOfAnyType(True) &&
3612         !DAG.isConstantValueOfAnyType(False)) {
3613       // Swap cmp + select pair to move constant to false input.
3614       // This will allow using VOPC cndmasks more often.
3615       // select (setcc x, y), k, x -> select (setccinv x, y), x, k
3616 
3617       SDLoc SL(N);
3618       ISD::CondCode NewCC =
3619           getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), LHS.getValueType());
3620 
3621       SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC);
3622       return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True);
3623     }
3624 
3625     if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
3626       SDValue MinMax
3627         = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
3628       // Revisit this node so we can catch min3/max3/med3 patterns.
3629       //DCI.AddToWorklist(MinMax.getNode());
3630       return MinMax;
3631     }
3632   }
3633 
3634   // There's no reason to not do this if the condition has other uses.
3635   return performCtlz_CttzCombine(SDLoc(N), Cond, True, False, DCI);
3636 }
3637 
3638 static bool isInv2Pi(const APFloat &APF) {
3639   static const APFloat KF16(APFloat::IEEEhalf(), APInt(16, 0x3118));
3640   static const APFloat KF32(APFloat::IEEEsingle(), APInt(32, 0x3e22f983));
3641   static const APFloat KF64(APFloat::IEEEdouble(), APInt(64, 0x3fc45f306dc9c882));
3642 
3643   return APF.bitwiseIsEqual(KF16) ||
3644          APF.bitwiseIsEqual(KF32) ||
3645          APF.bitwiseIsEqual(KF64);
3646 }
3647 
3648 // 0 and 1.0 / (0.5 * pi) do not have inline immmediates, so there is an
3649 // additional cost to negate them.
3650 bool AMDGPUTargetLowering::isConstantCostlierToNegate(SDValue N) const {
3651   if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N)) {
3652     if (C->isZero() && !C->isNegative())
3653       return true;
3654 
3655     if (Subtarget->hasInv2PiInlineImm() && isInv2Pi(C->getValueAPF()))
3656       return true;
3657   }
3658 
3659   return false;
3660 }
3661 
3662 static unsigned inverseMinMax(unsigned Opc) {
3663   switch (Opc) {
3664   case ISD::FMAXNUM:
3665     return ISD::FMINNUM;
3666   case ISD::FMINNUM:
3667     return ISD::FMAXNUM;
3668   case ISD::FMAXNUM_IEEE:
3669     return ISD::FMINNUM_IEEE;
3670   case ISD::FMINNUM_IEEE:
3671     return ISD::FMAXNUM_IEEE;
3672   case AMDGPUISD::FMAX_LEGACY:
3673     return AMDGPUISD::FMIN_LEGACY;
3674   case AMDGPUISD::FMIN_LEGACY:
3675     return  AMDGPUISD::FMAX_LEGACY;
3676   default:
3677     llvm_unreachable("invalid min/max opcode");
3678   }
3679 }
3680 
3681 SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
3682                                                  DAGCombinerInfo &DCI) const {
3683   SelectionDAG &DAG = DCI.DAG;
3684   SDValue N0 = N->getOperand(0);
3685   EVT VT = N->getValueType(0);
3686 
3687   unsigned Opc = N0.getOpcode();
3688 
3689   // If the input has multiple uses and we can either fold the negate down, or
3690   // the other uses cannot, give up. This both prevents unprofitable
3691   // transformations and infinite loops: we won't repeatedly try to fold around
3692   // a negate that has no 'good' form.
3693   if (N0.hasOneUse()) {
3694     // This may be able to fold into the source, but at a code size cost. Don't
3695     // fold if the fold into the user is free.
3696     if (allUsesHaveSourceMods(N, 0))
3697       return SDValue();
3698   } else {
3699     if (fnegFoldsIntoOp(Opc) &&
3700         (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode())))
3701       return SDValue();
3702   }
3703 
3704   SDLoc SL(N);
3705   switch (Opc) {
3706   case ISD::FADD: {
3707     if (!mayIgnoreSignedZero(N0))
3708       return SDValue();
3709 
3710     // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y))
3711     SDValue LHS = N0.getOperand(0);
3712     SDValue RHS = N0.getOperand(1);
3713 
3714     if (LHS.getOpcode() != ISD::FNEG)
3715       LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
3716     else
3717       LHS = LHS.getOperand(0);
3718 
3719     if (RHS.getOpcode() != ISD::FNEG)
3720       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3721     else
3722       RHS = RHS.getOperand(0);
3723 
3724     SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags());
3725     if (Res.getOpcode() != ISD::FADD)
3726       return SDValue(); // Op got folded away.
3727     if (!N0.hasOneUse())
3728       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3729     return Res;
3730   }
3731   case ISD::FMUL:
3732   case AMDGPUISD::FMUL_LEGACY: {
3733     // (fneg (fmul x, y)) -> (fmul x, (fneg y))
3734     // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y))
3735     SDValue LHS = N0.getOperand(0);
3736     SDValue RHS = N0.getOperand(1);
3737 
3738     if (LHS.getOpcode() == ISD::FNEG)
3739       LHS = LHS.getOperand(0);
3740     else if (RHS.getOpcode() == ISD::FNEG)
3741       RHS = RHS.getOperand(0);
3742     else
3743       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3744 
3745     SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags());
3746     if (Res.getOpcode() != Opc)
3747       return SDValue(); // Op got folded away.
3748     if (!N0.hasOneUse())
3749       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3750     return Res;
3751   }
3752   case ISD::FMA:
3753   case ISD::FMAD: {
3754     if (!mayIgnoreSignedZero(N0))
3755       return SDValue();
3756 
3757     // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z))
3758     SDValue LHS = N0.getOperand(0);
3759     SDValue MHS = N0.getOperand(1);
3760     SDValue RHS = N0.getOperand(2);
3761 
3762     if (LHS.getOpcode() == ISD::FNEG)
3763       LHS = LHS.getOperand(0);
3764     else if (MHS.getOpcode() == ISD::FNEG)
3765       MHS = MHS.getOperand(0);
3766     else
3767       MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS);
3768 
3769     if (RHS.getOpcode() != ISD::FNEG)
3770       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3771     else
3772       RHS = RHS.getOperand(0);
3773 
3774     SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS);
3775     if (Res.getOpcode() != Opc)
3776       return SDValue(); // Op got folded away.
3777     if (!N0.hasOneUse())
3778       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3779     return Res;
3780   }
3781   case ISD::FMAXNUM:
3782   case ISD::FMINNUM:
3783   case ISD::FMAXNUM_IEEE:
3784   case ISD::FMINNUM_IEEE:
3785   case AMDGPUISD::FMAX_LEGACY:
3786   case AMDGPUISD::FMIN_LEGACY: {
3787     // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y)
3788     // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y)
3789     // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y)
3790     // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y)
3791 
3792     SDValue LHS = N0.getOperand(0);
3793     SDValue RHS = N0.getOperand(1);
3794 
3795     // 0 doesn't have a negated inline immediate.
3796     // TODO: This constant check should be generalized to other operations.
3797     if (isConstantCostlierToNegate(RHS))
3798       return SDValue();
3799 
3800     SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
3801     SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3802     unsigned Opposite = inverseMinMax(Opc);
3803 
3804     SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags());
3805     if (Res.getOpcode() != Opposite)
3806       return SDValue(); // Op got folded away.
3807     if (!N0.hasOneUse())
3808       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3809     return Res;
3810   }
3811   case AMDGPUISD::FMED3: {
3812     SDValue Ops[3];
3813     for (unsigned I = 0; I < 3; ++I)
3814       Ops[I] = DAG.getNode(ISD::FNEG, SL, VT, N0->getOperand(I), N0->getFlags());
3815 
3816     SDValue Res = DAG.getNode(AMDGPUISD::FMED3, SL, VT, Ops, N0->getFlags());
3817     if (Res.getOpcode() != AMDGPUISD::FMED3)
3818       return SDValue(); // Op got folded away.
3819     if (!N0.hasOneUse())
3820       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3821     return Res;
3822   }
3823   case ISD::FP_EXTEND:
3824   case ISD::FTRUNC:
3825   case ISD::FRINT:
3826   case ISD::FNEARBYINT: // XXX - Should fround be handled?
3827   case ISD::FSIN:
3828   case ISD::FCANONICALIZE:
3829   case AMDGPUISD::RCP:
3830   case AMDGPUISD::RCP_LEGACY:
3831   case AMDGPUISD::RCP_IFLAG:
3832   case AMDGPUISD::SIN_HW: {
3833     SDValue CvtSrc = N0.getOperand(0);
3834     if (CvtSrc.getOpcode() == ISD::FNEG) {
3835       // (fneg (fp_extend (fneg x))) -> (fp_extend x)
3836       // (fneg (rcp (fneg x))) -> (rcp x)
3837       return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0));
3838     }
3839 
3840     if (!N0.hasOneUse())
3841       return SDValue();
3842 
3843     // (fneg (fp_extend x)) -> (fp_extend (fneg x))
3844     // (fneg (rcp x)) -> (rcp (fneg x))
3845     SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
3846     return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags());
3847   }
3848   case ISD::FP_ROUND: {
3849     SDValue CvtSrc = N0.getOperand(0);
3850 
3851     if (CvtSrc.getOpcode() == ISD::FNEG) {
3852       // (fneg (fp_round (fneg x))) -> (fp_round x)
3853       return DAG.getNode(ISD::FP_ROUND, SL, VT,
3854                          CvtSrc.getOperand(0), N0.getOperand(1));
3855     }
3856 
3857     if (!N0.hasOneUse())
3858       return SDValue();
3859 
3860     // (fneg (fp_round x)) -> (fp_round (fneg x))
3861     SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
3862     return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1));
3863   }
3864   case ISD::FP16_TO_FP: {
3865     // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal
3866     // f16, but legalization of f16 fneg ends up pulling it out of the source.
3867     // Put the fneg back as a legal source operation that can be matched later.
3868     SDLoc SL(N);
3869 
3870     SDValue Src = N0.getOperand(0);
3871     EVT SrcVT = Src.getValueType();
3872 
3873     // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000)
3874     SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src,
3875                                   DAG.getConstant(0x8000, SL, SrcVT));
3876     return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg);
3877   }
3878   default:
3879     return SDValue();
3880   }
3881 }
3882 
3883 SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N,
3884                                                  DAGCombinerInfo &DCI) const {
3885   SelectionDAG &DAG = DCI.DAG;
3886   SDValue N0 = N->getOperand(0);
3887 
3888   if (!N0.hasOneUse())
3889     return SDValue();
3890 
3891   switch (N0.getOpcode()) {
3892   case ISD::FP16_TO_FP: {
3893     assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal");
3894     SDLoc SL(N);
3895     SDValue Src = N0.getOperand(0);
3896     EVT SrcVT = Src.getValueType();
3897 
3898     // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff)
3899     SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src,
3900                                   DAG.getConstant(0x7fff, SL, SrcVT));
3901     return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs);
3902   }
3903   default:
3904     return SDValue();
3905   }
3906 }
3907 
3908 SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N,
3909                                                 DAGCombinerInfo &DCI) const {
3910   const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
3911   if (!CFP)
3912     return SDValue();
3913 
3914   // XXX - Should this flush denormals?
3915   const APFloat &Val = CFP->getValueAPF();
3916   APFloat One(Val.getSemantics(), "1.0");
3917   return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
3918 }
3919 
3920 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
3921                                                 DAGCombinerInfo &DCI) const {
3922   SelectionDAG &DAG = DCI.DAG;
3923   SDLoc DL(N);
3924 
3925   switch(N->getOpcode()) {
3926   default:
3927     break;
3928   case ISD::BITCAST: {
3929     EVT DestVT = N->getValueType(0);
3930 
3931     // Push casts through vector builds. This helps avoid emitting a large
3932     // number of copies when materializing floating point vector constants.
3933     //
3934     // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) =>
3935     //   vnt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y))
3936     if (DestVT.isVector()) {
3937       SDValue Src = N->getOperand(0);
3938       if (Src.getOpcode() == ISD::BUILD_VECTOR) {
3939         EVT SrcVT = Src.getValueType();
3940         unsigned NElts = DestVT.getVectorNumElements();
3941 
3942         if (SrcVT.getVectorNumElements() == NElts) {
3943           EVT DestEltVT = DestVT.getVectorElementType();
3944 
3945           SmallVector<SDValue, 8> CastedElts;
3946           SDLoc SL(N);
3947           for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) {
3948             SDValue Elt = Src.getOperand(I);
3949             CastedElts.push_back(DAG.getNode(ISD::BITCAST, DL, DestEltVT, Elt));
3950           }
3951 
3952           return DAG.getBuildVector(DestVT, SL, CastedElts);
3953         }
3954       }
3955     }
3956 
3957     if (DestVT.getSizeInBits() != 64 && !DestVT.isVector())
3958       break;
3959 
3960     // Fold bitcasts of constants.
3961     //
3962     // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
3963     // TODO: Generalize and move to DAGCombiner
3964     SDValue Src = N->getOperand(0);
3965     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
3966       if (Src.getValueType() == MVT::i64) {
3967         SDLoc SL(N);
3968         uint64_t CVal = C->getZExtValue();
3969         SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
3970                                  DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
3971                                  DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
3972         return DAG.getNode(ISD::BITCAST, SL, DestVT, BV);
3973       }
3974     }
3975 
3976     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
3977       const APInt &Val = C->getValueAPF().bitcastToAPInt();
3978       SDLoc SL(N);
3979       uint64_t CVal = Val.getZExtValue();
3980       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
3981                                 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
3982                                 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
3983 
3984       return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
3985     }
3986 
3987     break;
3988   }
3989   case ISD::SHL: {
3990     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3991       break;
3992 
3993     return performShlCombine(N, DCI);
3994   }
3995   case ISD::SRL: {
3996     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3997       break;
3998 
3999     return performSrlCombine(N, DCI);
4000   }
4001   case ISD::SRA: {
4002     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
4003       break;
4004 
4005     return performSraCombine(N, DCI);
4006   }
4007   case ISD::TRUNCATE:
4008     return performTruncateCombine(N, DCI);
4009   case ISD::MUL:
4010     return performMulCombine(N, DCI);
4011   case ISD::MULHS:
4012     return performMulhsCombine(N, DCI);
4013   case ISD::MULHU:
4014     return performMulhuCombine(N, DCI);
4015   case AMDGPUISD::MUL_I24:
4016   case AMDGPUISD::MUL_U24:
4017   case AMDGPUISD::MULHI_I24:
4018   case AMDGPUISD::MULHI_U24: {
4019     if (SDValue V = simplifyI24(N, DCI))
4020       return V;
4021     return SDValue();
4022   }
4023   case AMDGPUISD::MUL_LOHI_I24:
4024   case AMDGPUISD::MUL_LOHI_U24:
4025     return performMulLoHi24Combine(N, DCI);
4026   case ISD::SELECT:
4027     return performSelectCombine(N, DCI);
4028   case ISD::FNEG:
4029     return performFNegCombine(N, DCI);
4030   case ISD::FABS:
4031     return performFAbsCombine(N, DCI);
4032   case AMDGPUISD::BFE_I32:
4033   case AMDGPUISD::BFE_U32: {
4034     assert(!N->getValueType(0).isVector() &&
4035            "Vector handling of BFE not implemented");
4036     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
4037     if (!Width)
4038       break;
4039 
4040     uint32_t WidthVal = Width->getZExtValue() & 0x1f;
4041     if (WidthVal == 0)
4042       return DAG.getConstant(0, DL, MVT::i32);
4043 
4044     ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
4045     if (!Offset)
4046       break;
4047 
4048     SDValue BitsFrom = N->getOperand(0);
4049     uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
4050 
4051     bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
4052 
4053     if (OffsetVal == 0) {
4054       // This is already sign / zero extended, so try to fold away extra BFEs.
4055       unsigned SignBits =  Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
4056 
4057       unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
4058       if (OpSignBits >= SignBits)
4059         return BitsFrom;
4060 
4061       EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
4062       if (Signed) {
4063         // This is a sign_extend_inreg. Replace it to take advantage of existing
4064         // DAG Combines. If not eliminated, we will match back to BFE during
4065         // selection.
4066 
4067         // TODO: The sext_inreg of extended types ends, although we can could
4068         // handle them in a single BFE.
4069         return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
4070                            DAG.getValueType(SmallVT));
4071       }
4072 
4073       return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
4074     }
4075 
4076     if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
4077       if (Signed) {
4078         return constantFoldBFE<int32_t>(DAG,
4079                                         CVal->getSExtValue(),
4080                                         OffsetVal,
4081                                         WidthVal,
4082                                         DL);
4083       }
4084 
4085       return constantFoldBFE<uint32_t>(DAG,
4086                                        CVal->getZExtValue(),
4087                                        OffsetVal,
4088                                        WidthVal,
4089                                        DL);
4090     }
4091 
4092     if ((OffsetVal + WidthVal) >= 32 &&
4093         !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
4094       SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
4095       return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
4096                          BitsFrom, ShiftVal);
4097     }
4098 
4099     if (BitsFrom.hasOneUse()) {
4100       APInt Demanded = APInt::getBitsSet(32,
4101                                          OffsetVal,
4102                                          OffsetVal + WidthVal);
4103 
4104       KnownBits Known;
4105       TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
4106                                             !DCI.isBeforeLegalizeOps());
4107       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4108       if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) ||
4109           TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) {
4110         DCI.CommitTargetLoweringOpt(TLO);
4111       }
4112     }
4113 
4114     break;
4115   }
4116   case ISD::LOAD:
4117     return performLoadCombine(N, DCI);
4118   case ISD::STORE:
4119     return performStoreCombine(N, DCI);
4120   case AMDGPUISD::RCP:
4121   case AMDGPUISD::RCP_IFLAG:
4122     return performRcpCombine(N, DCI);
4123   case ISD::AssertZext:
4124   case ISD::AssertSext:
4125     return performAssertSZExtCombine(N, DCI);
4126   case ISD::INTRINSIC_WO_CHAIN:
4127     return performIntrinsicWOChainCombine(N, DCI);
4128   }
4129   return SDValue();
4130 }
4131 
4132 //===----------------------------------------------------------------------===//
4133 // Helper functions
4134 //===----------------------------------------------------------------------===//
4135 
4136 SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
4137                                                    const TargetRegisterClass *RC,
4138                                                    unsigned Reg, EVT VT,
4139                                                    const SDLoc &SL,
4140                                                    bool RawReg) const {
4141   MachineFunction &MF = DAG.getMachineFunction();
4142   MachineRegisterInfo &MRI = MF.getRegInfo();
4143   unsigned VReg;
4144 
4145   if (!MRI.isLiveIn(Reg)) {
4146     VReg = MRI.createVirtualRegister(RC);
4147     MRI.addLiveIn(Reg, VReg);
4148   } else {
4149     VReg = MRI.getLiveInVirtReg(Reg);
4150   }
4151 
4152   if (RawReg)
4153     return DAG.getRegister(VReg, VT);
4154 
4155   return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT);
4156 }
4157 
4158 // This may be called multiple times, and nothing prevents creating multiple
4159 // objects at the same offset. See if we already defined this object.
4160 static int getOrCreateFixedStackObject(MachineFrameInfo &MFI, unsigned Size,
4161                                        int64_t Offset) {
4162   for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) {
4163     if (MFI.getObjectOffset(I) == Offset) {
4164       assert(MFI.getObjectSize(I) == Size);
4165       return I;
4166     }
4167   }
4168 
4169   return MFI.CreateFixedObject(Size, Offset, true);
4170 }
4171 
4172 SDValue AMDGPUTargetLowering::loadStackInputValue(SelectionDAG &DAG,
4173                                                   EVT VT,
4174                                                   const SDLoc &SL,
4175                                                   int64_t Offset) const {
4176   MachineFunction &MF = DAG.getMachineFunction();
4177   MachineFrameInfo &MFI = MF.getFrameInfo();
4178   int FI = getOrCreateFixedStackObject(MFI, VT.getStoreSize(), Offset);
4179 
4180   auto SrcPtrInfo = MachinePointerInfo::getStack(MF, Offset);
4181   SDValue Ptr = DAG.getFrameIndex(FI, MVT::i32);
4182 
4183   return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, 4,
4184                      MachineMemOperand::MODereferenceable |
4185                      MachineMemOperand::MOInvariant);
4186 }
4187 
4188 SDValue AMDGPUTargetLowering::storeStackInputValue(SelectionDAG &DAG,
4189                                                    const SDLoc &SL,
4190                                                    SDValue Chain,
4191                                                    SDValue ArgVal,
4192                                                    int64_t Offset) const {
4193   MachineFunction &MF = DAG.getMachineFunction();
4194   MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset);
4195 
4196   SDValue Ptr = DAG.getConstant(Offset, SL, MVT::i32);
4197   SDValue Store = DAG.getStore(Chain, SL, ArgVal, Ptr, DstInfo, 4,
4198                                MachineMemOperand::MODereferenceable);
4199   return Store;
4200 }
4201 
4202 SDValue AMDGPUTargetLowering::loadInputValue(SelectionDAG &DAG,
4203                                              const TargetRegisterClass *RC,
4204                                              EVT VT, const SDLoc &SL,
4205                                              const ArgDescriptor &Arg) const {
4206   assert(Arg && "Attempting to load missing argument");
4207 
4208   SDValue V = Arg.isRegister() ?
4209     CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL) :
4210     loadStackInputValue(DAG, VT, SL, Arg.getStackOffset());
4211 
4212   if (!Arg.isMasked())
4213     return V;
4214 
4215   unsigned Mask = Arg.getMask();
4216   unsigned Shift = countTrailingZeros<unsigned>(Mask);
4217   V = DAG.getNode(ISD::SRL, SL, VT, V,
4218                   DAG.getShiftAmountConstant(Shift, VT, SL));
4219   return DAG.getNode(ISD::AND, SL, VT, V,
4220                      DAG.getConstant(Mask >> Shift, SL, VT));
4221 }
4222 
4223 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
4224     const MachineFunction &MF, const ImplicitParameter Param) const {
4225   const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
4226   const AMDGPUSubtarget &ST =
4227       AMDGPUSubtarget::get(getTargetMachine(), MF.getFunction());
4228   unsigned ExplicitArgOffset = ST.getExplicitKernelArgOffset(MF.getFunction());
4229   const Align Alignment = ST.getAlignmentForImplicitArgPtr();
4230   uint64_t ArgOffset = alignTo(MFI->getExplicitKernArgSize(), Alignment) +
4231                        ExplicitArgOffset;
4232   switch (Param) {
4233   case GRID_DIM:
4234     return ArgOffset;
4235   case GRID_OFFSET:
4236     return ArgOffset + 4;
4237   }
4238   llvm_unreachable("unexpected implicit parameter type");
4239 }
4240 
4241 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
4242 
4243 const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
4244   switch ((AMDGPUISD::NodeType)Opcode) {
4245   case AMDGPUISD::FIRST_NUMBER: break;
4246   // AMDIL DAG nodes
4247   NODE_NAME_CASE(UMUL);
4248   NODE_NAME_CASE(BRANCH_COND);
4249 
4250   // AMDGPU DAG nodes
4251   NODE_NAME_CASE(IF)
4252   NODE_NAME_CASE(ELSE)
4253   NODE_NAME_CASE(LOOP)
4254   NODE_NAME_CASE(CALL)
4255   NODE_NAME_CASE(TC_RETURN)
4256   NODE_NAME_CASE(TRAP)
4257   NODE_NAME_CASE(RET_FLAG)
4258   NODE_NAME_CASE(RETURN_TO_EPILOG)
4259   NODE_NAME_CASE(ENDPGM)
4260   NODE_NAME_CASE(DWORDADDR)
4261   NODE_NAME_CASE(FRACT)
4262   NODE_NAME_CASE(SETCC)
4263   NODE_NAME_CASE(SETREG)
4264   NODE_NAME_CASE(DENORM_MODE)
4265   NODE_NAME_CASE(FMA_W_CHAIN)
4266   NODE_NAME_CASE(FMUL_W_CHAIN)
4267   NODE_NAME_CASE(CLAMP)
4268   NODE_NAME_CASE(COS_HW)
4269   NODE_NAME_CASE(SIN_HW)
4270   NODE_NAME_CASE(FMAX_LEGACY)
4271   NODE_NAME_CASE(FMIN_LEGACY)
4272   NODE_NAME_CASE(FMAX3)
4273   NODE_NAME_CASE(SMAX3)
4274   NODE_NAME_CASE(UMAX3)
4275   NODE_NAME_CASE(FMIN3)
4276   NODE_NAME_CASE(SMIN3)
4277   NODE_NAME_CASE(UMIN3)
4278   NODE_NAME_CASE(FMED3)
4279   NODE_NAME_CASE(SMED3)
4280   NODE_NAME_CASE(UMED3)
4281   NODE_NAME_CASE(FDOT2)
4282   NODE_NAME_CASE(URECIP)
4283   NODE_NAME_CASE(DIV_SCALE)
4284   NODE_NAME_CASE(DIV_FMAS)
4285   NODE_NAME_CASE(DIV_FIXUP)
4286   NODE_NAME_CASE(FMAD_FTZ)
4287   NODE_NAME_CASE(TRIG_PREOP)
4288   NODE_NAME_CASE(RCP)
4289   NODE_NAME_CASE(RSQ)
4290   NODE_NAME_CASE(RCP_LEGACY)
4291   NODE_NAME_CASE(RSQ_LEGACY)
4292   NODE_NAME_CASE(RCP_IFLAG)
4293   NODE_NAME_CASE(FMUL_LEGACY)
4294   NODE_NAME_CASE(RSQ_CLAMP)
4295   NODE_NAME_CASE(LDEXP)
4296   NODE_NAME_CASE(FP_CLASS)
4297   NODE_NAME_CASE(DOT4)
4298   NODE_NAME_CASE(CARRY)
4299   NODE_NAME_CASE(BORROW)
4300   NODE_NAME_CASE(BFE_U32)
4301   NODE_NAME_CASE(BFE_I32)
4302   NODE_NAME_CASE(BFI)
4303   NODE_NAME_CASE(BFM)
4304   NODE_NAME_CASE(FFBH_U32)
4305   NODE_NAME_CASE(FFBH_I32)
4306   NODE_NAME_CASE(FFBL_B32)
4307   NODE_NAME_CASE(MUL_U24)
4308   NODE_NAME_CASE(MUL_I24)
4309   NODE_NAME_CASE(MULHI_U24)
4310   NODE_NAME_CASE(MULHI_I24)
4311   NODE_NAME_CASE(MUL_LOHI_U24)
4312   NODE_NAME_CASE(MUL_LOHI_I24)
4313   NODE_NAME_CASE(MAD_U24)
4314   NODE_NAME_CASE(MAD_I24)
4315   NODE_NAME_CASE(MAD_I64_I32)
4316   NODE_NAME_CASE(MAD_U64_U32)
4317   NODE_NAME_CASE(PERM)
4318   NODE_NAME_CASE(TEXTURE_FETCH)
4319   NODE_NAME_CASE(R600_EXPORT)
4320   NODE_NAME_CASE(CONST_ADDRESS)
4321   NODE_NAME_CASE(REGISTER_LOAD)
4322   NODE_NAME_CASE(REGISTER_STORE)
4323   NODE_NAME_CASE(SAMPLE)
4324   NODE_NAME_CASE(SAMPLEB)
4325   NODE_NAME_CASE(SAMPLED)
4326   NODE_NAME_CASE(SAMPLEL)
4327   NODE_NAME_CASE(CVT_F32_UBYTE0)
4328   NODE_NAME_CASE(CVT_F32_UBYTE1)
4329   NODE_NAME_CASE(CVT_F32_UBYTE2)
4330   NODE_NAME_CASE(CVT_F32_UBYTE3)
4331   NODE_NAME_CASE(CVT_PKRTZ_F16_F32)
4332   NODE_NAME_CASE(CVT_PKNORM_I16_F32)
4333   NODE_NAME_CASE(CVT_PKNORM_U16_F32)
4334   NODE_NAME_CASE(CVT_PK_I16_I32)
4335   NODE_NAME_CASE(CVT_PK_U16_U32)
4336   NODE_NAME_CASE(FP_TO_FP16)
4337   NODE_NAME_CASE(FP16_ZEXT)
4338   NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
4339   NODE_NAME_CASE(CONST_DATA_PTR)
4340   NODE_NAME_CASE(PC_ADD_REL_OFFSET)
4341   NODE_NAME_CASE(LDS)
4342   NODE_NAME_CASE(DUMMY_CHAIN)
4343   case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
4344   NODE_NAME_CASE(LOAD_D16_HI)
4345   NODE_NAME_CASE(LOAD_D16_LO)
4346   NODE_NAME_CASE(LOAD_D16_HI_I8)
4347   NODE_NAME_CASE(LOAD_D16_HI_U8)
4348   NODE_NAME_CASE(LOAD_D16_LO_I8)
4349   NODE_NAME_CASE(LOAD_D16_LO_U8)
4350   NODE_NAME_CASE(STORE_MSKOR)
4351   NODE_NAME_CASE(LOAD_CONSTANT)
4352   NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
4353   NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16)
4354   NODE_NAME_CASE(TBUFFER_LOAD_FORMAT)
4355   NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16)
4356   NODE_NAME_CASE(DS_ORDERED_COUNT)
4357   NODE_NAME_CASE(ATOMIC_CMP_SWAP)
4358   NODE_NAME_CASE(ATOMIC_INC)
4359   NODE_NAME_CASE(ATOMIC_DEC)
4360   NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
4361   NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
4362   NODE_NAME_CASE(BUFFER_LOAD)
4363   NODE_NAME_CASE(BUFFER_LOAD_UBYTE)
4364   NODE_NAME_CASE(BUFFER_LOAD_USHORT)
4365   NODE_NAME_CASE(BUFFER_LOAD_BYTE)
4366   NODE_NAME_CASE(BUFFER_LOAD_SHORT)
4367   NODE_NAME_CASE(BUFFER_LOAD_FORMAT)
4368   NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16)
4369   NODE_NAME_CASE(SBUFFER_LOAD)
4370   NODE_NAME_CASE(BUFFER_STORE)
4371   NODE_NAME_CASE(BUFFER_STORE_BYTE)
4372   NODE_NAME_CASE(BUFFER_STORE_SHORT)
4373   NODE_NAME_CASE(BUFFER_STORE_FORMAT)
4374   NODE_NAME_CASE(BUFFER_STORE_FORMAT_D16)
4375   NODE_NAME_CASE(BUFFER_ATOMIC_SWAP)
4376   NODE_NAME_CASE(BUFFER_ATOMIC_ADD)
4377   NODE_NAME_CASE(BUFFER_ATOMIC_SUB)
4378   NODE_NAME_CASE(BUFFER_ATOMIC_SMIN)
4379   NODE_NAME_CASE(BUFFER_ATOMIC_UMIN)
4380   NODE_NAME_CASE(BUFFER_ATOMIC_SMAX)
4381   NODE_NAME_CASE(BUFFER_ATOMIC_UMAX)
4382   NODE_NAME_CASE(BUFFER_ATOMIC_AND)
4383   NODE_NAME_CASE(BUFFER_ATOMIC_OR)
4384   NODE_NAME_CASE(BUFFER_ATOMIC_XOR)
4385   NODE_NAME_CASE(BUFFER_ATOMIC_INC)
4386   NODE_NAME_CASE(BUFFER_ATOMIC_DEC)
4387   NODE_NAME_CASE(BUFFER_ATOMIC_CMPSWAP)
4388   NODE_NAME_CASE(BUFFER_ATOMIC_FADD)
4389   NODE_NAME_CASE(BUFFER_ATOMIC_PK_FADD)
4390   NODE_NAME_CASE(ATOMIC_PK_FADD)
4391 
4392   case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
4393   }
4394   return nullptr;
4395 }
4396 
4397 SDValue AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand,
4398                                               SelectionDAG &DAG, int Enabled,
4399                                               int &RefinementSteps,
4400                                               bool &UseOneConstNR,
4401                                               bool Reciprocal) const {
4402   EVT VT = Operand.getValueType();
4403 
4404   if (VT == MVT::f32) {
4405     RefinementSteps = 0;
4406     return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
4407   }
4408 
4409   // TODO: There is also f64 rsq instruction, but the documentation is less
4410   // clear on its precision.
4411 
4412   return SDValue();
4413 }
4414 
4415 SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
4416                                                SelectionDAG &DAG, int Enabled,
4417                                                int &RefinementSteps) const {
4418   EVT VT = Operand.getValueType();
4419 
4420   if (VT == MVT::f32) {
4421     // Reciprocal, < 1 ulp error.
4422     //
4423     // This reciprocal approximation converges to < 0.5 ulp error with one
4424     // newton rhapson performed with two fused multiple adds (FMAs).
4425 
4426     RefinementSteps = 0;
4427     return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
4428   }
4429 
4430   // TODO: There is also f64 rcp instruction, but the documentation is less
4431   // clear on its precision.
4432 
4433   return SDValue();
4434 }
4435 
4436 void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
4437     const SDValue Op, KnownBits &Known,
4438     const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
4439 
4440   Known.resetAll(); // Don't know anything.
4441 
4442   unsigned Opc = Op.getOpcode();
4443 
4444   switch (Opc) {
4445   default:
4446     break;
4447   case AMDGPUISD::CARRY:
4448   case AMDGPUISD::BORROW: {
4449     Known.Zero = APInt::getHighBitsSet(32, 31);
4450     break;
4451   }
4452 
4453   case AMDGPUISD::BFE_I32:
4454   case AMDGPUISD::BFE_U32: {
4455     ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4456     if (!CWidth)
4457       return;
4458 
4459     uint32_t Width = CWidth->getZExtValue() & 0x1f;
4460 
4461     if (Opc == AMDGPUISD::BFE_U32)
4462       Known.Zero = APInt::getHighBitsSet(32, 32 - Width);
4463 
4464     break;
4465   }
4466   case AMDGPUISD::FP_TO_FP16:
4467   case AMDGPUISD::FP16_ZEXT: {
4468     unsigned BitWidth = Known.getBitWidth();
4469 
4470     // High bits are zero.
4471     Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
4472     break;
4473   }
4474   case AMDGPUISD::MUL_U24:
4475   case AMDGPUISD::MUL_I24: {
4476     KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
4477     KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
4478     unsigned TrailZ = LHSKnown.countMinTrailingZeros() +
4479                       RHSKnown.countMinTrailingZeros();
4480     Known.Zero.setLowBits(std::min(TrailZ, 32u));
4481     // Skip extra check if all bits are known zeros.
4482     if (TrailZ >= 32)
4483       break;
4484 
4485     // Truncate to 24 bits.
4486     LHSKnown = LHSKnown.trunc(24);
4487     RHSKnown = RHSKnown.trunc(24);
4488 
4489     if (Opc == AMDGPUISD::MUL_I24) {
4490       unsigned LHSValBits = 24 - LHSKnown.countMinSignBits();
4491       unsigned RHSValBits = 24 - RHSKnown.countMinSignBits();
4492       unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
4493       if (MaxValBits >= 32)
4494         break;
4495       bool LHSNegative = LHSKnown.isNegative();
4496       bool LHSNonNegative = LHSKnown.isNonNegative();
4497       bool LHSPositive = LHSKnown.isStrictlyPositive();
4498       bool RHSNegative = RHSKnown.isNegative();
4499       bool RHSNonNegative = RHSKnown.isNonNegative();
4500       bool RHSPositive = RHSKnown.isStrictlyPositive();
4501 
4502       if ((LHSNonNegative && RHSNonNegative) || (LHSNegative && RHSNegative))
4503         Known.Zero.setHighBits(32 - MaxValBits);
4504       else if ((LHSNegative && RHSPositive) || (LHSPositive && RHSNegative))
4505         Known.One.setHighBits(32 - MaxValBits);
4506     } else {
4507       unsigned LHSValBits = 24 - LHSKnown.countMinLeadingZeros();
4508       unsigned RHSValBits = 24 - RHSKnown.countMinLeadingZeros();
4509       unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
4510       if (MaxValBits >= 32)
4511         break;
4512       Known.Zero.setHighBits(32 - MaxValBits);
4513     }
4514     break;
4515   }
4516   case AMDGPUISD::PERM: {
4517     ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4518     if (!CMask)
4519       return;
4520 
4521     KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
4522     KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
4523     unsigned Sel = CMask->getZExtValue();
4524 
4525     for (unsigned I = 0; I < 32; I += 8) {
4526       unsigned SelBits = Sel & 0xff;
4527       if (SelBits < 4) {
4528         SelBits *= 8;
4529         Known.One |= ((RHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
4530         Known.Zero |= ((RHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
4531       } else if (SelBits < 7) {
4532         SelBits = (SelBits & 3) * 8;
4533         Known.One |= ((LHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
4534         Known.Zero |= ((LHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
4535       } else if (SelBits == 0x0c) {
4536         Known.Zero |= 0xFFull << I;
4537       } else if (SelBits > 0x0c) {
4538         Known.One |= 0xFFull << I;
4539       }
4540       Sel >>= 8;
4541     }
4542     break;
4543   }
4544   case AMDGPUISD::BUFFER_LOAD_UBYTE:  {
4545     Known.Zero.setHighBits(24);
4546     break;
4547   }
4548   case AMDGPUISD::BUFFER_LOAD_USHORT: {
4549     Known.Zero.setHighBits(16);
4550     break;
4551   }
4552   case AMDGPUISD::LDS: {
4553     auto GA = cast<GlobalAddressSDNode>(Op.getOperand(0).getNode());
4554     unsigned Align = GA->getGlobal()->getAlignment();
4555 
4556     Known.Zero.setHighBits(16);
4557     if (Align)
4558       Known.Zero.setLowBits(Log2_32(Align));
4559     break;
4560   }
4561   case ISD::INTRINSIC_WO_CHAIN: {
4562     unsigned IID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4563     switch (IID) {
4564     case Intrinsic::amdgcn_mbcnt_lo:
4565     case Intrinsic::amdgcn_mbcnt_hi: {
4566       const GCNSubtarget &ST =
4567           DAG.getMachineFunction().getSubtarget<GCNSubtarget>();
4568       // These return at most the wavefront size - 1.
4569       unsigned Size = Op.getValueType().getSizeInBits();
4570       Known.Zero.setHighBits(Size - ST.getWavefrontSizeLog2());
4571       break;
4572     }
4573     default:
4574       break;
4575     }
4576   }
4577   }
4578 }
4579 
4580 unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
4581     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
4582     unsigned Depth) const {
4583   switch (Op.getOpcode()) {
4584   case AMDGPUISD::BFE_I32: {
4585     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4586     if (!Width)
4587       return 1;
4588 
4589     unsigned SignBits = 32 - Width->getZExtValue() + 1;
4590     if (!isNullConstant(Op.getOperand(1)))
4591       return SignBits;
4592 
4593     // TODO: Could probably figure something out with non-0 offsets.
4594     unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4595     return std::max(SignBits, Op0SignBits);
4596   }
4597 
4598   case AMDGPUISD::BFE_U32: {
4599     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4600     return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
4601   }
4602 
4603   case AMDGPUISD::CARRY:
4604   case AMDGPUISD::BORROW:
4605     return 31;
4606   case AMDGPUISD::BUFFER_LOAD_BYTE:
4607     return 25;
4608   case AMDGPUISD::BUFFER_LOAD_SHORT:
4609     return 17;
4610   case AMDGPUISD::BUFFER_LOAD_UBYTE:
4611     return 24;
4612   case AMDGPUISD::BUFFER_LOAD_USHORT:
4613     return 16;
4614   case AMDGPUISD::FP_TO_FP16:
4615   case AMDGPUISD::FP16_ZEXT:
4616     return 16;
4617   default:
4618     return 1;
4619   }
4620 }
4621 
4622 bool AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
4623                                                         const SelectionDAG &DAG,
4624                                                         bool SNaN,
4625                                                         unsigned Depth) const {
4626   unsigned Opcode = Op.getOpcode();
4627   switch (Opcode) {
4628   case AMDGPUISD::FMIN_LEGACY:
4629   case AMDGPUISD::FMAX_LEGACY: {
4630     if (SNaN)
4631       return true;
4632 
4633     // TODO: Can check no nans on one of the operands for each one, but which
4634     // one?
4635     return false;
4636   }
4637   case AMDGPUISD::FMUL_LEGACY:
4638   case AMDGPUISD::CVT_PKRTZ_F16_F32: {
4639     if (SNaN)
4640       return true;
4641     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4642            DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4643   }
4644   case AMDGPUISD::FMED3:
4645   case AMDGPUISD::FMIN3:
4646   case AMDGPUISD::FMAX3:
4647   case AMDGPUISD::FMAD_FTZ: {
4648     if (SNaN)
4649       return true;
4650     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4651            DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4652            DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4653   }
4654   case AMDGPUISD::CVT_F32_UBYTE0:
4655   case AMDGPUISD::CVT_F32_UBYTE1:
4656   case AMDGPUISD::CVT_F32_UBYTE2:
4657   case AMDGPUISD::CVT_F32_UBYTE3:
4658     return true;
4659 
4660   case AMDGPUISD::RCP:
4661   case AMDGPUISD::RSQ:
4662   case AMDGPUISD::RCP_LEGACY:
4663   case AMDGPUISD::RSQ_LEGACY:
4664   case AMDGPUISD::RSQ_CLAMP: {
4665     if (SNaN)
4666       return true;
4667 
4668     // TODO: Need is known positive check.
4669     return false;
4670   }
4671   case AMDGPUISD::LDEXP:
4672   case AMDGPUISD::FRACT: {
4673     if (SNaN)
4674       return true;
4675     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4676   }
4677   case AMDGPUISD::DIV_SCALE:
4678   case AMDGPUISD::DIV_FMAS:
4679   case AMDGPUISD::DIV_FIXUP:
4680   case AMDGPUISD::TRIG_PREOP:
4681     // TODO: Refine on operands.
4682     return SNaN;
4683   case AMDGPUISD::SIN_HW:
4684   case AMDGPUISD::COS_HW: {
4685     // TODO: Need check for infinity
4686     return SNaN;
4687   }
4688   case ISD::INTRINSIC_WO_CHAIN: {
4689     unsigned IntrinsicID
4690       = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4691     // TODO: Handle more intrinsics
4692     switch (IntrinsicID) {
4693     case Intrinsic::amdgcn_cubeid:
4694       return true;
4695 
4696     case Intrinsic::amdgcn_frexp_mant: {
4697       if (SNaN)
4698         return true;
4699       return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4700     }
4701     case Intrinsic::amdgcn_cvt_pkrtz: {
4702       if (SNaN)
4703         return true;
4704       return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4705              DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4706     }
4707     case Intrinsic::amdgcn_fdot2:
4708       // TODO: Refine on operand
4709       return SNaN;
4710     default:
4711       return false;
4712     }
4713   }
4714   default:
4715     return false;
4716   }
4717 }
4718 
4719 TargetLowering::AtomicExpansionKind
4720 AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
4721   switch (RMW->getOperation()) {
4722   case AtomicRMWInst::Nand:
4723   case AtomicRMWInst::FAdd:
4724   case AtomicRMWInst::FSub:
4725     return AtomicExpansionKind::CmpXChg;
4726   default:
4727     return AtomicExpansionKind::None;
4728   }
4729 }
4730