1 //===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This is the parent TargetLowering class for hardware code gen
11 /// targets.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #define AMDGPU_LOG2E_F     1.44269504088896340735992468100189214f
16 #define AMDGPU_LN2_F       0.693147180559945309417232121458176568f
17 #define AMDGPU_LN10_F      2.30258509299404568401799145468436421f
18 
19 #include "AMDGPUISelLowering.h"
20 #include "AMDGPU.h"
21 #include "AMDGPUCallLowering.h"
22 #include "AMDGPUFrameLowering.h"
23 #include "AMDGPURegisterInfo.h"
24 #include "AMDGPUSubtarget.h"
25 #include "AMDGPUTargetMachine.h"
26 #include "Utils/AMDGPUBaseInfo.h"
27 #include "R600MachineFunctionInfo.h"
28 #include "SIInstrInfo.h"
29 #include "SIMachineFunctionInfo.h"
30 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
31 #include "llvm/CodeGen/Analysis.h"
32 #include "llvm/CodeGen/CallingConvLower.h"
33 #include "llvm/CodeGen/MachineFunction.h"
34 #include "llvm/CodeGen/MachineRegisterInfo.h"
35 #include "llvm/CodeGen/SelectionDAG.h"
36 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
37 #include "llvm/IR/DataLayout.h"
38 #include "llvm/IR/DiagnosticInfo.h"
39 #include "llvm/Support/KnownBits.h"
40 using namespace llvm;
41 
42 #include "AMDGPUGenCallingConv.inc"
43 
44 // Find a larger type to do a load / store of a vector with.
45 EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
46   unsigned StoreSize = VT.getStoreSizeInBits();
47   if (StoreSize <= 32)
48     return EVT::getIntegerVT(Ctx, StoreSize);
49 
50   assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
51   return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
52 }
53 
54 unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
55   EVT VT = Op.getValueType();
56   KnownBits Known = DAG.computeKnownBits(Op);
57   return VT.getSizeInBits() - Known.countMinLeadingZeros();
58 }
59 
60 unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
61   EVT VT = Op.getValueType();
62 
63   // In order for this to be a signed 24-bit value, bit 23, must
64   // be a sign bit.
65   return VT.getSizeInBits() - DAG.ComputeNumSignBits(Op);
66 }
67 
68 AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
69                                            const AMDGPUSubtarget &STI)
70     : TargetLowering(TM), Subtarget(&STI) {
71   // Lower floating point store/load to integer store/load to reduce the number
72   // of patterns in tablegen.
73   setOperationAction(ISD::LOAD, MVT::f32, Promote);
74   AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);
75 
76   setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
77   AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);
78 
79   setOperationAction(ISD::LOAD, MVT::v3f32, Promote);
80   AddPromotedToType(ISD::LOAD, MVT::v3f32, MVT::v3i32);
81 
82   setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
83   AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
84 
85   setOperationAction(ISD::LOAD, MVT::v5f32, Promote);
86   AddPromotedToType(ISD::LOAD, MVT::v5f32, MVT::v5i32);
87 
88   setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
89   AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);
90 
91   setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
92   AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);
93 
94   setOperationAction(ISD::LOAD, MVT::v32f32, Promote);
95   AddPromotedToType(ISD::LOAD, MVT::v32f32, MVT::v32i32);
96 
97   setOperationAction(ISD::LOAD, MVT::i64, Promote);
98   AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
99 
100   setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
101   AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);
102 
103   setOperationAction(ISD::LOAD, MVT::f64, Promote);
104   AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);
105 
106   setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
107   AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);
108 
109   // There are no 64-bit extloads. These should be done as a 32-bit extload and
110   // an extension to 64-bit.
111   for (MVT VT : MVT::integer_valuetypes()) {
112     setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
113     setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
114     setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
115   }
116 
117   for (MVT VT : MVT::integer_valuetypes()) {
118     if (VT == MVT::i64)
119       continue;
120 
121     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
122     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
123     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
124     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
125 
126     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
127     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
128     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
129     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);
130 
131     setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
132     setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
133     setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
134     setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
135   }
136 
137   for (MVT VT : MVT::integer_vector_valuetypes()) {
138     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
139     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
140     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
141     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
142     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
143     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
144     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
145     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
146     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
147     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v3i16, Expand);
148     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v3i16, Expand);
149     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v3i16, Expand);
150     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
151     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
152     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
153   }
154 
155   setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
156   setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
157   setLoadExtAction(ISD::EXTLOAD, MVT::v3f32, MVT::v3f16, Expand);
158   setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
159   setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
160 
161   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
162   setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
163   setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
164   setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);
165 
166   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
167   setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
168   setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
169   setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);
170 
171   setOperationAction(ISD::STORE, MVT::f32, Promote);
172   AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);
173 
174   setOperationAction(ISD::STORE, MVT::v2f32, Promote);
175   AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);
176 
177   setOperationAction(ISD::STORE, MVT::v3f32, Promote);
178   AddPromotedToType(ISD::STORE, MVT::v3f32, MVT::v3i32);
179 
180   setOperationAction(ISD::STORE, MVT::v4f32, Promote);
181   AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);
182 
183   setOperationAction(ISD::STORE, MVT::v5f32, Promote);
184   AddPromotedToType(ISD::STORE, MVT::v5f32, MVT::v5i32);
185 
186   setOperationAction(ISD::STORE, MVT::v8f32, Promote);
187   AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);
188 
189   setOperationAction(ISD::STORE, MVT::v16f32, Promote);
190   AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);
191 
192   setOperationAction(ISD::STORE, MVT::v32f32, Promote);
193   AddPromotedToType(ISD::STORE, MVT::v32f32, MVT::v32i32);
194 
195   setOperationAction(ISD::STORE, MVT::i64, Promote);
196   AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
197 
198   setOperationAction(ISD::STORE, MVT::v2i64, Promote);
199   AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);
200 
201   setOperationAction(ISD::STORE, MVT::f64, Promote);
202   AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);
203 
204   setOperationAction(ISD::STORE, MVT::v2f64, Promote);
205   AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);
206 
207   setTruncStoreAction(MVT::i64, MVT::i1, Expand);
208   setTruncStoreAction(MVT::i64, MVT::i8, Expand);
209   setTruncStoreAction(MVT::i64, MVT::i16, Expand);
210   setTruncStoreAction(MVT::i64, MVT::i32, Expand);
211 
212   setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
213   setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
214   setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
215   setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);
216 
217   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
218   setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
219   setTruncStoreAction(MVT::v3f32, MVT::v3f16, Expand);
220   setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
221   setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);
222 
223   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
224   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
225 
226   setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
227   setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);
228 
229   setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
230   setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);
231 
232   setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
233   setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);
234 
235 
236   setOperationAction(ISD::Constant, MVT::i32, Legal);
237   setOperationAction(ISD::Constant, MVT::i64, Legal);
238   setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
239   setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
240 
241   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
242   setOperationAction(ISD::BRIND, MVT::Other, Expand);
243 
244   // This is totally unsupported, just custom lower to produce an error.
245   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
246 
247   // Library functions.  These default to Expand, but we have instructions
248   // for them.
249   setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
250   setOperationAction(ISD::FEXP2,  MVT::f32, Legal);
251   setOperationAction(ISD::FPOW,   MVT::f32, Legal);
252   setOperationAction(ISD::FLOG2,  MVT::f32, Legal);
253   setOperationAction(ISD::FABS,   MVT::f32, Legal);
254   setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
255   setOperationAction(ISD::FRINT,  MVT::f32, Legal);
256   setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
257   setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
258   setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
259 
260   setOperationAction(ISD::FROUND, MVT::f32, Custom);
261   setOperationAction(ISD::FROUND, MVT::f64, Custom);
262 
263   setOperationAction(ISD::FLOG, MVT::f32, Custom);
264   setOperationAction(ISD::FLOG10, MVT::f32, Custom);
265   setOperationAction(ISD::FEXP, MVT::f32, Custom);
266 
267 
268   setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
269   setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);
270 
271   setOperationAction(ISD::FREM, MVT::f32, Custom);
272   setOperationAction(ISD::FREM, MVT::f64, Custom);
273 
274   // Expand to fneg + fadd.
275   setOperationAction(ISD::FSUB, MVT::f64, Expand);
276 
277   setOperationAction(ISD::CONCAT_VECTORS, MVT::v3i32, Custom);
278   setOperationAction(ISD::CONCAT_VECTORS, MVT::v3f32, Custom);
279   setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
280   setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
281   setOperationAction(ISD::CONCAT_VECTORS, MVT::v5i32, Custom);
282   setOperationAction(ISD::CONCAT_VECTORS, MVT::v5f32, Custom);
283   setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
284   setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
285   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
286   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
287   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3f32, Custom);
288   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3i32, Custom);
289   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
290   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
291   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v5f32, Custom);
292   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v5i32, Custom);
293   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
294   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);
295   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16f32, Custom);
296   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16i32, Custom);
297   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32f32, Custom);
298   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32i32, Custom);
299 
300   setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
301   setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom);
302   setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);
303 
304   const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
305   for (MVT VT : ScalarIntVTs) {
306     // These should use [SU]DIVREM, so set them to expand
307     setOperationAction(ISD::SDIV, VT, Expand);
308     setOperationAction(ISD::UDIV, VT, Expand);
309     setOperationAction(ISD::SREM, VT, Expand);
310     setOperationAction(ISD::UREM, VT, Expand);
311 
312     // GPU does not have divrem function for signed or unsigned.
313     setOperationAction(ISD::SDIVREM, VT, Custom);
314     setOperationAction(ISD::UDIVREM, VT, Custom);
315 
316     // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
317     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
318     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
319 
320     setOperationAction(ISD::BSWAP, VT, Expand);
321     setOperationAction(ISD::CTTZ, VT, Expand);
322     setOperationAction(ISD::CTLZ, VT, Expand);
323 
324     // AMDGPU uses ADDC/SUBC/ADDE/SUBE
325     setOperationAction(ISD::ADDC, VT, Legal);
326     setOperationAction(ISD::SUBC, VT, Legal);
327     setOperationAction(ISD::ADDE, VT, Legal);
328     setOperationAction(ISD::SUBE, VT, Legal);
329   }
330 
331   // The hardware supports 32-bit ROTR, but not ROTL.
332   setOperationAction(ISD::ROTL, MVT::i32, Expand);
333   setOperationAction(ISD::ROTL, MVT::i64, Expand);
334   setOperationAction(ISD::ROTR, MVT::i64, Expand);
335 
336   setOperationAction(ISD::MUL, MVT::i64, Expand);
337   setOperationAction(ISD::MULHU, MVT::i64, Expand);
338   setOperationAction(ISD::MULHS, MVT::i64, Expand);
339   setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
340   setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
341   setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
342   setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
343   setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
344 
345   setOperationAction(ISD::SMIN, MVT::i32, Legal);
346   setOperationAction(ISD::UMIN, MVT::i32, Legal);
347   setOperationAction(ISD::SMAX, MVT::i32, Legal);
348   setOperationAction(ISD::UMAX, MVT::i32, Legal);
349 
350   setOperationAction(ISD::CTTZ, MVT::i64, Custom);
351   setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
352   setOperationAction(ISD::CTLZ, MVT::i64, Custom);
353   setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
354 
355   static const MVT::SimpleValueType VectorIntTypes[] = {
356     MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32
357   };
358 
359   for (MVT VT : VectorIntTypes) {
360     // Expand the following operations for the current type by default.
361     setOperationAction(ISD::ADD,  VT, Expand);
362     setOperationAction(ISD::AND,  VT, Expand);
363     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
364     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
365     setOperationAction(ISD::MUL,  VT, Expand);
366     setOperationAction(ISD::MULHU, VT, Expand);
367     setOperationAction(ISD::MULHS, VT, Expand);
368     setOperationAction(ISD::OR,   VT, Expand);
369     setOperationAction(ISD::SHL,  VT, Expand);
370     setOperationAction(ISD::SRA,  VT, Expand);
371     setOperationAction(ISD::SRL,  VT, Expand);
372     setOperationAction(ISD::ROTL, VT, Expand);
373     setOperationAction(ISD::ROTR, VT, Expand);
374     setOperationAction(ISD::SUB,  VT, Expand);
375     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
376     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
377     setOperationAction(ISD::SDIV, VT, Expand);
378     setOperationAction(ISD::UDIV, VT, Expand);
379     setOperationAction(ISD::SREM, VT, Expand);
380     setOperationAction(ISD::UREM, VT, Expand);
381     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
382     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
383     setOperationAction(ISD::SDIVREM, VT, Custom);
384     setOperationAction(ISD::UDIVREM, VT, Expand);
385     setOperationAction(ISD::SELECT, VT, Expand);
386     setOperationAction(ISD::VSELECT, VT, Expand);
387     setOperationAction(ISD::SELECT_CC, VT, Expand);
388     setOperationAction(ISD::XOR,  VT, Expand);
389     setOperationAction(ISD::BSWAP, VT, Expand);
390     setOperationAction(ISD::CTPOP, VT, Expand);
391     setOperationAction(ISD::CTTZ, VT, Expand);
392     setOperationAction(ISD::CTLZ, VT, Expand);
393     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
394     setOperationAction(ISD::SETCC, VT, Expand);
395   }
396 
397   static const MVT::SimpleValueType FloatVectorTypes[] = {
398      MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32
399   };
400 
401   for (MVT VT : FloatVectorTypes) {
402     setOperationAction(ISD::FABS, VT, Expand);
403     setOperationAction(ISD::FMINNUM, VT, Expand);
404     setOperationAction(ISD::FMAXNUM, VT, Expand);
405     setOperationAction(ISD::FADD, VT, Expand);
406     setOperationAction(ISD::FCEIL, VT, Expand);
407     setOperationAction(ISD::FCOS, VT, Expand);
408     setOperationAction(ISD::FDIV, VT, Expand);
409     setOperationAction(ISD::FEXP2, VT, Expand);
410     setOperationAction(ISD::FEXP, VT, Expand);
411     setOperationAction(ISD::FLOG2, VT, Expand);
412     setOperationAction(ISD::FREM, VT, Expand);
413     setOperationAction(ISD::FLOG, VT, Expand);
414     setOperationAction(ISD::FLOG10, VT, Expand);
415     setOperationAction(ISD::FPOW, VT, Expand);
416     setOperationAction(ISD::FFLOOR, VT, Expand);
417     setOperationAction(ISD::FTRUNC, VT, Expand);
418     setOperationAction(ISD::FMUL, VT, Expand);
419     setOperationAction(ISD::FMA, VT, Expand);
420     setOperationAction(ISD::FRINT, VT, Expand);
421     setOperationAction(ISD::FNEARBYINT, VT, Expand);
422     setOperationAction(ISD::FSQRT, VT, Expand);
423     setOperationAction(ISD::FSIN, VT, Expand);
424     setOperationAction(ISD::FSUB, VT, Expand);
425     setOperationAction(ISD::FNEG, VT, Expand);
426     setOperationAction(ISD::VSELECT, VT, Expand);
427     setOperationAction(ISD::SELECT_CC, VT, Expand);
428     setOperationAction(ISD::FCOPYSIGN, VT, Expand);
429     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
430     setOperationAction(ISD::SETCC, VT, Expand);
431     setOperationAction(ISD::FCANONICALIZE, VT, Expand);
432   }
433 
434   // This causes using an unrolled select operation rather than expansion with
435   // bit operations. This is in general better, but the alternative using BFI
436   // instructions may be better if the select sources are SGPRs.
437   setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
438   AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);
439 
440   setOperationAction(ISD::SELECT, MVT::v3f32, Promote);
441   AddPromotedToType(ISD::SELECT, MVT::v3f32, MVT::v3i32);
442 
443   setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
444   AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);
445 
446   setOperationAction(ISD::SELECT, MVT::v5f32, Promote);
447   AddPromotedToType(ISD::SELECT, MVT::v5f32, MVT::v5i32);
448 
449   // There are no libcalls of any kind.
450   for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
451     setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);
452 
453   setBooleanContents(ZeroOrNegativeOneBooleanContent);
454   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
455 
456   setSchedulingPreference(Sched::RegPressure);
457   setJumpIsExpensive(true);
458 
459   // FIXME: This is only partially true. If we have to do vector compares, any
460   // SGPR pair can be a condition register. If we have a uniform condition, we
461   // are better off doing SALU operations, where there is only one SCC. For now,
462   // we don't have a way of knowing during instruction selection if a condition
463   // will be uniform and we always use vector compares. Assume we are using
464   // vector compares until that is fixed.
465   setHasMultipleConditionRegisters(true);
466 
467   setMinCmpXchgSizeInBits(32);
468   setSupportsUnalignedAtomics(false);
469 
470   PredictableSelectIsExpensive = false;
471 
472   // We want to find all load dependencies for long chains of stores to enable
473   // merging into very wide vectors. The problem is with vectors with > 4
474   // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
475   // vectors are a legal type, even though we have to split the loads
476   // usually. When we can more precisely specify load legality per address
477   // space, we should be able to make FindBetterChain/MergeConsecutiveStores
478   // smarter so that they can figure out what to do in 2 iterations without all
479   // N > 4 stores on the same chain.
480   GatherAllAliasesMaxDepth = 16;
481 
482   // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to worry
483   // about these during lowering.
484   MaxStoresPerMemcpy  = 0xffffffff;
485   MaxStoresPerMemmove = 0xffffffff;
486   MaxStoresPerMemset  = 0xffffffff;
487 
488   setTargetDAGCombine(ISD::BITCAST);
489   setTargetDAGCombine(ISD::SHL);
490   setTargetDAGCombine(ISD::SRA);
491   setTargetDAGCombine(ISD::SRL);
492   setTargetDAGCombine(ISD::TRUNCATE);
493   setTargetDAGCombine(ISD::MUL);
494   setTargetDAGCombine(ISD::MULHU);
495   setTargetDAGCombine(ISD::MULHS);
496   setTargetDAGCombine(ISD::SELECT);
497   setTargetDAGCombine(ISD::SELECT_CC);
498   setTargetDAGCombine(ISD::STORE);
499   setTargetDAGCombine(ISD::FADD);
500   setTargetDAGCombine(ISD::FSUB);
501   setTargetDAGCombine(ISD::FNEG);
502   setTargetDAGCombine(ISD::FABS);
503   setTargetDAGCombine(ISD::AssertZext);
504   setTargetDAGCombine(ISD::AssertSext);
505 }
506 
507 //===----------------------------------------------------------------------===//
508 // Target Information
509 //===----------------------------------------------------------------------===//
510 
511 LLVM_READNONE
512 static bool fnegFoldsIntoOp(unsigned Opc) {
513   switch (Opc) {
514   case ISD::FADD:
515   case ISD::FSUB:
516   case ISD::FMUL:
517   case ISD::FMA:
518   case ISD::FMAD:
519   case ISD::FMINNUM:
520   case ISD::FMAXNUM:
521   case ISD::FMINNUM_IEEE:
522   case ISD::FMAXNUM_IEEE:
523   case ISD::FSIN:
524   case ISD::FTRUNC:
525   case ISD::FRINT:
526   case ISD::FNEARBYINT:
527   case ISD::FCANONICALIZE:
528   case AMDGPUISD::RCP:
529   case AMDGPUISD::RCP_LEGACY:
530   case AMDGPUISD::RCP_IFLAG:
531   case AMDGPUISD::SIN_HW:
532   case AMDGPUISD::FMUL_LEGACY:
533   case AMDGPUISD::FMIN_LEGACY:
534   case AMDGPUISD::FMAX_LEGACY:
535   case AMDGPUISD::FMED3:
536     return true;
537   default:
538     return false;
539   }
540 }
541 
542 /// \p returns true if the operation will definitely need to use a 64-bit
543 /// encoding, and thus will use a VOP3 encoding regardless of the source
544 /// modifiers.
545 LLVM_READONLY
546 static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
547   return N->getNumOperands() > 2 || VT == MVT::f64;
548 }
549 
550 // Most FP instructions support source modifiers, but this could be refined
551 // slightly.
552 LLVM_READONLY
553 static bool hasSourceMods(const SDNode *N) {
554   if (isa<MemSDNode>(N))
555     return false;
556 
557   switch (N->getOpcode()) {
558   case ISD::CopyToReg:
559   case ISD::SELECT:
560   case ISD::FDIV:
561   case ISD::FREM:
562   case ISD::INLINEASM:
563   case ISD::INLINEASM_BR:
564   case AMDGPUISD::INTERP_P1:
565   case AMDGPUISD::INTERP_P2:
566   case AMDGPUISD::DIV_SCALE:
567 
568   // TODO: Should really be looking at the users of the bitcast. These are
569   // problematic because bitcasts are used to legalize all stores to integer
570   // types.
571   case ISD::BITCAST:
572     return false;
573   default:
574     return true;
575   }
576 }
577 
578 bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
579                                                  unsigned CostThreshold) {
580   // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
581   // it is truly free to use a source modifier in all cases. If there are
582   // multiple users but for each one will necessitate using VOP3, there will be
583   // a code size increase. Try to avoid increasing code size unless we know it
584   // will save on the instruction count.
585   unsigned NumMayIncreaseSize = 0;
586   MVT VT = N->getValueType(0).getScalarType().getSimpleVT();
587 
588   // XXX - Should this limit number of uses to check?
589   for (const SDNode *U : N->uses()) {
590     if (!hasSourceMods(U))
591       return false;
592 
593     if (!opMustUseVOP3Encoding(U, VT)) {
594       if (++NumMayIncreaseSize > CostThreshold)
595         return false;
596     }
597   }
598 
599   return true;
600 }
601 
602 MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
603   return MVT::i32;
604 }
605 
606 bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
607   return true;
608 }
609 
610 // The backend supports 32 and 64 bit floating point immediates.
611 // FIXME: Why are we reporting vectors of FP immediates as legal?
612 bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
613                                         bool ForCodeSize) const {
614   EVT ScalarVT = VT.getScalarType();
615   return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
616          (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
617 }
618 
619 // We don't want to shrink f64 / f32 constants.
620 bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
621   EVT ScalarVT = VT.getScalarType();
622   return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
623 }
624 
625 bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
626                                                  ISD::LoadExtType ExtTy,
627                                                  EVT NewVT) const {
628   // TODO: This may be worth removing. Check regression tests for diffs.
629   if (!TargetLoweringBase::shouldReduceLoadWidth(N, ExtTy, NewVT))
630     return false;
631 
632   unsigned NewSize = NewVT.getStoreSizeInBits();
633 
634   // If we are reducing to a 32-bit load, this is always better.
635   if (NewSize == 32)
636     return true;
637 
638   EVT OldVT = N->getValueType(0);
639   unsigned OldSize = OldVT.getStoreSizeInBits();
640 
641   MemSDNode *MN = cast<MemSDNode>(N);
642   unsigned AS = MN->getAddressSpace();
643   // Do not shrink an aligned scalar load to sub-dword.
644   // Scalar engine cannot do sub-dword loads.
645   if (OldSize >= 32 && NewSize < 32 && MN->getAlignment() >= 4 &&
646       (AS == AMDGPUAS::CONSTANT_ADDRESS ||
647        AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
648        (isa<LoadSDNode>(N) &&
649         AS == AMDGPUAS::GLOBAL_ADDRESS && MN->isInvariant())) &&
650       AMDGPUInstrInfo::isUniformMMO(MN->getMemOperand()))
651     return false;
652 
653   // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
654   // extloads, so doing one requires using a buffer_load. In cases where we
655   // still couldn't use a scalar load, using the wider load shouldn't really
656   // hurt anything.
657 
658   // If the old size already had to be an extload, there's no harm in continuing
659   // to reduce the width.
660   return (OldSize < 32);
661 }
662 
663 bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, EVT CastTy,
664                                                    const SelectionDAG &DAG,
665                                                    const MachineMemOperand &MMO) const {
666 
667   assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());
668 
669   if (LoadTy.getScalarType() == MVT::i32)
670     return false;
671 
672   unsigned LScalarSize = LoadTy.getScalarSizeInBits();
673   unsigned CastScalarSize = CastTy.getScalarSizeInBits();
674 
675   if ((LScalarSize >= CastScalarSize) && (CastScalarSize < 32))
676     return false;
677 
678   bool Fast = false;
679   return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), CastTy,
680                             MMO, &Fast) && Fast;
681 }
682 
683 // SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
684 // profitable with the expansion for 64-bit since it's generally good to
685 // speculate things.
686 // FIXME: These should really have the size as a parameter.
687 bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
688   return true;
689 }
690 
691 bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
692   return true;
693 }
694 
695 bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode * N) const {
696   switch (N->getOpcode()) {
697     default:
698     return false;
699     case ISD::EntryToken:
700     case ISD::TokenFactor:
701       return true;
702     case ISD::INTRINSIC_WO_CHAIN:
703     {
704       unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
705       switch (IntrID) {
706         default:
707         return false;
708         case Intrinsic::amdgcn_readfirstlane:
709         case Intrinsic::amdgcn_readlane:
710           return true;
711       }
712     }
713     break;
714     case ISD::LOAD:
715     {
716       const LoadSDNode * L = dyn_cast<LoadSDNode>(N);
717       if (L->getMemOperand()->getAddrSpace()
718       == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
719         return true;
720       return false;
721     }
722     break;
723   }
724 }
725 
726 //===---------------------------------------------------------------------===//
727 // Target Properties
728 //===---------------------------------------------------------------------===//
729 
730 bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
731   assert(VT.isFloatingPoint());
732 
733   // Packed operations do not have a fabs modifier.
734   return VT == MVT::f32 || VT == MVT::f64 ||
735          (Subtarget->has16BitInsts() && VT == MVT::f16);
736 }
737 
738 bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
739   assert(VT.isFloatingPoint());
740   return VT == MVT::f32 || VT == MVT::f64 ||
741          (Subtarget->has16BitInsts() && VT == MVT::f16) ||
742          (Subtarget->hasVOP3PInsts() && VT == MVT::v2f16);
743 }
744 
745 bool AMDGPUTargetLowering:: storeOfVectorConstantIsCheap(EVT MemVT,
746                                                          unsigned NumElem,
747                                                          unsigned AS) const {
748   return true;
749 }
750 
751 bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
752   // There are few operations which truly have vector input operands. Any vector
753   // operation is going to involve operations on each component, and a
754   // build_vector will be a copy per element, so it always makes sense to use a
755   // build_vector input in place of the extracted element to avoid a copy into a
756   // super register.
757   //
758   // We should probably only do this if all users are extracts only, but this
759   // should be the common case.
760   return true;
761 }
762 
763 bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
764   // Truncate is just accessing a subregister.
765 
766   unsigned SrcSize = Source.getSizeInBits();
767   unsigned DestSize = Dest.getSizeInBits();
768 
769   return DestSize < SrcSize && DestSize % 32 == 0 ;
770 }
771 
772 bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
773   // Truncate is just accessing a subregister.
774 
775   unsigned SrcSize = Source->getScalarSizeInBits();
776   unsigned DestSize = Dest->getScalarSizeInBits();
777 
778   if (DestSize== 16 && Subtarget->has16BitInsts())
779     return SrcSize >= 32;
780 
781   return DestSize < SrcSize && DestSize % 32 == 0;
782 }
783 
784 bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
785   unsigned SrcSize = Src->getScalarSizeInBits();
786   unsigned DestSize = Dest->getScalarSizeInBits();
787 
788   if (SrcSize == 16 && Subtarget->has16BitInsts())
789     return DestSize >= 32;
790 
791   return SrcSize == 32 && DestSize == 64;
792 }
793 
794 bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
795   // Any register load of a 64-bit value really requires 2 32-bit moves. For all
796   // practical purposes, the extra mov 0 to load a 64-bit is free.  As used,
797   // this will enable reducing 64-bit operations the 32-bit, which is always
798   // good.
799 
800   if (Src == MVT::i16)
801     return Dest == MVT::i32 ||Dest == MVT::i64 ;
802 
803   return Src == MVT::i32 && Dest == MVT::i64;
804 }
805 
806 bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
807   return isZExtFree(Val.getValueType(), VT2);
808 }
809 
810 bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
811   // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
812   // limited number of native 64-bit operations. Shrinking an operation to fit
813   // in a single 32-bit register should always be helpful. As currently used,
814   // this is much less general than the name suggests, and is only used in
815   // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
816   // not profitable, and may actually be harmful.
817   return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
818 }
819 
820 //===---------------------------------------------------------------------===//
821 // TargetLowering Callbacks
822 //===---------------------------------------------------------------------===//
823 
824 CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
825                                                   bool IsVarArg) {
826   switch (CC) {
827   case CallingConv::AMDGPU_VS:
828   case CallingConv::AMDGPU_GS:
829   case CallingConv::AMDGPU_PS:
830   case CallingConv::AMDGPU_CS:
831   case CallingConv::AMDGPU_HS:
832   case CallingConv::AMDGPU_ES:
833   case CallingConv::AMDGPU_LS:
834     return CC_AMDGPU;
835   case CallingConv::C:
836   case CallingConv::Fast:
837   case CallingConv::Cold:
838     return CC_AMDGPU_Func;
839   case CallingConv::AMDGPU_KERNEL:
840   case CallingConv::SPIR_KERNEL:
841   default:
842     report_fatal_error("Unsupported calling convention for call");
843   }
844 }
845 
846 CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
847                                                     bool IsVarArg) {
848   switch (CC) {
849   case CallingConv::AMDGPU_KERNEL:
850   case CallingConv::SPIR_KERNEL:
851     llvm_unreachable("kernels should not be handled here");
852   case CallingConv::AMDGPU_VS:
853   case CallingConv::AMDGPU_GS:
854   case CallingConv::AMDGPU_PS:
855   case CallingConv::AMDGPU_CS:
856   case CallingConv::AMDGPU_HS:
857   case CallingConv::AMDGPU_ES:
858   case CallingConv::AMDGPU_LS:
859     return RetCC_SI_Shader;
860   case CallingConv::C:
861   case CallingConv::Fast:
862   case CallingConv::Cold:
863     return RetCC_AMDGPU_Func;
864   default:
865     report_fatal_error("Unsupported calling convention.");
866   }
867 }
868 
869 /// The SelectionDAGBuilder will automatically promote function arguments
870 /// with illegal types.  However, this does not work for the AMDGPU targets
871 /// since the function arguments are stored in memory as these illegal types.
872 /// In order to handle this properly we need to get the original types sizes
873 /// from the LLVM IR Function and fixup the ISD:InputArg values before
874 /// passing them to AnalyzeFormalArguments()
875 
876 /// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
877 /// input values across multiple registers.  Each item in the Ins array
878 /// represents a single value that will be stored in registers.  Ins[x].VT is
879 /// the value type of the value that will be stored in the register, so
880 /// whatever SDNode we lower the argument to needs to be this type.
881 ///
882 /// In order to correctly lower the arguments we need to know the size of each
883 /// argument.  Since Ins[x].VT gives us the size of the register that will
884 /// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
885 /// for the orignal function argument so that we can deduce the correct memory
886 /// type to use for Ins[x].  In most cases the correct memory type will be
887 /// Ins[x].ArgVT.  However, this will not always be the case.  If, for example,
888 /// we have a kernel argument of type v8i8, this argument will be split into
889 /// 8 parts and each part will be represented by its own item in the Ins array.
890 /// For each part the Ins[x].ArgVT will be the v8i8, which is the full type of
891 /// the argument before it was split.  From this, we deduce that the memory type
892 /// for each individual part is i8.  We pass the memory type as LocVT to the
893 /// calling convention analysis function and the register type (Ins[x].VT) as
894 /// the ValVT.
895 void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
896   CCState &State,
897   const SmallVectorImpl<ISD::InputArg> &Ins) const {
898   const MachineFunction &MF = State.getMachineFunction();
899   const Function &Fn = MF.getFunction();
900   LLVMContext &Ctx = Fn.getParent()->getContext();
901   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
902   const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset(Fn);
903   CallingConv::ID CC = Fn.getCallingConv();
904 
905   unsigned MaxAlign = 1;
906   uint64_t ExplicitArgOffset = 0;
907   const DataLayout &DL = Fn.getParent()->getDataLayout();
908 
909   unsigned InIndex = 0;
910 
911   for (const Argument &Arg : Fn.args()) {
912     Type *BaseArgTy = Arg.getType();
913     unsigned Align = DL.getABITypeAlignment(BaseArgTy);
914     MaxAlign = std::max(Align, MaxAlign);
915     unsigned AllocSize = DL.getTypeAllocSize(BaseArgTy);
916 
917     uint64_t ArgOffset = alignTo(ExplicitArgOffset, Align) + ExplicitOffset;
918     ExplicitArgOffset = alignTo(ExplicitArgOffset, Align) + AllocSize;
919 
920     // We're basically throwing away everything passed into us and starting over
921     // to get accurate in-memory offsets. The "PartOffset" is completely useless
922     // to us as computed in Ins.
923     //
924     // We also need to figure out what type legalization is trying to do to get
925     // the correct memory offsets.
926 
927     SmallVector<EVT, 16> ValueVTs;
928     SmallVector<uint64_t, 16> Offsets;
929     ComputeValueVTs(*this, DL, BaseArgTy, ValueVTs, &Offsets, ArgOffset);
930 
931     for (unsigned Value = 0, NumValues = ValueVTs.size();
932          Value != NumValues; ++Value) {
933       uint64_t BasePartOffset = Offsets[Value];
934 
935       EVT ArgVT = ValueVTs[Value];
936       EVT MemVT = ArgVT;
937       MVT RegisterVT = getRegisterTypeForCallingConv(Ctx, CC, ArgVT);
938       unsigned NumRegs = getNumRegistersForCallingConv(Ctx, CC, ArgVT);
939 
940       if (NumRegs == 1) {
941         // This argument is not split, so the IR type is the memory type.
942         if (ArgVT.isExtended()) {
943           // We have an extended type, like i24, so we should just use the
944           // register type.
945           MemVT = RegisterVT;
946         } else {
947           MemVT = ArgVT;
948         }
949       } else if (ArgVT.isVector() && RegisterVT.isVector() &&
950                  ArgVT.getScalarType() == RegisterVT.getScalarType()) {
951         assert(ArgVT.getVectorNumElements() > RegisterVT.getVectorNumElements());
952         // We have a vector value which has been split into a vector with
953         // the same scalar type, but fewer elements.  This should handle
954         // all the floating-point vector types.
955         MemVT = RegisterVT;
956       } else if (ArgVT.isVector() &&
957                  ArgVT.getVectorNumElements() == NumRegs) {
958         // This arg has been split so that each element is stored in a separate
959         // register.
960         MemVT = ArgVT.getScalarType();
961       } else if (ArgVT.isExtended()) {
962         // We have an extended type, like i65.
963         MemVT = RegisterVT;
964       } else {
965         unsigned MemoryBits = ArgVT.getStoreSizeInBits() / NumRegs;
966         assert(ArgVT.getStoreSizeInBits() % NumRegs == 0);
967         if (RegisterVT.isInteger()) {
968           MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
969         } else if (RegisterVT.isVector()) {
970           assert(!RegisterVT.getScalarType().isFloatingPoint());
971           unsigned NumElements = RegisterVT.getVectorNumElements();
972           assert(MemoryBits % NumElements == 0);
973           // This vector type has been split into another vector type with
974           // a different elements size.
975           EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
976                                            MemoryBits / NumElements);
977           MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
978         } else {
979           llvm_unreachable("cannot deduce memory type.");
980         }
981       }
982 
983       // Convert one element vectors to scalar.
984       if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
985         MemVT = MemVT.getScalarType();
986 
987       // Round up vec3/vec5 argument.
988       if (MemVT.isVector() && !MemVT.isPow2VectorType()) {
989         assert(MemVT.getVectorNumElements() == 3 ||
990                MemVT.getVectorNumElements() == 5);
991         MemVT = MemVT.getPow2VectorType(State.getContext());
992       }
993 
994       unsigned PartOffset = 0;
995       for (unsigned i = 0; i != NumRegs; ++i) {
996         State.addLoc(CCValAssign::getCustomMem(InIndex++, RegisterVT,
997                                                BasePartOffset + PartOffset,
998                                                MemVT.getSimpleVT(),
999                                                CCValAssign::Full));
1000         PartOffset += MemVT.getStoreSize();
1001       }
1002     }
1003   }
1004 }
1005 
1006 SDValue AMDGPUTargetLowering::LowerReturn(
1007   SDValue Chain, CallingConv::ID CallConv,
1008   bool isVarArg,
1009   const SmallVectorImpl<ISD::OutputArg> &Outs,
1010   const SmallVectorImpl<SDValue> &OutVals,
1011   const SDLoc &DL, SelectionDAG &DAG) const {
1012   // FIXME: Fails for r600 tests
1013   //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
1014   // "wave terminate should not have return values");
1015   return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
1016 }
1017 
1018 //===---------------------------------------------------------------------===//
1019 // Target specific lowering
1020 //===---------------------------------------------------------------------===//
1021 
1022 /// Selects the correct CCAssignFn for a given CallingConvention value.
1023 CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
1024                                                     bool IsVarArg) {
1025   return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
1026 }
1027 
1028 CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
1029                                                       bool IsVarArg) {
1030   return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
1031 }
1032 
1033 SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain,
1034                                                   SelectionDAG &DAG,
1035                                                   MachineFrameInfo &MFI,
1036                                                   int ClobberedFI) const {
1037   SmallVector<SDValue, 8> ArgChains;
1038   int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
1039   int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
1040 
1041   // Include the original chain at the beginning of the list. When this is
1042   // used by target LowerCall hooks, this helps legalize find the
1043   // CALLSEQ_BEGIN node.
1044   ArgChains.push_back(Chain);
1045 
1046   // Add a chain value for each stack argument corresponding
1047   for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
1048                             UE = DAG.getEntryNode().getNode()->use_end();
1049        U != UE; ++U) {
1050     if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) {
1051       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
1052         if (FI->getIndex() < 0) {
1053           int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
1054           int64_t InLastByte = InFirstByte;
1055           InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;
1056 
1057           if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
1058               (FirstByte <= InFirstByte && InFirstByte <= LastByte))
1059             ArgChains.push_back(SDValue(L, 1));
1060         }
1061       }
1062     }
1063   }
1064 
1065   // Build a tokenfactor for all the chains.
1066   return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
1067 }
1068 
1069 SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
1070                                                  SmallVectorImpl<SDValue> &InVals,
1071                                                  StringRef Reason) const {
1072   SDValue Callee = CLI.Callee;
1073   SelectionDAG &DAG = CLI.DAG;
1074 
1075   const Function &Fn = DAG.getMachineFunction().getFunction();
1076 
1077   StringRef FuncName("<unknown>");
1078 
1079   if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
1080     FuncName = G->getSymbol();
1081   else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1082     FuncName = G->getGlobal()->getName();
1083 
1084   DiagnosticInfoUnsupported NoCalls(
1085     Fn, Reason + FuncName, CLI.DL.getDebugLoc());
1086   DAG.getContext()->diagnose(NoCalls);
1087 
1088   if (!CLI.IsTailCall) {
1089     for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
1090       InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
1091   }
1092 
1093   return DAG.getEntryNode();
1094 }
1095 
1096 SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
1097                                         SmallVectorImpl<SDValue> &InVals) const {
1098   return lowerUnhandledCall(CLI, InVals, "unsupported call to function ");
1099 }
1100 
1101 SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
1102                                                       SelectionDAG &DAG) const {
1103   const Function &Fn = DAG.getMachineFunction().getFunction();
1104 
1105   DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
1106                                             SDLoc(Op).getDebugLoc());
1107   DAG.getContext()->diagnose(NoDynamicAlloca);
1108   auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
1109   return DAG.getMergeValues(Ops, SDLoc());
1110 }
1111 
1112 SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
1113                                              SelectionDAG &DAG) const {
1114   switch (Op.getOpcode()) {
1115   default:
1116     Op->print(errs(), &DAG);
1117     llvm_unreachable("Custom lowering code for this"
1118                      "instruction is not implemented yet!");
1119     break;
1120   case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
1121   case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
1122   case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
1123   case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
1124   case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
1125   case ISD::FREM: return LowerFREM(Op, DAG);
1126   case ISD::FCEIL: return LowerFCEIL(Op, DAG);
1127   case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
1128   case ISD::FRINT: return LowerFRINT(Op, DAG);
1129   case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
1130   case ISD::FROUND: return LowerFROUND(Op, DAG);
1131   case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
1132   case ISD::FLOG:
1133     return LowerFLOG(Op, DAG, 1 / AMDGPU_LOG2E_F);
1134   case ISD::FLOG10:
1135     return LowerFLOG(Op, DAG, AMDGPU_LN2_F / AMDGPU_LN10_F);
1136   case ISD::FEXP:
1137     return lowerFEXP(Op, DAG);
1138   case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
1139   case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
1140   case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
1141   case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
1142   case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
1143   case ISD::CTTZ:
1144   case ISD::CTTZ_ZERO_UNDEF:
1145   case ISD::CTLZ:
1146   case ISD::CTLZ_ZERO_UNDEF:
1147     return LowerCTLZ_CTTZ(Op, DAG);
1148   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
1149   }
1150   return Op;
1151 }
1152 
1153 void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
1154                                               SmallVectorImpl<SDValue> &Results,
1155                                               SelectionDAG &DAG) const {
1156   switch (N->getOpcode()) {
1157   case ISD::SIGN_EXTEND_INREG:
1158     // Different parts of legalization seem to interpret which type of
1159     // sign_extend_inreg is the one to check for custom lowering. The extended
1160     // from type is what really matters, but some places check for custom
1161     // lowering of the result type. This results in trying to use
1162     // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
1163     // nothing here and let the illegal result integer be handled normally.
1164     return;
1165   default:
1166     return;
1167   }
1168 }
1169 
1170 static bool hasDefinedInitializer(const GlobalValue *GV) {
1171   const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
1172   if (!GVar || !GVar->hasInitializer())
1173     return false;
1174 
1175   return !isa<UndefValue>(GVar->getInitializer());
1176 }
1177 
1178 SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
1179                                                  SDValue Op,
1180                                                  SelectionDAG &DAG) const {
1181 
1182   const DataLayout &DL = DAG.getDataLayout();
1183   GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
1184   const GlobalValue *GV = G->getGlobal();
1185 
1186   if (G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
1187       G->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) {
1188     if (!MFI->isEntryFunction()) {
1189       const Function &Fn = DAG.getMachineFunction().getFunction();
1190       DiagnosticInfoUnsupported BadLDSDecl(
1191         Fn, "local memory global used by non-kernel function", SDLoc(Op).getDebugLoc());
1192       DAG.getContext()->diagnose(BadLDSDecl);
1193     }
1194 
1195     // XXX: What does the value of G->getOffset() mean?
1196     assert(G->getOffset() == 0 &&
1197          "Do not know what to do with an non-zero offset");
1198 
1199     // TODO: We could emit code to handle the initialization somewhere.
1200     if (!hasDefinedInitializer(GV)) {
1201       unsigned Offset = MFI->allocateLDSGlobal(DL, *GV);
1202       return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
1203     }
1204   }
1205 
1206   const Function &Fn = DAG.getMachineFunction().getFunction();
1207   DiagnosticInfoUnsupported BadInit(
1208       Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc());
1209   DAG.getContext()->diagnose(BadInit);
1210   return SDValue();
1211 }
1212 
1213 SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
1214                                                   SelectionDAG &DAG) const {
1215   SmallVector<SDValue, 8> Args;
1216 
1217   EVT VT = Op.getValueType();
1218   if (VT == MVT::v4i16 || VT == MVT::v4f16) {
1219     SDLoc SL(Op);
1220     SDValue Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(0));
1221     SDValue Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(1));
1222 
1223     SDValue BV = DAG.getBuildVector(MVT::v2i32, SL, { Lo, Hi });
1224     return DAG.getNode(ISD::BITCAST, SL, VT, BV);
1225   }
1226 
1227   for (const SDUse &U : Op->ops())
1228     DAG.ExtractVectorElements(U.get(), Args);
1229 
1230   return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
1231 }
1232 
1233 SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
1234                                                      SelectionDAG &DAG) const {
1235 
1236   SmallVector<SDValue, 8> Args;
1237   unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1238   EVT VT = Op.getValueType();
1239   DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
1240                             VT.getVectorNumElements());
1241 
1242   return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
1243 }
1244 
1245 /// Generate Min/Max node
1246 SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
1247                                                    SDValue LHS, SDValue RHS,
1248                                                    SDValue True, SDValue False,
1249                                                    SDValue CC,
1250                                                    DAGCombinerInfo &DCI) const {
1251   if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
1252     return SDValue();
1253 
1254   SelectionDAG &DAG = DCI.DAG;
1255   ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
1256   switch (CCOpcode) {
1257   case ISD::SETOEQ:
1258   case ISD::SETONE:
1259   case ISD::SETUNE:
1260   case ISD::SETNE:
1261   case ISD::SETUEQ:
1262   case ISD::SETEQ:
1263   case ISD::SETFALSE:
1264   case ISD::SETFALSE2:
1265   case ISD::SETTRUE:
1266   case ISD::SETTRUE2:
1267   case ISD::SETUO:
1268   case ISD::SETO:
1269     break;
1270   case ISD::SETULE:
1271   case ISD::SETULT: {
1272     if (LHS == True)
1273       return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
1274     return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
1275   }
1276   case ISD::SETOLE:
1277   case ISD::SETOLT:
1278   case ISD::SETLE:
1279   case ISD::SETLT: {
1280     // Ordered. Assume ordered for undefined.
1281 
1282     // Only do this after legalization to avoid interfering with other combines
1283     // which might occur.
1284     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
1285         !DCI.isCalledByLegalizer())
1286       return SDValue();
1287 
1288     // We need to permute the operands to get the correct NaN behavior. The
1289     // selected operand is the second one based on the failing compare with NaN,
1290     // so permute it based on the compare type the hardware uses.
1291     if (LHS == True)
1292       return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
1293     return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
1294   }
1295   case ISD::SETUGE:
1296   case ISD::SETUGT: {
1297     if (LHS == True)
1298       return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
1299     return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
1300   }
1301   case ISD::SETGT:
1302   case ISD::SETGE:
1303   case ISD::SETOGE:
1304   case ISD::SETOGT: {
1305     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
1306         !DCI.isCalledByLegalizer())
1307       return SDValue();
1308 
1309     if (LHS == True)
1310       return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
1311     return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
1312   }
1313   case ISD::SETCC_INVALID:
1314     llvm_unreachable("Invalid setcc condcode!");
1315   }
1316   return SDValue();
1317 }
1318 
1319 std::pair<SDValue, SDValue>
1320 AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
1321   SDLoc SL(Op);
1322 
1323   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1324 
1325   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1326   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1327 
1328   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1329   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1330 
1331   return std::make_pair(Lo, Hi);
1332 }
1333 
1334 SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
1335   SDLoc SL(Op);
1336 
1337   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1338   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1339   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1340 }
1341 
1342 SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
1343   SDLoc SL(Op);
1344 
1345   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1346   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1347   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1348 }
1349 
1350 // Split a vector type into two parts. The first part is a power of two vector.
1351 // The second part is whatever is left over, and is a scalar if it would
1352 // otherwise be a 1-vector.
1353 std::pair<EVT, EVT>
1354 AMDGPUTargetLowering::getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const {
1355   EVT LoVT, HiVT;
1356   EVT EltVT = VT.getVectorElementType();
1357   unsigned NumElts = VT.getVectorNumElements();
1358   unsigned LoNumElts = PowerOf2Ceil((NumElts + 1) / 2);
1359   LoVT = EVT::getVectorVT(*DAG.getContext(), EltVT, LoNumElts);
1360   HiVT = NumElts - LoNumElts == 1
1361              ? EltVT
1362              : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts);
1363   return std::make_pair(LoVT, HiVT);
1364 }
1365 
1366 // Split a vector value into two parts of types LoVT and HiVT. HiVT could be
1367 // scalar.
1368 std::pair<SDValue, SDValue>
1369 AMDGPUTargetLowering::splitVector(const SDValue &N, const SDLoc &DL,
1370                                   const EVT &LoVT, const EVT &HiVT,
1371                                   SelectionDAG &DAG) const {
1372   assert(LoVT.getVectorNumElements() +
1373                  (HiVT.isVector() ? HiVT.getVectorNumElements() : 1) <=
1374              N.getValueType().getVectorNumElements() &&
1375          "More vector elements requested than available!");
1376   auto IdxTy = getVectorIdxTy(DAG.getDataLayout());
1377   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
1378                            DAG.getConstant(0, DL, IdxTy));
1379   SDValue Hi = DAG.getNode(
1380       HiVT.isVector() ? ISD::EXTRACT_SUBVECTOR : ISD::EXTRACT_VECTOR_ELT, DL,
1381       HiVT, N, DAG.getConstant(LoVT.getVectorNumElements(), DL, IdxTy));
1382   return std::make_pair(Lo, Hi);
1383 }
1384 
1385 SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
1386                                               SelectionDAG &DAG) const {
1387   LoadSDNode *Load = cast<LoadSDNode>(Op);
1388   EVT VT = Op.getValueType();
1389 
1390 
1391   // If this is a 2 element vector, we really want to scalarize and not create
1392   // weird 1 element vectors.
1393   if (VT.getVectorNumElements() == 2)
1394     return scalarizeVectorLoad(Load, DAG);
1395 
1396   SDValue BasePtr = Load->getBasePtr();
1397   EVT MemVT = Load->getMemoryVT();
1398   SDLoc SL(Op);
1399 
1400   const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1401 
1402   EVT LoVT, HiVT;
1403   EVT LoMemVT, HiMemVT;
1404   SDValue Lo, Hi;
1405 
1406   std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
1407   std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
1408   std::tie(Lo, Hi) = splitVector(Op, SL, LoVT, HiVT, DAG);
1409 
1410   unsigned Size = LoMemVT.getStoreSize();
1411   unsigned BaseAlign = Load->getAlignment();
1412   unsigned HiAlign = MinAlign(BaseAlign, Size);
1413 
1414   SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
1415                                   Load->getChain(), BasePtr, SrcValue, LoMemVT,
1416                                   BaseAlign, Load->getMemOperand()->getFlags());
1417   SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, Size);
1418   SDValue HiLoad =
1419       DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
1420                      HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
1421                      HiMemVT, HiAlign, Load->getMemOperand()->getFlags());
1422 
1423   auto IdxTy = getVectorIdxTy(DAG.getDataLayout());
1424   SDValue Join;
1425   if (LoVT == HiVT) {
1426     // This is the case that the vector is power of two so was evenly split.
1427     Join = DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad);
1428   } else {
1429     Join = DAG.getNode(ISD::INSERT_SUBVECTOR, SL, VT, DAG.getUNDEF(VT), LoLoad,
1430                        DAG.getConstant(0, SL, IdxTy));
1431     Join = DAG.getNode(HiVT.isVector() ? ISD::INSERT_SUBVECTOR
1432                                        : ISD::INSERT_VECTOR_ELT,
1433                        SL, VT, Join, HiLoad,
1434                        DAG.getConstant(LoVT.getVectorNumElements(), SL, IdxTy));
1435   }
1436 
1437   SDValue Ops[] = {Join, DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
1438                                      LoLoad.getValue(1), HiLoad.getValue(1))};
1439 
1440   return DAG.getMergeValues(Ops, SL);
1441 }
1442 
1443 // Widen a vector load from vec3 to vec4.
1444 SDValue AMDGPUTargetLowering::WidenVectorLoad(SDValue Op,
1445                                               SelectionDAG &DAG) const {
1446   LoadSDNode *Load = cast<LoadSDNode>(Op);
1447   EVT VT = Op.getValueType();
1448   assert(VT.getVectorNumElements() == 3);
1449   SDValue BasePtr = Load->getBasePtr();
1450   EVT MemVT = Load->getMemoryVT();
1451   SDLoc SL(Op);
1452   const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1453   unsigned BaseAlign = Load->getAlignment();
1454 
1455   EVT WideVT =
1456       EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
1457   EVT WideMemVT =
1458       EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), 4);
1459   SDValue WideLoad = DAG.getExtLoad(
1460       Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue,
1461       WideMemVT, BaseAlign, Load->getMemOperand()->getFlags());
1462   return DAG.getMergeValues(
1463       {DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, VT, WideLoad,
1464                    DAG.getConstant(0, SL, getVectorIdxTy(DAG.getDataLayout()))),
1465        WideLoad.getValue(1)},
1466       SL);
1467 }
1468 
1469 SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
1470                                                SelectionDAG &DAG) const {
1471   StoreSDNode *Store = cast<StoreSDNode>(Op);
1472   SDValue Val = Store->getValue();
1473   EVT VT = Val.getValueType();
1474 
1475   // If this is a 2 element vector, we really want to scalarize and not create
1476   // weird 1 element vectors.
1477   if (VT.getVectorNumElements() == 2)
1478     return scalarizeVectorStore(Store, DAG);
1479 
1480   EVT MemVT = Store->getMemoryVT();
1481   SDValue Chain = Store->getChain();
1482   SDValue BasePtr = Store->getBasePtr();
1483   SDLoc SL(Op);
1484 
1485   EVT LoVT, HiVT;
1486   EVT LoMemVT, HiMemVT;
1487   SDValue Lo, Hi;
1488 
1489   std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
1490   std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
1491   std::tie(Lo, Hi) = splitVector(Val, SL, LoVT, HiVT, DAG);
1492 
1493   SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize());
1494 
1495   const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
1496   unsigned BaseAlign = Store->getAlignment();
1497   unsigned Size = LoMemVT.getStoreSize();
1498   unsigned HiAlign = MinAlign(BaseAlign, Size);
1499 
1500   SDValue LoStore =
1501       DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
1502                         Store->getMemOperand()->getFlags());
1503   SDValue HiStore =
1504       DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
1505                         HiMemVT, HiAlign, Store->getMemOperand()->getFlags());
1506 
1507   return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
1508 }
1509 
1510 // This is a shortcut for integer division because we have fast i32<->f32
1511 // conversions, and fast f32 reciprocal instructions. The fractional part of a
1512 // float is enough to accurately represent up to a 24-bit signed integer.
1513 SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
1514                                             bool Sign) const {
1515   SDLoc DL(Op);
1516   EVT VT = Op.getValueType();
1517   SDValue LHS = Op.getOperand(0);
1518   SDValue RHS = Op.getOperand(1);
1519   MVT IntVT = MVT::i32;
1520   MVT FltVT = MVT::f32;
1521 
1522   unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
1523   if (LHSSignBits < 9)
1524     return SDValue();
1525 
1526   unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
1527   if (RHSSignBits < 9)
1528     return SDValue();
1529 
1530   unsigned BitSize = VT.getSizeInBits();
1531   unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
1532   unsigned DivBits = BitSize - SignBits;
1533   if (Sign)
1534     ++DivBits;
1535 
1536   ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
1537   ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
1538 
1539   SDValue jq = DAG.getConstant(1, DL, IntVT);
1540 
1541   if (Sign) {
1542     // char|short jq = ia ^ ib;
1543     jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
1544 
1545     // jq = jq >> (bitsize - 2)
1546     jq = DAG.getNode(ISD::SRA, DL, VT, jq,
1547                      DAG.getConstant(BitSize - 2, DL, VT));
1548 
1549     // jq = jq | 0x1
1550     jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
1551   }
1552 
1553   // int ia = (int)LHS;
1554   SDValue ia = LHS;
1555 
1556   // int ib, (int)RHS;
1557   SDValue ib = RHS;
1558 
1559   // float fa = (float)ia;
1560   SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);
1561 
1562   // float fb = (float)ib;
1563   SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);
1564 
1565   SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
1566                            fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));
1567 
1568   // fq = trunc(fq);
1569   fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);
1570 
1571   // float fqneg = -fq;
1572   SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);
1573 
1574   // float fr = mad(fqneg, fb, fa);
1575   unsigned OpCode = Subtarget->hasFP32Denormals() ?
1576                     (unsigned)AMDGPUISD::FMAD_FTZ :
1577                     (unsigned)ISD::FMAD;
1578   SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa);
1579 
1580   // int iq = (int)fq;
1581   SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);
1582 
1583   // fr = fabs(fr);
1584   fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);
1585 
1586   // fb = fabs(fb);
1587   fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);
1588 
1589   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
1590 
1591   // int cv = fr >= fb;
1592   SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);
1593 
1594   // jq = (cv ? jq : 0);
1595   jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
1596 
1597   // dst = iq + jq;
1598   SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);
1599 
1600   // Rem needs compensation, it's easier to recompute it
1601   SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
1602   Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
1603 
1604   // Truncate to number of bits this divide really is.
1605   if (Sign) {
1606     SDValue InRegSize
1607       = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits));
1608     Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize);
1609     Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize);
1610   } else {
1611     SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
1612     Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask);
1613     Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask);
1614   }
1615 
1616   return DAG.getMergeValues({ Div, Rem }, DL);
1617 }
1618 
1619 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
1620                                       SelectionDAG &DAG,
1621                                       SmallVectorImpl<SDValue> &Results) const {
1622   SDLoc DL(Op);
1623   EVT VT = Op.getValueType();
1624 
1625   assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64");
1626 
1627   EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1628 
1629   SDValue One = DAG.getConstant(1, DL, HalfVT);
1630   SDValue Zero = DAG.getConstant(0, DL, HalfVT);
1631 
1632   //HiLo split
1633   SDValue LHS = Op.getOperand(0);
1634   SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
1635   SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, One);
1636 
1637   SDValue RHS = Op.getOperand(1);
1638   SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
1639   SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, One);
1640 
1641   if (DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
1642       DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {
1643 
1644     SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1645                               LHS_Lo, RHS_Lo);
1646 
1647     SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), Zero});
1648     SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), Zero});
1649 
1650     Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
1651     Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
1652     return;
1653   }
1654 
1655   if (isTypeLegal(MVT::i64)) {
1656     // Compute denominator reciprocal.
1657     unsigned FMAD = Subtarget->hasFP32Denormals() ?
1658                     (unsigned)AMDGPUISD::FMAD_FTZ :
1659                     (unsigned)ISD::FMAD;
1660 
1661     SDValue Cvt_Lo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Lo);
1662     SDValue Cvt_Hi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Hi);
1663     SDValue Mad1 = DAG.getNode(FMAD, DL, MVT::f32, Cvt_Hi,
1664       DAG.getConstantFP(APInt(32, 0x4f800000).bitsToFloat(), DL, MVT::f32),
1665       Cvt_Lo);
1666     SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, DL, MVT::f32, Mad1);
1667     SDValue Mul1 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Rcp,
1668       DAG.getConstantFP(APInt(32, 0x5f7ffffc).bitsToFloat(), DL, MVT::f32));
1669     SDValue Mul2 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Mul1,
1670       DAG.getConstantFP(APInt(32, 0x2f800000).bitsToFloat(), DL, MVT::f32));
1671     SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, MVT::f32, Mul2);
1672     SDValue Mad2 = DAG.getNode(FMAD, DL, MVT::f32, Trunc,
1673       DAG.getConstantFP(APInt(32, 0xcf800000).bitsToFloat(), DL, MVT::f32),
1674       Mul1);
1675     SDValue Rcp_Lo = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Mad2);
1676     SDValue Rcp_Hi = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Trunc);
1677     SDValue Rcp64 = DAG.getBitcast(VT,
1678                         DAG.getBuildVector(MVT::v2i32, DL, {Rcp_Lo, Rcp_Hi}));
1679 
1680     SDValue Zero64 = DAG.getConstant(0, DL, VT);
1681     SDValue One64  = DAG.getConstant(1, DL, VT);
1682     SDValue Zero1 = DAG.getConstant(0, DL, MVT::i1);
1683     SDVTList HalfCarryVT = DAG.getVTList(HalfVT, MVT::i1);
1684 
1685     SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS);
1686     SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64);
1687     SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1);
1688     SDValue Mulhi1_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
1689                                     Zero);
1690     SDValue Mulhi1_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
1691                                     One);
1692 
1693     SDValue Add1_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Lo,
1694                                   Mulhi1_Lo, Zero1);
1695     SDValue Add1_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Hi,
1696                                   Mulhi1_Hi, Add1_Lo.getValue(1));
1697     SDValue Add1_HiNc = DAG.getNode(ISD::ADD, DL, HalfVT, Rcp_Hi, Mulhi1_Hi);
1698     SDValue Add1 = DAG.getBitcast(VT,
1699                         DAG.getBuildVector(MVT::v2i32, DL, {Add1_Lo, Add1_Hi}));
1700 
1701     SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1);
1702     SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2);
1703     SDValue Mulhi2_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
1704                                     Zero);
1705     SDValue Mulhi2_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
1706                                     One);
1707 
1708     SDValue Add2_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_Lo,
1709                                   Mulhi2_Lo, Zero1);
1710     SDValue Add2_HiC = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_HiNc,
1711                                    Mulhi2_Hi, Add1_Lo.getValue(1));
1712     SDValue Add2_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add2_HiC,
1713                                   Zero, Add2_Lo.getValue(1));
1714     SDValue Add2 = DAG.getBitcast(VT,
1715                         DAG.getBuildVector(MVT::v2i32, DL, {Add2_Lo, Add2_Hi}));
1716     SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2);
1717 
1718     SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3);
1719 
1720     SDValue Mul3_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, Zero);
1721     SDValue Mul3_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, One);
1722     SDValue Sub1_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Lo,
1723                                   Mul3_Lo, Zero1);
1724     SDValue Sub1_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Hi,
1725                                   Mul3_Hi, Sub1_Lo.getValue(1));
1726     SDValue Sub1_Mi = DAG.getNode(ISD::SUB, DL, HalfVT, LHS_Hi, Mul3_Hi);
1727     SDValue Sub1 = DAG.getBitcast(VT,
1728                         DAG.getBuildVector(MVT::v2i32, DL, {Sub1_Lo, Sub1_Hi}));
1729 
1730     SDValue MinusOne = DAG.getConstant(0xffffffffu, DL, HalfVT);
1731     SDValue C1 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, MinusOne, Zero,
1732                                  ISD::SETUGE);
1733     SDValue C2 = DAG.getSelectCC(DL, Sub1_Lo, RHS_Lo, MinusOne, Zero,
1734                                  ISD::SETUGE);
1735     SDValue C3 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, C2, C1, ISD::SETEQ);
1736 
1737     // TODO: Here and below portions of the code can be enclosed into if/endif.
1738     // Currently control flow is unconditional and we have 4 selects after
1739     // potential endif to substitute PHIs.
1740 
1741     // if C3 != 0 ...
1742     SDValue Sub2_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Lo,
1743                                   RHS_Lo, Zero1);
1744     SDValue Sub2_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Mi,
1745                                   RHS_Hi, Sub1_Lo.getValue(1));
1746     SDValue Sub2_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1747                                   Zero, Sub2_Lo.getValue(1));
1748     SDValue Sub2 = DAG.getBitcast(VT,
1749                         DAG.getBuildVector(MVT::v2i32, DL, {Sub2_Lo, Sub2_Hi}));
1750 
1751     SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64);
1752 
1753     SDValue C4 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, MinusOne, Zero,
1754                                  ISD::SETUGE);
1755     SDValue C5 = DAG.getSelectCC(DL, Sub2_Lo, RHS_Lo, MinusOne, Zero,
1756                                  ISD::SETUGE);
1757     SDValue C6 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, C5, C4, ISD::SETEQ);
1758 
1759     // if (C6 != 0)
1760     SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64);
1761 
1762     SDValue Sub3_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Lo,
1763                                   RHS_Lo, Zero1);
1764     SDValue Sub3_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1765                                   RHS_Hi, Sub2_Lo.getValue(1));
1766     SDValue Sub3_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub3_Mi,
1767                                   Zero, Sub3_Lo.getValue(1));
1768     SDValue Sub3 = DAG.getBitcast(VT,
1769                         DAG.getBuildVector(MVT::v2i32, DL, {Sub3_Lo, Sub3_Hi}));
1770 
1771     // endif C6
1772     // endif C3
1773 
1774     SDValue Sel1 = DAG.getSelectCC(DL, C6, Zero, Add4, Add3, ISD::SETNE);
1775     SDValue Div  = DAG.getSelectCC(DL, C3, Zero, Sel1, Mulhi3, ISD::SETNE);
1776 
1777     SDValue Sel2 = DAG.getSelectCC(DL, C6, Zero, Sub3, Sub2, ISD::SETNE);
1778     SDValue Rem  = DAG.getSelectCC(DL, C3, Zero, Sel2, Sub1, ISD::SETNE);
1779 
1780     Results.push_back(Div);
1781     Results.push_back(Rem);
1782 
1783     return;
1784   }
1785 
1786   // r600 expandion.
1787   // Get Speculative values
1788   SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
1789   SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
1790 
1791   SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, Zero, REM_Part, LHS_Hi, ISD::SETEQ);
1792   SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, Zero});
1793   REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
1794 
1795   SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, Zero, DIV_Part, Zero, ISD::SETEQ);
1796   SDValue DIV_Lo = Zero;
1797 
1798   const unsigned halfBitWidth = HalfVT.getSizeInBits();
1799 
1800   for (unsigned i = 0; i < halfBitWidth; ++i) {
1801     const unsigned bitPos = halfBitWidth - i - 1;
1802     SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
1803     // Get value of high bit
1804     SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
1805     HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, One);
1806     HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
1807 
1808     // Shift
1809     REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
1810     // Add LHS high bit
1811     REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
1812 
1813     SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
1814     SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, Zero, ISD::SETUGE);
1815 
1816     DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
1817 
1818     // Update REM
1819     SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
1820     REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
1821   }
1822 
1823   SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
1824   DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
1825   Results.push_back(DIV);
1826   Results.push_back(REM);
1827 }
1828 
1829 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
1830                                            SelectionDAG &DAG) const {
1831   SDLoc DL(Op);
1832   EVT VT = Op.getValueType();
1833 
1834   if (VT == MVT::i64) {
1835     SmallVector<SDValue, 2> Results;
1836     LowerUDIVREM64(Op, DAG, Results);
1837     return DAG.getMergeValues(Results, DL);
1838   }
1839 
1840   if (VT == MVT::i32) {
1841     if (SDValue Res = LowerDIVREM24(Op, DAG, false))
1842       return Res;
1843   }
1844 
1845   SDValue Num = Op.getOperand(0);
1846   SDValue Den = Op.getOperand(1);
1847 
1848   // RCP =  URECIP(Den) = 2^32 / Den + e
1849   // e is rounding error.
1850   SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);
1851 
1852   // RCP_LO = mul(RCP, Den) */
1853   SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den);
1854 
1855   // RCP_HI = mulhu (RCP, Den) */
1856   SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);
1857 
1858   // NEG_RCP_LO = -RCP_LO
1859   SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
1860                                                      RCP_LO);
1861 
1862   // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
1863   SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
1864                                            NEG_RCP_LO, RCP_LO,
1865                                            ISD::SETEQ);
1866   // Calculate the rounding error from the URECIP instruction
1867   // E = mulhu(ABS_RCP_LO, RCP)
1868   SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);
1869 
1870   // RCP_A_E = RCP + E
1871   SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);
1872 
1873   // RCP_S_E = RCP - E
1874   SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);
1875 
1876   // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_SUB_E)
1877   SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
1878                                      RCP_A_E, RCP_S_E,
1879                                      ISD::SETEQ);
1880   // Quotient = mulhu(Tmp0, Num)
1881   SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);
1882 
1883   // Num_S_Remainder = Quotient * Den
1884   SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den);
1885 
1886   // Remainder = Num - Num_S_Remainder
1887   SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);
1888 
1889   // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
1890   SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
1891                                                  DAG.getConstant(-1, DL, VT),
1892                                                  DAG.getConstant(0, DL, VT),
1893                                                  ISD::SETUGE);
1894   // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
1895   SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
1896                                                   Num_S_Remainder,
1897                                                   DAG.getConstant(-1, DL, VT),
1898                                                   DAG.getConstant(0, DL, VT),
1899                                                   ISD::SETUGE);
1900   // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
1901   SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
1902                                                Remainder_GE_Zero);
1903 
1904   // Calculate Division result:
1905 
1906   // Quotient_A_One = Quotient + 1
1907   SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
1908                                        DAG.getConstant(1, DL, VT));
1909 
1910   // Quotient_S_One = Quotient - 1
1911   SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
1912                                        DAG.getConstant(1, DL, VT));
1913 
1914   // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
1915   SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
1916                                      Quotient, Quotient_A_One, ISD::SETEQ);
1917 
1918   // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
1919   Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
1920                             Quotient_S_One, Div, ISD::SETEQ);
1921 
1922   // Calculate Rem result:
1923 
1924   // Remainder_S_Den = Remainder - Den
1925   SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);
1926 
1927   // Remainder_A_Den = Remainder + Den
1928   SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);
1929 
1930   // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
1931   SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
1932                                     Remainder, Remainder_S_Den, ISD::SETEQ);
1933 
1934   // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
1935   Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
1936                             Remainder_A_Den, Rem, ISD::SETEQ);
1937   SDValue Ops[2] = {
1938     Div,
1939     Rem
1940   };
1941   return DAG.getMergeValues(Ops, DL);
1942 }
1943 
1944 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
1945                                            SelectionDAG &DAG) const {
1946   SDLoc DL(Op);
1947   EVT VT = Op.getValueType();
1948 
1949   SDValue LHS = Op.getOperand(0);
1950   SDValue RHS = Op.getOperand(1);
1951 
1952   SDValue Zero = DAG.getConstant(0, DL, VT);
1953   SDValue NegOne = DAG.getConstant(-1, DL, VT);
1954 
1955   if (VT == MVT::i32) {
1956     if (SDValue Res = LowerDIVREM24(Op, DAG, true))
1957       return Res;
1958   }
1959 
1960   if (VT == MVT::i64 &&
1961       DAG.ComputeNumSignBits(LHS) > 32 &&
1962       DAG.ComputeNumSignBits(RHS) > 32) {
1963     EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1964 
1965     //HiLo split
1966     SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
1967     SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
1968     SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1969                                  LHS_Lo, RHS_Lo);
1970     SDValue Res[2] = {
1971       DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
1972       DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
1973     };
1974     return DAG.getMergeValues(Res, DL);
1975   }
1976 
1977   SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
1978   SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
1979   SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
1980   SDValue RSign = LHSign; // Remainder sign is the same as LHS
1981 
1982   LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
1983   RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
1984 
1985   LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
1986   RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
1987 
1988   SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
1989   SDValue Rem = Div.getValue(1);
1990 
1991   Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
1992   Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
1993 
1994   Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
1995   Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
1996 
1997   SDValue Res[2] = {
1998     Div,
1999     Rem
2000   };
2001   return DAG.getMergeValues(Res, DL);
2002 }
2003 
2004 // (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y))
2005 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
2006   SDLoc SL(Op);
2007   EVT VT = Op.getValueType();
2008   SDValue X = Op.getOperand(0);
2009   SDValue Y = Op.getOperand(1);
2010 
2011   // TODO: Should this propagate fast-math-flags?
2012 
2013   SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y);
2014   SDValue Floor = DAG.getNode(ISD::FTRUNC, SL, VT, Div);
2015   SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Floor, Y);
2016 
2017   return DAG.getNode(ISD::FSUB, SL, VT, X, Mul);
2018 }
2019 
2020 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
2021   SDLoc SL(Op);
2022   SDValue Src = Op.getOperand(0);
2023 
2024   // result = trunc(src)
2025   // if (src > 0.0 && src != result)
2026   //   result += 1.0
2027 
2028   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2029 
2030   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2031   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
2032 
2033   EVT SetCCVT =
2034       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2035 
2036   SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
2037   SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2038   SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2039 
2040   SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
2041   // TODO: Should this propagate fast-math-flags?
2042   return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2043 }
2044 
2045 static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
2046                                   SelectionDAG &DAG) {
2047   const unsigned FractBits = 52;
2048   const unsigned ExpBits = 11;
2049 
2050   SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
2051                                 Hi,
2052                                 DAG.getConstant(FractBits - 32, SL, MVT::i32),
2053                                 DAG.getConstant(ExpBits, SL, MVT::i32));
2054   SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
2055                             DAG.getConstant(1023, SL, MVT::i32));
2056 
2057   return Exp;
2058 }
2059 
2060 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
2061   SDLoc SL(Op);
2062   SDValue Src = Op.getOperand(0);
2063 
2064   assert(Op.getValueType() == MVT::f64);
2065 
2066   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2067   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2068 
2069   SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2070 
2071   // Extract the upper half, since this is where we will find the sign and
2072   // exponent.
2073   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);
2074 
2075   SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2076 
2077   const unsigned FractBits = 52;
2078 
2079   // Extract the sign bit.
2080   const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
2081   SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
2082 
2083   // Extend back to 64-bits.
2084   SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
2085   SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
2086 
2087   SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
2088   const SDValue FractMask
2089     = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
2090 
2091   SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
2092   SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
2093   SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
2094 
2095   EVT SetCCVT =
2096       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
2097 
2098   const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
2099 
2100   SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
2101   SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
2102 
2103   SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
2104   SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
2105 
2106   return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
2107 }
2108 
2109 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
2110   SDLoc SL(Op);
2111   SDValue Src = Op.getOperand(0);
2112 
2113   assert(Op.getValueType() == MVT::f64);
2114 
2115   APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
2116   SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
2117   SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
2118 
2119   // TODO: Should this propagate fast-math-flags?
2120 
2121   SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
2122   SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
2123 
2124   SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
2125 
2126   APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");
2127   SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);
2128 
2129   EVT SetCCVT =
2130       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2131   SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
2132 
2133   return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
2134 }
2135 
2136 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
2137   // FNEARBYINT and FRINT are the same, except in their handling of FP
2138   // exceptions. Those aren't really meaningful for us, and OpenCL only has
2139   // rint, so just treat them as equivalent.
2140   return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
2141 }
2142 
2143 // XXX - May require not supporting f32 denormals?
2144 
2145 // Don't handle v2f16. The extra instructions to scalarize and repack around the
2146 // compare and vselect end up producing worse code than scalarizing the whole
2147 // operation.
2148 SDValue AMDGPUTargetLowering::LowerFROUND32_16(SDValue Op, SelectionDAG &DAG) const {
2149   SDLoc SL(Op);
2150   SDValue X = Op.getOperand(0);
2151   EVT VT = Op.getValueType();
2152 
2153   SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X);
2154 
2155   // TODO: Should this propagate fast-math-flags?
2156 
2157   SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T);
2158 
2159   SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff);
2160 
2161   const SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
2162   const SDValue One = DAG.getConstantFP(1.0, SL, VT);
2163   const SDValue Half = DAG.getConstantFP(0.5, SL, VT);
2164 
2165   SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, VT, One, X);
2166 
2167   EVT SetCCVT =
2168       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2169 
2170   SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
2171 
2172   SDValue Sel = DAG.getNode(ISD::SELECT, SL, VT, Cmp, SignOne, Zero);
2173 
2174   return DAG.getNode(ISD::FADD, SL, VT, T, Sel);
2175 }
2176 
2177 SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const {
2178   SDLoc SL(Op);
2179   SDValue X = Op.getOperand(0);
2180 
2181   SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X);
2182 
2183   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2184   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2185   const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32);
2186   const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32);
2187   EVT SetCCVT =
2188       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
2189 
2190   SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
2191 
2192   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One);
2193 
2194   SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2195 
2196   const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL,
2197                                        MVT::i64);
2198 
2199   SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp);
2200   SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64,
2201                           DAG.getConstant(INT64_C(0x0008000000000000), SL,
2202                                           MVT::i64),
2203                           Exp);
2204 
2205   SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M);
2206   SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT,
2207                               DAG.getConstant(0, SL, MVT::i64), Tmp0,
2208                               ISD::SETNE);
2209 
2210   SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1,
2211                              D, DAG.getConstant(0, SL, MVT::i64));
2212   SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2);
2213 
2214   K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64));
2215   K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K);
2216 
2217   SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
2218   SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
2219   SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ);
2220 
2221   SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64,
2222                             ExpEqNegOne,
2223                             DAG.getConstantFP(1.0, SL, MVT::f64),
2224                             DAG.getConstantFP(0.0, SL, MVT::f64));
2225 
2226   SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X);
2227 
2228   K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K);
2229   K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K);
2230 
2231   return K;
2232 }
2233 
2234 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2235   EVT VT = Op.getValueType();
2236 
2237   if (VT == MVT::f32 || VT == MVT::f16)
2238     return LowerFROUND32_16(Op, DAG);
2239 
2240   if (VT == MVT::f64)
2241     return LowerFROUND64(Op, DAG);
2242 
2243   llvm_unreachable("unhandled type");
2244 }
2245 
2246 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
2247   SDLoc SL(Op);
2248   SDValue Src = Op.getOperand(0);
2249 
2250   // result = trunc(src);
2251   // if (src < 0.0 && src != result)
2252   //   result += -1.0.
2253 
2254   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2255 
2256   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2257   const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
2258 
2259   EVT SetCCVT =
2260       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2261 
2262   SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
2263   SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2264   SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2265 
2266   SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
2267   // TODO: Should this propagate fast-math-flags?
2268   return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2269 }
2270 
2271 SDValue AMDGPUTargetLowering::LowerFLOG(SDValue Op, SelectionDAG &DAG,
2272                                         double Log2BaseInverted) const {
2273   EVT VT = Op.getValueType();
2274 
2275   SDLoc SL(Op);
2276   SDValue Operand = Op.getOperand(0);
2277   SDValue Log2Operand = DAG.getNode(ISD::FLOG2, SL, VT, Operand);
2278   SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT);
2279 
2280   return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand);
2281 }
2282 
2283 // Return M_LOG2E of appropriate type
2284 static SDValue getLog2EVal(SelectionDAG &DAG, const SDLoc &SL, EVT VT) {
2285   switch (VT.getScalarType().getSimpleVT().SimpleTy) {
2286   case MVT::f32:
2287     return DAG.getConstantFP(1.44269504088896340735992468100189214f, SL, VT);
2288   case MVT::f16:
2289     return DAG.getConstantFP(
2290       APFloat(APFloat::IEEEhalf(), "1.44269504088896340735992468100189214"),
2291       SL, VT);
2292   case MVT::f64:
2293     return DAG.getConstantFP(
2294       APFloat(APFloat::IEEEdouble(), "0x1.71547652b82fep+0"), SL, VT);
2295   default:
2296     llvm_unreachable("unsupported fp type");
2297   }
2298 }
2299 
2300 // exp2(M_LOG2E_F * f);
2301 SDValue AMDGPUTargetLowering::lowerFEXP(SDValue Op, SelectionDAG &DAG) const {
2302   EVT VT = Op.getValueType();
2303   SDLoc SL(Op);
2304   SDValue Src = Op.getOperand(0);
2305 
2306   const SDValue K = getLog2EVal(DAG, SL, VT);
2307   SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Src, K, Op->getFlags());
2308   return DAG.getNode(ISD::FEXP2, SL, VT, Mul, Op->getFlags());
2309 }
2310 
2311 static bool isCtlzOpc(unsigned Opc) {
2312   return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
2313 }
2314 
2315 static bool isCttzOpc(unsigned Opc) {
2316   return Opc == ISD::CTTZ || Opc == ISD::CTTZ_ZERO_UNDEF;
2317 }
2318 
2319 SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const {
2320   SDLoc SL(Op);
2321   SDValue Src = Op.getOperand(0);
2322   bool ZeroUndef = Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
2323                    Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF;
2324 
2325   unsigned ISDOpc, NewOpc;
2326   if (isCtlzOpc(Op.getOpcode())) {
2327     ISDOpc = ISD::CTLZ_ZERO_UNDEF;
2328     NewOpc = AMDGPUISD::FFBH_U32;
2329   } else if (isCttzOpc(Op.getOpcode())) {
2330     ISDOpc = ISD::CTTZ_ZERO_UNDEF;
2331     NewOpc = AMDGPUISD::FFBL_B32;
2332   } else
2333     llvm_unreachable("Unexpected OPCode!!!");
2334 
2335 
2336   if (ZeroUndef && Src.getValueType() == MVT::i32)
2337     return DAG.getNode(NewOpc, SL, MVT::i32, Src);
2338 
2339   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2340 
2341   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2342   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2343 
2344   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
2345   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
2346 
2347   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
2348                                    *DAG.getContext(), MVT::i32);
2349 
2350   SDValue HiOrLo = isCtlzOpc(Op.getOpcode()) ? Hi : Lo;
2351   SDValue Hi0orLo0 = DAG.getSetCC(SL, SetCCVT, HiOrLo, Zero, ISD::SETEQ);
2352 
2353   SDValue OprLo = DAG.getNode(ISDOpc, SL, MVT::i32, Lo);
2354   SDValue OprHi = DAG.getNode(ISDOpc, SL, MVT::i32, Hi);
2355 
2356   const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32);
2357   SDValue Add, NewOpr;
2358   if (isCtlzOpc(Op.getOpcode())) {
2359     Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprLo, Bits32);
2360     // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x))
2361     NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprHi);
2362   } else {
2363     Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprHi, Bits32);
2364     // cttz(x) = lo_32(x) == 0 ? cttz(hi_32(x)) + 32 : cttz(lo_32(x))
2365     NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprLo);
2366   }
2367 
2368   if (!ZeroUndef) {
2369     // Test if the full 64-bit input is zero.
2370 
2371     // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32,
2372     // which we probably don't want.
2373     SDValue LoOrHi = isCtlzOpc(Op.getOpcode()) ? Lo : Hi;
2374     SDValue Lo0OrHi0 = DAG.getSetCC(SL, SetCCVT, LoOrHi, Zero, ISD::SETEQ);
2375     SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0OrHi0, Hi0orLo0);
2376 
2377     // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction
2378     // with the same cycles, otherwise it is slower.
2379     // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src,
2380     // DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ);
2381 
2382     const SDValue Bits32 = DAG.getConstant(64, SL, MVT::i32);
2383 
2384     // The instruction returns -1 for 0 input, but the defined intrinsic
2385     // behavior is to return the number of bits.
2386     NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32,
2387                          SrcIsZero, Bits32, NewOpr);
2388   }
2389 
2390   return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr);
2391 }
2392 
2393 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
2394                                                bool Signed) const {
2395   // Unsigned
2396   // cul2f(ulong u)
2397   //{
2398   //  uint lz = clz(u);
2399   //  uint e = (u != 0) ? 127U + 63U - lz : 0;
2400   //  u = (u << lz) & 0x7fffffffffffffffUL;
2401   //  ulong t = u & 0xffffffffffUL;
2402   //  uint v = (e << 23) | (uint)(u >> 40);
2403   //  uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
2404   //  return as_float(v + r);
2405   //}
2406   // Signed
2407   // cl2f(long l)
2408   //{
2409   //  long s = l >> 63;
2410   //  float r = cul2f((l + s) ^ s);
2411   //  return s ? -r : r;
2412   //}
2413 
2414   SDLoc SL(Op);
2415   SDValue Src = Op.getOperand(0);
2416   SDValue L = Src;
2417 
2418   SDValue S;
2419   if (Signed) {
2420     const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64);
2421     S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit);
2422 
2423     SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S);
2424     L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S);
2425   }
2426 
2427   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
2428                                    *DAG.getContext(), MVT::f32);
2429 
2430 
2431   SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32);
2432   SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64);
2433   SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L);
2434   LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ);
2435 
2436   SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32);
2437   SDValue E = DAG.getSelect(SL, MVT::i32,
2438     DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE),
2439     DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ),
2440     ZeroI32);
2441 
2442   SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64,
2443     DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ),
2444     DAG.getConstant((-1ULL) >> 1, SL, MVT::i64));
2445 
2446   SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U,
2447                           DAG.getConstant(0xffffffffffULL, SL, MVT::i64));
2448 
2449   SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64,
2450                              U, DAG.getConstant(40, SL, MVT::i64));
2451 
2452   SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32,
2453     DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)),
2454     DAG.getNode(ISD::TRUNCATE, SL, MVT::i32,  UShl));
2455 
2456   SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64);
2457   SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT);
2458   SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ);
2459 
2460   SDValue One = DAG.getConstant(1, SL, MVT::i32);
2461 
2462   SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One);
2463 
2464   SDValue R = DAG.getSelect(SL, MVT::i32,
2465     RCmp,
2466     One,
2467     DAG.getSelect(SL, MVT::i32, TCmp, VTrunc1, ZeroI32));
2468   R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R);
2469   R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R);
2470 
2471   if (!Signed)
2472     return R;
2473 
2474   SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R);
2475   return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R);
2476 }
2477 
2478 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
2479                                                bool Signed) const {
2480   SDLoc SL(Op);
2481   SDValue Src = Op.getOperand(0);
2482 
2483   SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2484 
2485   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
2486                            DAG.getConstant(0, SL, MVT::i32));
2487   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
2488                            DAG.getConstant(1, SL, MVT::i32));
2489 
2490   SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
2491                               SL, MVT::f64, Hi);
2492 
2493   SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
2494 
2495   SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
2496                               DAG.getConstant(32, SL, MVT::i32));
2497   // TODO: Should this propagate fast-math-flags?
2498   return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
2499 }
2500 
2501 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
2502                                                SelectionDAG &DAG) const {
2503   assert(Op.getOperand(0).getValueType() == MVT::i64 &&
2504          "operation should be legal");
2505 
2506   // TODO: Factor out code common with LowerSINT_TO_FP.
2507 
2508   EVT DestVT = Op.getValueType();
2509   if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2510     SDLoc DL(Op);
2511     SDValue Src = Op.getOperand(0);
2512 
2513     SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2514     SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op));
2515     SDValue FPRound =
2516         DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2517 
2518     return FPRound;
2519   }
2520 
2521   if (DestVT == MVT::f32)
2522     return LowerINT_TO_FP32(Op, DAG, false);
2523 
2524   assert(DestVT == MVT::f64);
2525   return LowerINT_TO_FP64(Op, DAG, false);
2526 }
2527 
2528 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
2529                                               SelectionDAG &DAG) const {
2530   assert(Op.getOperand(0).getValueType() == MVT::i64 &&
2531          "operation should be legal");
2532 
2533   // TODO: Factor out code common with LowerUINT_TO_FP.
2534 
2535   EVT DestVT = Op.getValueType();
2536   if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2537     SDLoc DL(Op);
2538     SDValue Src = Op.getOperand(0);
2539 
2540     SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2541     SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op));
2542     SDValue FPRound =
2543         DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2544 
2545     return FPRound;
2546   }
2547 
2548   if (DestVT == MVT::f32)
2549     return LowerINT_TO_FP32(Op, DAG, true);
2550 
2551   assert(DestVT == MVT::f64);
2552   return LowerINT_TO_FP64(Op, DAG, true);
2553 }
2554 
2555 SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
2556                                                bool Signed) const {
2557   SDLoc SL(Op);
2558 
2559   SDValue Src = Op.getOperand(0);
2560 
2561   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2562 
2563   SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL,
2564                                  MVT::f64);
2565   SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL,
2566                                  MVT::f64);
2567   // TODO: Should this propagate fast-math-flags?
2568   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0);
2569 
2570   SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul);
2571 
2572 
2573   SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc);
2574 
2575   SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL,
2576                            MVT::i32, FloorMul);
2577   SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
2578 
2579   SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi});
2580 
2581   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result);
2582 }
2583 
2584 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const {
2585   SDLoc DL(Op);
2586   SDValue N0 = Op.getOperand(0);
2587 
2588   // Convert to target node to get known bits
2589   if (N0.getValueType() == MVT::f32)
2590     return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0);
2591 
2592   if (getTargetMachine().Options.UnsafeFPMath) {
2593     // There is a generic expand for FP_TO_FP16 with unsafe fast math.
2594     return SDValue();
2595   }
2596 
2597   assert(N0.getSimpleValueType() == MVT::f64);
2598 
2599   // f64 -> f16 conversion using round-to-nearest-even rounding mode.
2600   const unsigned ExpMask = 0x7ff;
2601   const unsigned ExpBiasf64 = 1023;
2602   const unsigned ExpBiasf16 = 15;
2603   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
2604   SDValue One = DAG.getConstant(1, DL, MVT::i32);
2605   SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0);
2606   SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U,
2607                            DAG.getConstant(32, DL, MVT::i64));
2608   UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32);
2609   U = DAG.getZExtOrTrunc(U, DL, MVT::i32);
2610   SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2611                           DAG.getConstant(20, DL, MVT::i64));
2612   E = DAG.getNode(ISD::AND, DL, MVT::i32, E,
2613                   DAG.getConstant(ExpMask, DL, MVT::i32));
2614   // Subtract the fp64 exponent bias (1023) to get the real exponent and
2615   // add the f16 bias (15) to get the biased exponent for the f16 format.
2616   E = DAG.getNode(ISD::ADD, DL, MVT::i32, E,
2617                   DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32));
2618 
2619   SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2620                           DAG.getConstant(8, DL, MVT::i32));
2621   M = DAG.getNode(ISD::AND, DL, MVT::i32, M,
2622                   DAG.getConstant(0xffe, DL, MVT::i32));
2623 
2624   SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH,
2625                                   DAG.getConstant(0x1ff, DL, MVT::i32));
2626   MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U);
2627 
2628   SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ);
2629   M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set);
2630 
2631   // (M != 0 ? 0x0200 : 0) | 0x7c00;
2632   SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32,
2633       DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32),
2634                       Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32));
2635 
2636   // N = M | (E << 12);
2637   SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2638       DAG.getNode(ISD::SHL, DL, MVT::i32, E,
2639                   DAG.getConstant(12, DL, MVT::i32)));
2640 
2641   // B = clamp(1-E, 0, 13);
2642   SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32,
2643                                   One, E);
2644   SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero);
2645   B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B,
2646                   DAG.getConstant(13, DL, MVT::i32));
2647 
2648   SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2649                                    DAG.getConstant(0x1000, DL, MVT::i32));
2650 
2651   SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B);
2652   SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B);
2653   SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE);
2654   D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1);
2655 
2656   SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT);
2657   SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V,
2658                               DAG.getConstant(0x7, DL, MVT::i32));
2659   V = DAG.getNode(ISD::SRL, DL, MVT::i32, V,
2660                   DAG.getConstant(2, DL, MVT::i32));
2661   SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32),
2662                                One, Zero, ISD::SETEQ);
2663   SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32),
2664                                One, Zero, ISD::SETGT);
2665   V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1);
2666   V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1);
2667 
2668   V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32),
2669                       DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT);
2670   V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32),
2671                       I, V, ISD::SETEQ);
2672 
2673   // Extract the sign bit.
2674   SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2675                             DAG.getConstant(16, DL, MVT::i32));
2676   Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign,
2677                      DAG.getConstant(0x8000, DL, MVT::i32));
2678 
2679   V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V);
2680   return DAG.getZExtOrTrunc(V, DL, Op.getValueType());
2681 }
2682 
2683 SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op,
2684                                               SelectionDAG &DAG) const {
2685   SDValue Src = Op.getOperand(0);
2686 
2687   // TODO: Factor out code common with LowerFP_TO_UINT.
2688 
2689   EVT SrcVT = Src.getValueType();
2690   if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) {
2691     SDLoc DL(Op);
2692 
2693     SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
2694     SDValue FpToInt32 =
2695         DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend);
2696 
2697     return FpToInt32;
2698   }
2699 
2700   if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
2701     return LowerFP64_TO_INT(Op, DAG, true);
2702 
2703   return SDValue();
2704 }
2705 
2706 SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op,
2707                                               SelectionDAG &DAG) const {
2708   SDValue Src = Op.getOperand(0);
2709 
2710   // TODO: Factor out code common with LowerFP_TO_SINT.
2711 
2712   EVT SrcVT = Src.getValueType();
2713   if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) {
2714     SDLoc DL(Op);
2715 
2716     SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
2717     SDValue FpToInt32 =
2718         DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend);
2719 
2720     return FpToInt32;
2721   }
2722 
2723   if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
2724     return LowerFP64_TO_INT(Op, DAG, false);
2725 
2726   return SDValue();
2727 }
2728 
2729 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
2730                                                      SelectionDAG &DAG) const {
2731   EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2732   MVT VT = Op.getSimpleValueType();
2733   MVT ScalarVT = VT.getScalarType();
2734 
2735   assert(VT.isVector());
2736 
2737   SDValue Src = Op.getOperand(0);
2738   SDLoc DL(Op);
2739 
2740   // TODO: Don't scalarize on Evergreen?
2741   unsigned NElts = VT.getVectorNumElements();
2742   SmallVector<SDValue, 8> Args;
2743   DAG.ExtractVectorElements(Src, Args, 0, NElts);
2744 
2745   SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
2746   for (unsigned I = 0; I < NElts; ++I)
2747     Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
2748 
2749   return DAG.getBuildVector(VT, DL, Args);
2750 }
2751 
2752 //===----------------------------------------------------------------------===//
2753 // Custom DAG optimizations
2754 //===----------------------------------------------------------------------===//
2755 
2756 static bool isU24(SDValue Op, SelectionDAG &DAG) {
2757   return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24;
2758 }
2759 
2760 static bool isI24(SDValue Op, SelectionDAG &DAG) {
2761   EVT VT = Op.getValueType();
2762   return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
2763                                      // as unsigned 24-bit values.
2764     AMDGPUTargetLowering::numBitsSigned(Op, DAG) < 24;
2765 }
2766 
2767 static SDValue simplifyI24(SDNode *Node24,
2768                            TargetLowering::DAGCombinerInfo &DCI) {
2769   SelectionDAG &DAG = DCI.DAG;
2770   SDValue LHS = Node24->getOperand(0);
2771   SDValue RHS = Node24->getOperand(1);
2772 
2773   APInt Demanded = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 24);
2774 
2775   // First try to simplify using GetDemandedBits which allows the operands to
2776   // have other uses, but will only perform simplifications that involve
2777   // bypassing some nodes for this user.
2778   SDValue DemandedLHS = DAG.GetDemandedBits(LHS, Demanded);
2779   SDValue DemandedRHS = DAG.GetDemandedBits(RHS, Demanded);
2780   if (DemandedLHS || DemandedRHS)
2781     return DAG.getNode(Node24->getOpcode(), SDLoc(Node24), Node24->getVTList(),
2782                        DemandedLHS ? DemandedLHS : LHS,
2783                        DemandedRHS ? DemandedRHS : RHS);
2784 
2785   // Now try SimplifyDemandedBits which can simplify the nodes used by our
2786   // operands if this node is the only user.
2787   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2788   if (TLI.SimplifyDemandedBits(LHS, Demanded, DCI))
2789     return SDValue(Node24, 0);
2790   if (TLI.SimplifyDemandedBits(RHS, Demanded, DCI))
2791     return SDValue(Node24, 0);
2792 
2793   return SDValue();
2794 }
2795 
2796 template <typename IntTy>
2797 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
2798                                uint32_t Width, const SDLoc &DL) {
2799   if (Width + Offset < 32) {
2800     uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
2801     IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
2802     return DAG.getConstant(Result, DL, MVT::i32);
2803   }
2804 
2805   return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
2806 }
2807 
2808 static bool hasVolatileUser(SDNode *Val) {
2809   for (SDNode *U : Val->uses()) {
2810     if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
2811       if (M->isVolatile())
2812         return true;
2813     }
2814   }
2815 
2816   return false;
2817 }
2818 
2819 bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const {
2820   // i32 vectors are the canonical memory type.
2821   if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT))
2822     return false;
2823 
2824   if (!VT.isByteSized())
2825     return false;
2826 
2827   unsigned Size = VT.getStoreSize();
2828 
2829   if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector())
2830     return false;
2831 
2832   if (Size == 3 || (Size > 4 && (Size % 4 != 0)))
2833     return false;
2834 
2835   return true;
2836 }
2837 
2838 // Find a load or store from corresponding pattern root.
2839 // Roots may be build_vector, bitconvert or their combinations.
2840 static MemSDNode* findMemSDNode(SDNode *N) {
2841   N = AMDGPUTargetLowering::stripBitcast(SDValue(N,0)).getNode();
2842   if (MemSDNode *MN = dyn_cast<MemSDNode>(N))
2843     return MN;
2844   assert(isa<BuildVectorSDNode>(N));
2845   for (SDValue V : N->op_values())
2846     if (MemSDNode *MN =
2847           dyn_cast<MemSDNode>(AMDGPUTargetLowering::stripBitcast(V)))
2848       return MN;
2849   llvm_unreachable("cannot find MemSDNode in the pattern!");
2850 }
2851 
2852 bool AMDGPUTargetLowering::SelectFlatOffset(bool IsSigned,
2853                                             SelectionDAG &DAG,
2854                                             SDNode *N,
2855                                             SDValue Addr,
2856                                             SDValue &VAddr,
2857                                             SDValue &Offset,
2858                                             SDValue &SLC) const {
2859   const GCNSubtarget &ST =
2860         DAG.getMachineFunction().getSubtarget<GCNSubtarget>();
2861   int64_t OffsetVal = 0;
2862 
2863   if (ST.hasFlatInstOffsets() &&
2864       (!ST.hasFlatSegmentOffsetBug() ||
2865        findMemSDNode(N)->getAddressSpace() != AMDGPUAS::FLAT_ADDRESS) &&
2866       DAG.isBaseWithConstantOffset(Addr)) {
2867     SDValue N0 = Addr.getOperand(0);
2868     SDValue N1 = Addr.getOperand(1);
2869     int64_t COffsetVal = cast<ConstantSDNode>(N1)->getSExtValue();
2870 
2871     const SIInstrInfo *TII = ST.getInstrInfo();
2872     if (TII->isLegalFLATOffset(COffsetVal, findMemSDNode(N)->getAddressSpace(),
2873                                IsSigned)) {
2874       Addr = N0;
2875       OffsetVal = COffsetVal;
2876     }
2877   }
2878 
2879   VAddr = Addr;
2880   Offset = DAG.getTargetConstant(OffsetVal, SDLoc(), MVT::i16);
2881   SLC = DAG.getTargetConstant(0, SDLoc(), MVT::i1);
2882 
2883   return true;
2884 }
2885 
2886 // Replace load of an illegal type with a store of a bitcast to a friendlier
2887 // type.
2888 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
2889                                                  DAGCombinerInfo &DCI) const {
2890   if (!DCI.isBeforeLegalize())
2891     return SDValue();
2892 
2893   LoadSDNode *LN = cast<LoadSDNode>(N);
2894   if (LN->isVolatile() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN))
2895     return SDValue();
2896 
2897   SDLoc SL(N);
2898   SelectionDAG &DAG = DCI.DAG;
2899   EVT VT = LN->getMemoryVT();
2900 
2901   unsigned Size = VT.getStoreSize();
2902   unsigned Align = LN->getAlignment();
2903   if (Align < Size && isTypeLegal(VT)) {
2904     bool IsFast;
2905     unsigned AS = LN->getAddressSpace();
2906 
2907     // Expand unaligned loads earlier than legalization. Due to visitation order
2908     // problems during legalization, the emitted instructions to pack and unpack
2909     // the bytes again are not eliminated in the case of an unaligned copy.
2910     if (!allowsMisalignedMemoryAccesses(
2911             VT, AS, Align, LN->getMemOperand()->getFlags(), &IsFast)) {
2912       if (VT.isVector())
2913         return scalarizeVectorLoad(LN, DAG);
2914 
2915       SDValue Ops[2];
2916       std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
2917       return DAG.getMergeValues(Ops, SDLoc(N));
2918     }
2919 
2920     if (!IsFast)
2921       return SDValue();
2922   }
2923 
2924   if (!shouldCombineMemoryType(VT))
2925     return SDValue();
2926 
2927   EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2928 
2929   SDValue NewLoad
2930     = DAG.getLoad(NewVT, SL, LN->getChain(),
2931                   LN->getBasePtr(), LN->getMemOperand());
2932 
2933   SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad);
2934   DCI.CombineTo(N, BC, NewLoad.getValue(1));
2935   return SDValue(N, 0);
2936 }
2937 
2938 // Replace store of an illegal type with a store of a bitcast to a friendlier
2939 // type.
2940 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
2941                                                   DAGCombinerInfo &DCI) const {
2942   if (!DCI.isBeforeLegalize())
2943     return SDValue();
2944 
2945   StoreSDNode *SN = cast<StoreSDNode>(N);
2946   if (SN->isVolatile() || !ISD::isNormalStore(SN))
2947     return SDValue();
2948 
2949   EVT VT = SN->getMemoryVT();
2950   unsigned Size = VT.getStoreSize();
2951 
2952   SDLoc SL(N);
2953   SelectionDAG &DAG = DCI.DAG;
2954   unsigned Align = SN->getAlignment();
2955   if (Align < Size && isTypeLegal(VT)) {
2956     bool IsFast;
2957     unsigned AS = SN->getAddressSpace();
2958 
2959     // Expand unaligned stores earlier than legalization. Due to visitation
2960     // order problems during legalization, the emitted instructions to pack and
2961     // unpack the bytes again are not eliminated in the case of an unaligned
2962     // copy.
2963     if (!allowsMisalignedMemoryAccesses(
2964             VT, AS, Align, SN->getMemOperand()->getFlags(), &IsFast)) {
2965       if (VT.isVector())
2966         return scalarizeVectorStore(SN, DAG);
2967 
2968       return expandUnalignedStore(SN, DAG);
2969     }
2970 
2971     if (!IsFast)
2972       return SDValue();
2973   }
2974 
2975   if (!shouldCombineMemoryType(VT))
2976     return SDValue();
2977 
2978   EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2979   SDValue Val = SN->getValue();
2980 
2981   //DCI.AddToWorklist(Val.getNode());
2982 
2983   bool OtherUses = !Val.hasOneUse();
2984   SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val);
2985   if (OtherUses) {
2986     SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal);
2987     DAG.ReplaceAllUsesOfValueWith(Val, CastBack);
2988   }
2989 
2990   return DAG.getStore(SN->getChain(), SL, CastVal,
2991                       SN->getBasePtr(), SN->getMemOperand());
2992 }
2993 
2994 // FIXME: This should go in generic DAG combiner with an isTruncateFree check,
2995 // but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU
2996 // issues.
2997 SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N,
2998                                                         DAGCombinerInfo &DCI) const {
2999   SelectionDAG &DAG = DCI.DAG;
3000   SDValue N0 = N->getOperand(0);
3001 
3002   // (vt2 (assertzext (truncate vt0:x), vt1)) ->
3003   //     (vt2 (truncate (assertzext vt0:x, vt1)))
3004   if (N0.getOpcode() == ISD::TRUNCATE) {
3005     SDValue N1 = N->getOperand(1);
3006     EVT ExtVT = cast<VTSDNode>(N1)->getVT();
3007     SDLoc SL(N);
3008 
3009     SDValue Src = N0.getOperand(0);
3010     EVT SrcVT = Src.getValueType();
3011     if (SrcVT.bitsGE(ExtVT)) {
3012       SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1);
3013       return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg);
3014     }
3015   }
3016 
3017   return SDValue();
3018 }
3019 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the
3020 /// binary operation \p Opc to it with the corresponding constant operands.
3021 SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
3022   DAGCombinerInfo &DCI, const SDLoc &SL,
3023   unsigned Opc, SDValue LHS,
3024   uint32_t ValLo, uint32_t ValHi) const {
3025   SelectionDAG &DAG = DCI.DAG;
3026   SDValue Lo, Hi;
3027   std::tie(Lo, Hi) = split64BitValue(LHS, DAG);
3028 
3029   SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32);
3030   SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32);
3031 
3032   SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS);
3033   SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS);
3034 
3035   // Re-visit the ands. It's possible we eliminated one of them and it could
3036   // simplify the vector.
3037   DCI.AddToWorklist(Lo.getNode());
3038   DCI.AddToWorklist(Hi.getNode());
3039 
3040   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
3041   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3042 }
3043 
3044 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
3045                                                 DAGCombinerInfo &DCI) const {
3046   EVT VT = N->getValueType(0);
3047 
3048   ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3049   if (!RHS)
3050     return SDValue();
3051 
3052   SDValue LHS = N->getOperand(0);
3053   unsigned RHSVal = RHS->getZExtValue();
3054   if (!RHSVal)
3055     return LHS;
3056 
3057   SDLoc SL(N);
3058   SelectionDAG &DAG = DCI.DAG;
3059 
3060   switch (LHS->getOpcode()) {
3061   default:
3062     break;
3063   case ISD::ZERO_EXTEND:
3064   case ISD::SIGN_EXTEND:
3065   case ISD::ANY_EXTEND: {
3066     SDValue X = LHS->getOperand(0);
3067 
3068     if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
3069         isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) {
3070       // Prefer build_vector as the canonical form if packed types are legal.
3071       // (shl ([asz]ext i16:x), 16 -> build_vector 0, x
3072       SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL,
3073        { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
3074       return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
3075     }
3076 
3077     // shl (ext x) => zext (shl x), if shift does not overflow int
3078     if (VT != MVT::i64)
3079       break;
3080     KnownBits Known = DAG.computeKnownBits(X);
3081     unsigned LZ = Known.countMinLeadingZeros();
3082     if (LZ < RHSVal)
3083       break;
3084     EVT XVT = X.getValueType();
3085     SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
3086     return DAG.getZExtOrTrunc(Shl, SL, VT);
3087   }
3088   }
3089 
3090   if (VT != MVT::i64)
3091     return SDValue();
3092 
3093   // i64 (shl x, C) -> (build_pair 0, (shl x, C -32))
3094 
3095   // On some subtargets, 64-bit shift is a quarter rate instruction. In the
3096   // common case, splitting this into a move and a 32-bit shift is faster and
3097   // the same code size.
3098   if (RHSVal < 32)
3099     return SDValue();
3100 
3101   SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
3102 
3103   SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
3104   SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
3105 
3106   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3107 
3108   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
3109   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3110 }
3111 
3112 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
3113                                                 DAGCombinerInfo &DCI) const {
3114   if (N->getValueType(0) != MVT::i64)
3115     return SDValue();
3116 
3117   const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3118   if (!RHS)
3119     return SDValue();
3120 
3121   SelectionDAG &DAG = DCI.DAG;
3122   SDLoc SL(N);
3123   unsigned RHSVal = RHS->getZExtValue();
3124 
3125   // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31)
3126   if (RHSVal == 32) {
3127     SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3128     SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3129                                    DAG.getConstant(31, SL, MVT::i32));
3130 
3131     SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
3132     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3133   }
3134 
3135   // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
3136   if (RHSVal == 63) {
3137     SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3138     SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3139                                    DAG.getConstant(31, SL, MVT::i32));
3140     SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
3141     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3142   }
3143 
3144   return SDValue();
3145 }
3146 
3147 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
3148                                                 DAGCombinerInfo &DCI) const {
3149   auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3150   if (!RHS)
3151     return SDValue();
3152 
3153   EVT VT = N->getValueType(0);
3154   SDValue LHS = N->getOperand(0);
3155   unsigned ShiftAmt = RHS->getZExtValue();
3156   SelectionDAG &DAG = DCI.DAG;
3157   SDLoc SL(N);
3158 
3159   // fold (srl (and x, c1 << c2), c2) -> (and (srl(x, c2), c1)
3160   // this improves the ability to match BFE patterns in isel.
3161   if (LHS.getOpcode() == ISD::AND) {
3162     if (auto *Mask = dyn_cast<ConstantSDNode>(LHS.getOperand(1))) {
3163       if (Mask->getAPIntValue().isShiftedMask() &&
3164           Mask->getAPIntValue().countTrailingZeros() == ShiftAmt) {
3165         return DAG.getNode(
3166             ISD::AND, SL, VT,
3167             DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0), N->getOperand(1)),
3168             DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1), N->getOperand(1)));
3169       }
3170     }
3171   }
3172 
3173   if (VT != MVT::i64)
3174     return SDValue();
3175 
3176   if (ShiftAmt < 32)
3177     return SDValue();
3178 
3179   // srl i64:x, C for C >= 32
3180   // =>
3181   //   build_pair (srl hi_32(x), C - 32), 0
3182   SDValue One = DAG.getConstant(1, SL, MVT::i32);
3183   SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3184 
3185   SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, LHS);
3186   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecOp, One);
3187 
3188   SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
3189   SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
3190 
3191   SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
3192 
3193   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
3194 }
3195 
3196 SDValue AMDGPUTargetLowering::performTruncateCombine(
3197   SDNode *N, DAGCombinerInfo &DCI) const {
3198   SDLoc SL(N);
3199   SelectionDAG &DAG = DCI.DAG;
3200   EVT VT = N->getValueType(0);
3201   SDValue Src = N->getOperand(0);
3202 
3203   // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x)
3204   if (Src.getOpcode() == ISD::BITCAST && !VT.isVector()) {
3205     SDValue Vec = Src.getOperand(0);
3206     if (Vec.getOpcode() == ISD::BUILD_VECTOR) {
3207       SDValue Elt0 = Vec.getOperand(0);
3208       EVT EltVT = Elt0.getValueType();
3209       if (VT.getSizeInBits() <= EltVT.getSizeInBits()) {
3210         if (EltVT.isFloatingPoint()) {
3211           Elt0 = DAG.getNode(ISD::BITCAST, SL,
3212                              EltVT.changeTypeToInteger(), Elt0);
3213         }
3214 
3215         return DAG.getNode(ISD::TRUNCATE, SL, VT, Elt0);
3216       }
3217     }
3218   }
3219 
3220   // Equivalent of above for accessing the high element of a vector as an
3221   // integer operation.
3222   // trunc (srl (bitcast (build_vector x, y))), 16 -> trunc (bitcast y)
3223   if (Src.getOpcode() == ISD::SRL && !VT.isVector()) {
3224     if (auto K = isConstOrConstSplat(Src.getOperand(1))) {
3225       if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) {
3226         SDValue BV = stripBitcast(Src.getOperand(0));
3227         if (BV.getOpcode() == ISD::BUILD_VECTOR &&
3228             BV.getValueType().getVectorNumElements() == 2) {
3229           SDValue SrcElt = BV.getOperand(1);
3230           EVT SrcEltVT = SrcElt.getValueType();
3231           if (SrcEltVT.isFloatingPoint()) {
3232             SrcElt = DAG.getNode(ISD::BITCAST, SL,
3233                                  SrcEltVT.changeTypeToInteger(), SrcElt);
3234           }
3235 
3236           return DAG.getNode(ISD::TRUNCATE, SL, VT, SrcElt);
3237         }
3238       }
3239     }
3240   }
3241 
3242   // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit.
3243   //
3244   // i16 (trunc (srl i64:x, K)), K <= 16 ->
3245   //     i16 (trunc (srl (i32 (trunc x), K)))
3246   if (VT.getScalarSizeInBits() < 32) {
3247     EVT SrcVT = Src.getValueType();
3248     if (SrcVT.getScalarSizeInBits() > 32 &&
3249         (Src.getOpcode() == ISD::SRL ||
3250          Src.getOpcode() == ISD::SRA ||
3251          Src.getOpcode() == ISD::SHL)) {
3252       SDValue Amt = Src.getOperand(1);
3253       KnownBits Known = DAG.computeKnownBits(Amt);
3254       unsigned Size = VT.getScalarSizeInBits();
3255       if ((Known.isConstant() && Known.getConstant().ule(Size)) ||
3256           (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size))) {
3257         EVT MidVT = VT.isVector() ?
3258           EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3259                            VT.getVectorNumElements()) : MVT::i32;
3260 
3261         EVT NewShiftVT = getShiftAmountTy(MidVT, DAG.getDataLayout());
3262         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MidVT,
3263                                     Src.getOperand(0));
3264         DCI.AddToWorklist(Trunc.getNode());
3265 
3266         if (Amt.getValueType() != NewShiftVT) {
3267           Amt = DAG.getZExtOrTrunc(Amt, SL, NewShiftVT);
3268           DCI.AddToWorklist(Amt.getNode());
3269         }
3270 
3271         SDValue ShrunkShift = DAG.getNode(Src.getOpcode(), SL, MidVT,
3272                                           Trunc, Amt);
3273         return DAG.getNode(ISD::TRUNCATE, SL, VT, ShrunkShift);
3274       }
3275     }
3276   }
3277 
3278   return SDValue();
3279 }
3280 
3281 // We need to specifically handle i64 mul here to avoid unnecessary conversion
3282 // instructions. If we only match on the legalized i64 mul expansion,
3283 // SimplifyDemandedBits will be unable to remove them because there will be
3284 // multiple uses due to the separate mul + mulh[su].
3285 static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
3286                         SDValue N0, SDValue N1, unsigned Size, bool Signed) {
3287   if (Size <= 32) {
3288     unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3289     return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
3290   }
3291 
3292   // Because we want to eliminate extension instructions before the
3293   // operation, we need to create a single user here (i.e. not the separate
3294   // mul_lo + mul_hi) so that SimplifyDemandedBits will deal with it.
3295 
3296   unsigned MulOpc = Signed ? AMDGPUISD::MUL_LOHI_I24 : AMDGPUISD::MUL_LOHI_U24;
3297 
3298   SDValue Mul = DAG.getNode(MulOpc, SL,
3299                             DAG.getVTList(MVT::i32, MVT::i32), N0, N1);
3300 
3301   return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64,
3302                      Mul.getValue(0), Mul.getValue(1));
3303 }
3304 
3305 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
3306                                                 DAGCombinerInfo &DCI) const {
3307   EVT VT = N->getValueType(0);
3308 
3309   unsigned Size = VT.getSizeInBits();
3310   if (VT.isVector() || Size > 64)
3311     return SDValue();
3312 
3313   // There are i16 integer mul/mad.
3314   if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16))
3315     return SDValue();
3316 
3317   SelectionDAG &DAG = DCI.DAG;
3318   SDLoc DL(N);
3319 
3320   SDValue N0 = N->getOperand(0);
3321   SDValue N1 = N->getOperand(1);
3322 
3323   // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
3324   // in the source into any_extends if the result of the mul is truncated. Since
3325   // we can assume the high bits are whatever we want, use the underlying value
3326   // to avoid the unknown high bits from interfering.
3327   if (N0.getOpcode() == ISD::ANY_EXTEND)
3328     N0 = N0.getOperand(0);
3329 
3330   if (N1.getOpcode() == ISD::ANY_EXTEND)
3331     N1 = N1.getOperand(0);
3332 
3333   SDValue Mul;
3334 
3335   if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
3336     N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3337     N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3338     Mul = getMul24(DAG, DL, N0, N1, Size, false);
3339   } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
3340     N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3341     N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3342     Mul = getMul24(DAG, DL, N0, N1, Size, true);
3343   } else {
3344     return SDValue();
3345   }
3346 
3347   // We need to use sext even for MUL_U24, because MUL_U24 is used
3348   // for signed multiply of 8 and 16-bit types.
3349   return DAG.getSExtOrTrunc(Mul, DL, VT);
3350 }
3351 
3352 SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
3353                                                   DAGCombinerInfo &DCI) const {
3354   EVT VT = N->getValueType(0);
3355 
3356   if (!Subtarget->hasMulI24() || VT.isVector())
3357     return SDValue();
3358 
3359   SelectionDAG &DAG = DCI.DAG;
3360   SDLoc DL(N);
3361 
3362   SDValue N0 = N->getOperand(0);
3363   SDValue N1 = N->getOperand(1);
3364 
3365   if (!isI24(N0, DAG) || !isI24(N1, DAG))
3366     return SDValue();
3367 
3368   N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3369   N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3370 
3371   SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1);
3372   DCI.AddToWorklist(Mulhi.getNode());
3373   return DAG.getSExtOrTrunc(Mulhi, DL, VT);
3374 }
3375 
3376 SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N,
3377                                                   DAGCombinerInfo &DCI) const {
3378   EVT VT = N->getValueType(0);
3379 
3380   if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
3381     return SDValue();
3382 
3383   SelectionDAG &DAG = DCI.DAG;
3384   SDLoc DL(N);
3385 
3386   SDValue N0 = N->getOperand(0);
3387   SDValue N1 = N->getOperand(1);
3388 
3389   if (!isU24(N0, DAG) || !isU24(N1, DAG))
3390     return SDValue();
3391 
3392   N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3393   N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3394 
3395   SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1);
3396   DCI.AddToWorklist(Mulhi.getNode());
3397   return DAG.getZExtOrTrunc(Mulhi, DL, VT);
3398 }
3399 
3400 SDValue AMDGPUTargetLowering::performMulLoHi24Combine(
3401   SDNode *N, DAGCombinerInfo &DCI) const {
3402   SelectionDAG &DAG = DCI.DAG;
3403 
3404   // Simplify demanded bits before splitting into multiple users.
3405   if (SDValue V = simplifyI24(N, DCI))
3406     return V;
3407 
3408   SDValue N0 = N->getOperand(0);
3409   SDValue N1 = N->getOperand(1);
3410 
3411   bool Signed = (N->getOpcode() == AMDGPUISD::MUL_LOHI_I24);
3412 
3413   unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3414   unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
3415 
3416   SDLoc SL(N);
3417 
3418   SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
3419   SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
3420   return DAG.getMergeValues({ MulLo, MulHi }, SL);
3421 }
3422 
3423 static bool isNegativeOne(SDValue Val) {
3424   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val))
3425     return C->isAllOnesValue();
3426   return false;
3427 }
3428 
3429 SDValue AMDGPUTargetLowering::getFFBX_U32(SelectionDAG &DAG,
3430                                           SDValue Op,
3431                                           const SDLoc &DL,
3432                                           unsigned Opc) const {
3433   EVT VT = Op.getValueType();
3434   EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT);
3435   if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() &&
3436                               LegalVT != MVT::i16))
3437     return SDValue();
3438 
3439   if (VT != MVT::i32)
3440     Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op);
3441 
3442   SDValue FFBX = DAG.getNode(Opc, DL, MVT::i32, Op);
3443   if (VT != MVT::i32)
3444     FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX);
3445 
3446   return FFBX;
3447 }
3448 
3449 // The native instructions return -1 on 0 input. Optimize out a select that
3450 // produces -1 on 0.
3451 //
3452 // TODO: If zero is not undef, we could also do this if the output is compared
3453 // against the bitwidth.
3454 //
3455 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
3456 SDValue AMDGPUTargetLowering::performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond,
3457                                                  SDValue LHS, SDValue RHS,
3458                                                  DAGCombinerInfo &DCI) const {
3459   ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
3460   if (!CmpRhs || !CmpRhs->isNullValue())
3461     return SDValue();
3462 
3463   SelectionDAG &DAG = DCI.DAG;
3464   ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
3465   SDValue CmpLHS = Cond.getOperand(0);
3466 
3467   unsigned Opc = isCttzOpc(RHS.getOpcode()) ? AMDGPUISD::FFBL_B32 :
3468                                            AMDGPUISD::FFBH_U32;
3469 
3470   // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
3471   // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_u32 x
3472   if (CCOpcode == ISD::SETEQ &&
3473       (isCtlzOpc(RHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
3474       RHS.getOperand(0) == CmpLHS &&
3475       isNegativeOne(LHS)) {
3476     return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3477   }
3478 
3479   // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
3480   // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_u32 x
3481   if (CCOpcode == ISD::SETNE &&
3482       (isCtlzOpc(LHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
3483       LHS.getOperand(0) == CmpLHS &&
3484       isNegativeOne(RHS)) {
3485     return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3486   }
3487 
3488   return SDValue();
3489 }
3490 
3491 static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI,
3492                                          unsigned Op,
3493                                          const SDLoc &SL,
3494                                          SDValue Cond,
3495                                          SDValue N1,
3496                                          SDValue N2) {
3497   SelectionDAG &DAG = DCI.DAG;
3498   EVT VT = N1.getValueType();
3499 
3500   SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond,
3501                                   N1.getOperand(0), N2.getOperand(0));
3502   DCI.AddToWorklist(NewSelect.getNode());
3503   return DAG.getNode(Op, SL, VT, NewSelect);
3504 }
3505 
3506 // Pull a free FP operation out of a select so it may fold into uses.
3507 //
3508 // select c, (fneg x), (fneg y) -> fneg (select c, x, y)
3509 // select c, (fneg x), k -> fneg (select c, x, (fneg k))
3510 //
3511 // select c, (fabs x), (fabs y) -> fabs (select c, x, y)
3512 // select c, (fabs x), +k -> fabs (select c, x, k)
3513 static SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
3514                                     SDValue N) {
3515   SelectionDAG &DAG = DCI.DAG;
3516   SDValue Cond = N.getOperand(0);
3517   SDValue LHS = N.getOperand(1);
3518   SDValue RHS = N.getOperand(2);
3519 
3520   EVT VT = N.getValueType();
3521   if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) ||
3522       (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) {
3523     return distributeOpThroughSelect(DCI, LHS.getOpcode(),
3524                                      SDLoc(N), Cond, LHS, RHS);
3525   }
3526 
3527   bool Inv = false;
3528   if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) {
3529     std::swap(LHS, RHS);
3530     Inv = true;
3531   }
3532 
3533   // TODO: Support vector constants.
3534   ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
3535   if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS) {
3536     SDLoc SL(N);
3537     // If one side is an fneg/fabs and the other is a constant, we can push the
3538     // fneg/fabs down. If it's an fabs, the constant needs to be non-negative.
3539     SDValue NewLHS = LHS.getOperand(0);
3540     SDValue NewRHS = RHS;
3541 
3542     // Careful: if the neg can be folded up, don't try to pull it back down.
3543     bool ShouldFoldNeg = true;
3544 
3545     if (NewLHS.hasOneUse()) {
3546       unsigned Opc = NewLHS.getOpcode();
3547       if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(Opc))
3548         ShouldFoldNeg = false;
3549       if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL)
3550         ShouldFoldNeg = false;
3551     }
3552 
3553     if (ShouldFoldNeg) {
3554       if (LHS.getOpcode() == ISD::FNEG)
3555         NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3556       else if (CRHS->isNegative())
3557         return SDValue();
3558 
3559       if (Inv)
3560         std::swap(NewLHS, NewRHS);
3561 
3562       SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT,
3563                                       Cond, NewLHS, NewRHS);
3564       DCI.AddToWorklist(NewSelect.getNode());
3565       return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
3566     }
3567   }
3568 
3569   return SDValue();
3570 }
3571 
3572 
3573 SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
3574                                                    DAGCombinerInfo &DCI) const {
3575   if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0)))
3576     return Folded;
3577 
3578   SDValue Cond = N->getOperand(0);
3579   if (Cond.getOpcode() != ISD::SETCC)
3580     return SDValue();
3581 
3582   EVT VT = N->getValueType(0);
3583   SDValue LHS = Cond.getOperand(0);
3584   SDValue RHS = Cond.getOperand(1);
3585   SDValue CC = Cond.getOperand(2);
3586 
3587   SDValue True = N->getOperand(1);
3588   SDValue False = N->getOperand(2);
3589 
3590   if (Cond.hasOneUse()) { // TODO: Look for multiple select uses.
3591     SelectionDAG &DAG = DCI.DAG;
3592     if (DAG.isConstantValueOfAnyType(True) &&
3593         !DAG.isConstantValueOfAnyType(False)) {
3594       // Swap cmp + select pair to move constant to false input.
3595       // This will allow using VOPC cndmasks more often.
3596       // select (setcc x, y), k, x -> select (setccinv x, y), x, k
3597 
3598       SDLoc SL(N);
3599       ISD::CondCode NewCC = getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
3600                                             LHS.getValueType().isInteger());
3601 
3602       SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC);
3603       return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True);
3604     }
3605 
3606     if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
3607       SDValue MinMax
3608         = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
3609       // Revisit this node so we can catch min3/max3/med3 patterns.
3610       //DCI.AddToWorklist(MinMax.getNode());
3611       return MinMax;
3612     }
3613   }
3614 
3615   // There's no reason to not do this if the condition has other uses.
3616   return performCtlz_CttzCombine(SDLoc(N), Cond, True, False, DCI);
3617 }
3618 
3619 static bool isInv2Pi(const APFloat &APF) {
3620   static const APFloat KF16(APFloat::IEEEhalf(), APInt(16, 0x3118));
3621   static const APFloat KF32(APFloat::IEEEsingle(), APInt(32, 0x3e22f983));
3622   static const APFloat KF64(APFloat::IEEEdouble(), APInt(64, 0x3fc45f306dc9c882));
3623 
3624   return APF.bitwiseIsEqual(KF16) ||
3625          APF.bitwiseIsEqual(KF32) ||
3626          APF.bitwiseIsEqual(KF64);
3627 }
3628 
3629 // 0 and 1.0 / (0.5 * pi) do not have inline immmediates, so there is an
3630 // additional cost to negate them.
3631 bool AMDGPUTargetLowering::isConstantCostlierToNegate(SDValue N) const {
3632   if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N)) {
3633     if (C->isZero() && !C->isNegative())
3634       return true;
3635 
3636     if (Subtarget->hasInv2PiInlineImm() && isInv2Pi(C->getValueAPF()))
3637       return true;
3638   }
3639 
3640   return false;
3641 }
3642 
3643 static unsigned inverseMinMax(unsigned Opc) {
3644   switch (Opc) {
3645   case ISD::FMAXNUM:
3646     return ISD::FMINNUM;
3647   case ISD::FMINNUM:
3648     return ISD::FMAXNUM;
3649   case ISD::FMAXNUM_IEEE:
3650     return ISD::FMINNUM_IEEE;
3651   case ISD::FMINNUM_IEEE:
3652     return ISD::FMAXNUM_IEEE;
3653   case AMDGPUISD::FMAX_LEGACY:
3654     return AMDGPUISD::FMIN_LEGACY;
3655   case AMDGPUISD::FMIN_LEGACY:
3656     return  AMDGPUISD::FMAX_LEGACY;
3657   default:
3658     llvm_unreachable("invalid min/max opcode");
3659   }
3660 }
3661 
3662 SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
3663                                                  DAGCombinerInfo &DCI) const {
3664   SelectionDAG &DAG = DCI.DAG;
3665   SDValue N0 = N->getOperand(0);
3666   EVT VT = N->getValueType(0);
3667 
3668   unsigned Opc = N0.getOpcode();
3669 
3670   // If the input has multiple uses and we can either fold the negate down, or
3671   // the other uses cannot, give up. This both prevents unprofitable
3672   // transformations and infinite loops: we won't repeatedly try to fold around
3673   // a negate that has no 'good' form.
3674   if (N0.hasOneUse()) {
3675     // This may be able to fold into the source, but at a code size cost. Don't
3676     // fold if the fold into the user is free.
3677     if (allUsesHaveSourceMods(N, 0))
3678       return SDValue();
3679   } else {
3680     if (fnegFoldsIntoOp(Opc) &&
3681         (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode())))
3682       return SDValue();
3683   }
3684 
3685   SDLoc SL(N);
3686   switch (Opc) {
3687   case ISD::FADD: {
3688     if (!mayIgnoreSignedZero(N0))
3689       return SDValue();
3690 
3691     // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y))
3692     SDValue LHS = N0.getOperand(0);
3693     SDValue RHS = N0.getOperand(1);
3694 
3695     if (LHS.getOpcode() != ISD::FNEG)
3696       LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
3697     else
3698       LHS = LHS.getOperand(0);
3699 
3700     if (RHS.getOpcode() != ISD::FNEG)
3701       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3702     else
3703       RHS = RHS.getOperand(0);
3704 
3705     SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags());
3706     if (Res.getOpcode() != ISD::FADD)
3707       return SDValue(); // Op got folded away.
3708     if (!N0.hasOneUse())
3709       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3710     return Res;
3711   }
3712   case ISD::FMUL:
3713   case AMDGPUISD::FMUL_LEGACY: {
3714     // (fneg (fmul x, y)) -> (fmul x, (fneg y))
3715     // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y))
3716     SDValue LHS = N0.getOperand(0);
3717     SDValue RHS = N0.getOperand(1);
3718 
3719     if (LHS.getOpcode() == ISD::FNEG)
3720       LHS = LHS.getOperand(0);
3721     else if (RHS.getOpcode() == ISD::FNEG)
3722       RHS = RHS.getOperand(0);
3723     else
3724       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3725 
3726     SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags());
3727     if (Res.getOpcode() != Opc)
3728       return SDValue(); // Op got folded away.
3729     if (!N0.hasOneUse())
3730       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3731     return Res;
3732   }
3733   case ISD::FMA:
3734   case ISD::FMAD: {
3735     if (!mayIgnoreSignedZero(N0))
3736       return SDValue();
3737 
3738     // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z))
3739     SDValue LHS = N0.getOperand(0);
3740     SDValue MHS = N0.getOperand(1);
3741     SDValue RHS = N0.getOperand(2);
3742 
3743     if (LHS.getOpcode() == ISD::FNEG)
3744       LHS = LHS.getOperand(0);
3745     else if (MHS.getOpcode() == ISD::FNEG)
3746       MHS = MHS.getOperand(0);
3747     else
3748       MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS);
3749 
3750     if (RHS.getOpcode() != ISD::FNEG)
3751       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3752     else
3753       RHS = RHS.getOperand(0);
3754 
3755     SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS);
3756     if (Res.getOpcode() != Opc)
3757       return SDValue(); // Op got folded away.
3758     if (!N0.hasOneUse())
3759       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3760     return Res;
3761   }
3762   case ISD::FMAXNUM:
3763   case ISD::FMINNUM:
3764   case ISD::FMAXNUM_IEEE:
3765   case ISD::FMINNUM_IEEE:
3766   case AMDGPUISD::FMAX_LEGACY:
3767   case AMDGPUISD::FMIN_LEGACY: {
3768     // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y)
3769     // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y)
3770     // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y)
3771     // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y)
3772 
3773     SDValue LHS = N0.getOperand(0);
3774     SDValue RHS = N0.getOperand(1);
3775 
3776     // 0 doesn't have a negated inline immediate.
3777     // TODO: This constant check should be generalized to other operations.
3778     if (isConstantCostlierToNegate(RHS))
3779       return SDValue();
3780 
3781     SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
3782     SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3783     unsigned Opposite = inverseMinMax(Opc);
3784 
3785     SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags());
3786     if (Res.getOpcode() != Opposite)
3787       return SDValue(); // Op got folded away.
3788     if (!N0.hasOneUse())
3789       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3790     return Res;
3791   }
3792   case AMDGPUISD::FMED3: {
3793     SDValue Ops[3];
3794     for (unsigned I = 0; I < 3; ++I)
3795       Ops[I] = DAG.getNode(ISD::FNEG, SL, VT, N0->getOperand(I), N0->getFlags());
3796 
3797     SDValue Res = DAG.getNode(AMDGPUISD::FMED3, SL, VT, Ops, N0->getFlags());
3798     if (Res.getOpcode() != AMDGPUISD::FMED3)
3799       return SDValue(); // Op got folded away.
3800     if (!N0.hasOneUse())
3801       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3802     return Res;
3803   }
3804   case ISD::FP_EXTEND:
3805   case ISD::FTRUNC:
3806   case ISD::FRINT:
3807   case ISD::FNEARBYINT: // XXX - Should fround be handled?
3808   case ISD::FSIN:
3809   case ISD::FCANONICALIZE:
3810   case AMDGPUISD::RCP:
3811   case AMDGPUISD::RCP_LEGACY:
3812   case AMDGPUISD::RCP_IFLAG:
3813   case AMDGPUISD::SIN_HW: {
3814     SDValue CvtSrc = N0.getOperand(0);
3815     if (CvtSrc.getOpcode() == ISD::FNEG) {
3816       // (fneg (fp_extend (fneg x))) -> (fp_extend x)
3817       // (fneg (rcp (fneg x))) -> (rcp x)
3818       return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0));
3819     }
3820 
3821     if (!N0.hasOneUse())
3822       return SDValue();
3823 
3824     // (fneg (fp_extend x)) -> (fp_extend (fneg x))
3825     // (fneg (rcp x)) -> (rcp (fneg x))
3826     SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
3827     return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags());
3828   }
3829   case ISD::FP_ROUND: {
3830     SDValue CvtSrc = N0.getOperand(0);
3831 
3832     if (CvtSrc.getOpcode() == ISD::FNEG) {
3833       // (fneg (fp_round (fneg x))) -> (fp_round x)
3834       return DAG.getNode(ISD::FP_ROUND, SL, VT,
3835                          CvtSrc.getOperand(0), N0.getOperand(1));
3836     }
3837 
3838     if (!N0.hasOneUse())
3839       return SDValue();
3840 
3841     // (fneg (fp_round x)) -> (fp_round (fneg x))
3842     SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
3843     return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1));
3844   }
3845   case ISD::FP16_TO_FP: {
3846     // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal
3847     // f16, but legalization of f16 fneg ends up pulling it out of the source.
3848     // Put the fneg back as a legal source operation that can be matched later.
3849     SDLoc SL(N);
3850 
3851     SDValue Src = N0.getOperand(0);
3852     EVT SrcVT = Src.getValueType();
3853 
3854     // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000)
3855     SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src,
3856                                   DAG.getConstant(0x8000, SL, SrcVT));
3857     return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg);
3858   }
3859   default:
3860     return SDValue();
3861   }
3862 }
3863 
3864 SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N,
3865                                                  DAGCombinerInfo &DCI) const {
3866   SelectionDAG &DAG = DCI.DAG;
3867   SDValue N0 = N->getOperand(0);
3868 
3869   if (!N0.hasOneUse())
3870     return SDValue();
3871 
3872   switch (N0.getOpcode()) {
3873   case ISD::FP16_TO_FP: {
3874     assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal");
3875     SDLoc SL(N);
3876     SDValue Src = N0.getOperand(0);
3877     EVT SrcVT = Src.getValueType();
3878 
3879     // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff)
3880     SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src,
3881                                   DAG.getConstant(0x7fff, SL, SrcVT));
3882     return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs);
3883   }
3884   default:
3885     return SDValue();
3886   }
3887 }
3888 
3889 SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N,
3890                                                 DAGCombinerInfo &DCI) const {
3891   const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
3892   if (!CFP)
3893     return SDValue();
3894 
3895   // XXX - Should this flush denormals?
3896   const APFloat &Val = CFP->getValueAPF();
3897   APFloat One(Val.getSemantics(), "1.0");
3898   return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
3899 }
3900 
3901 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
3902                                                 DAGCombinerInfo &DCI) const {
3903   SelectionDAG &DAG = DCI.DAG;
3904   SDLoc DL(N);
3905 
3906   switch(N->getOpcode()) {
3907   default:
3908     break;
3909   case ISD::BITCAST: {
3910     EVT DestVT = N->getValueType(0);
3911 
3912     // Push casts through vector builds. This helps avoid emitting a large
3913     // number of copies when materializing floating point vector constants.
3914     //
3915     // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) =>
3916     //   vnt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y))
3917     if (DestVT.isVector()) {
3918       SDValue Src = N->getOperand(0);
3919       if (Src.getOpcode() == ISD::BUILD_VECTOR) {
3920         EVT SrcVT = Src.getValueType();
3921         unsigned NElts = DestVT.getVectorNumElements();
3922 
3923         if (SrcVT.getVectorNumElements() == NElts) {
3924           EVT DestEltVT = DestVT.getVectorElementType();
3925 
3926           SmallVector<SDValue, 8> CastedElts;
3927           SDLoc SL(N);
3928           for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) {
3929             SDValue Elt = Src.getOperand(I);
3930             CastedElts.push_back(DAG.getNode(ISD::BITCAST, DL, DestEltVT, Elt));
3931           }
3932 
3933           return DAG.getBuildVector(DestVT, SL, CastedElts);
3934         }
3935       }
3936     }
3937 
3938     if (DestVT.getSizeInBits() != 64 && !DestVT.isVector())
3939       break;
3940 
3941     // Fold bitcasts of constants.
3942     //
3943     // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
3944     // TODO: Generalize and move to DAGCombiner
3945     SDValue Src = N->getOperand(0);
3946     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
3947       if (Src.getValueType() == MVT::i64) {
3948         SDLoc SL(N);
3949         uint64_t CVal = C->getZExtValue();
3950         SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
3951                                  DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
3952                                  DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
3953         return DAG.getNode(ISD::BITCAST, SL, DestVT, BV);
3954       }
3955     }
3956 
3957     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
3958       const APInt &Val = C->getValueAPF().bitcastToAPInt();
3959       SDLoc SL(N);
3960       uint64_t CVal = Val.getZExtValue();
3961       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
3962                                 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
3963                                 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
3964 
3965       return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
3966     }
3967 
3968     break;
3969   }
3970   case ISD::SHL: {
3971     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3972       break;
3973 
3974     return performShlCombine(N, DCI);
3975   }
3976   case ISD::SRL: {
3977     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3978       break;
3979 
3980     return performSrlCombine(N, DCI);
3981   }
3982   case ISD::SRA: {
3983     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3984       break;
3985 
3986     return performSraCombine(N, DCI);
3987   }
3988   case ISD::TRUNCATE:
3989     return performTruncateCombine(N, DCI);
3990   case ISD::MUL:
3991     return performMulCombine(N, DCI);
3992   case ISD::MULHS:
3993     return performMulhsCombine(N, DCI);
3994   case ISD::MULHU:
3995     return performMulhuCombine(N, DCI);
3996   case AMDGPUISD::MUL_I24:
3997   case AMDGPUISD::MUL_U24:
3998   case AMDGPUISD::MULHI_I24:
3999   case AMDGPUISD::MULHI_U24: {
4000     if (SDValue V = simplifyI24(N, DCI))
4001       return V;
4002     return SDValue();
4003   }
4004   case AMDGPUISD::MUL_LOHI_I24:
4005   case AMDGPUISD::MUL_LOHI_U24:
4006     return performMulLoHi24Combine(N, DCI);
4007   case ISD::SELECT:
4008     return performSelectCombine(N, DCI);
4009   case ISD::FNEG:
4010     return performFNegCombine(N, DCI);
4011   case ISD::FABS:
4012     return performFAbsCombine(N, DCI);
4013   case AMDGPUISD::BFE_I32:
4014   case AMDGPUISD::BFE_U32: {
4015     assert(!N->getValueType(0).isVector() &&
4016            "Vector handling of BFE not implemented");
4017     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
4018     if (!Width)
4019       break;
4020 
4021     uint32_t WidthVal = Width->getZExtValue() & 0x1f;
4022     if (WidthVal == 0)
4023       return DAG.getConstant(0, DL, MVT::i32);
4024 
4025     ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
4026     if (!Offset)
4027       break;
4028 
4029     SDValue BitsFrom = N->getOperand(0);
4030     uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
4031 
4032     bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
4033 
4034     if (OffsetVal == 0) {
4035       // This is already sign / zero extended, so try to fold away extra BFEs.
4036       unsigned SignBits =  Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
4037 
4038       unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
4039       if (OpSignBits >= SignBits)
4040         return BitsFrom;
4041 
4042       EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
4043       if (Signed) {
4044         // This is a sign_extend_inreg. Replace it to take advantage of existing
4045         // DAG Combines. If not eliminated, we will match back to BFE during
4046         // selection.
4047 
4048         // TODO: The sext_inreg of extended types ends, although we can could
4049         // handle them in a single BFE.
4050         return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
4051                            DAG.getValueType(SmallVT));
4052       }
4053 
4054       return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
4055     }
4056 
4057     if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
4058       if (Signed) {
4059         return constantFoldBFE<int32_t>(DAG,
4060                                         CVal->getSExtValue(),
4061                                         OffsetVal,
4062                                         WidthVal,
4063                                         DL);
4064       }
4065 
4066       return constantFoldBFE<uint32_t>(DAG,
4067                                        CVal->getZExtValue(),
4068                                        OffsetVal,
4069                                        WidthVal,
4070                                        DL);
4071     }
4072 
4073     if ((OffsetVal + WidthVal) >= 32 &&
4074         !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
4075       SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
4076       return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
4077                          BitsFrom, ShiftVal);
4078     }
4079 
4080     if (BitsFrom.hasOneUse()) {
4081       APInt Demanded = APInt::getBitsSet(32,
4082                                          OffsetVal,
4083                                          OffsetVal + WidthVal);
4084 
4085       KnownBits Known;
4086       TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
4087                                             !DCI.isBeforeLegalizeOps());
4088       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4089       if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) ||
4090           TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) {
4091         DCI.CommitTargetLoweringOpt(TLO);
4092       }
4093     }
4094 
4095     break;
4096   }
4097   case ISD::LOAD:
4098     return performLoadCombine(N, DCI);
4099   case ISD::STORE:
4100     return performStoreCombine(N, DCI);
4101   case AMDGPUISD::RCP:
4102   case AMDGPUISD::RCP_IFLAG:
4103     return performRcpCombine(N, DCI);
4104   case ISD::AssertZext:
4105   case ISD::AssertSext:
4106     return performAssertSZExtCombine(N, DCI);
4107   }
4108   return SDValue();
4109 }
4110 
4111 //===----------------------------------------------------------------------===//
4112 // Helper functions
4113 //===----------------------------------------------------------------------===//
4114 
4115 SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
4116                                                    const TargetRegisterClass *RC,
4117                                                    unsigned Reg, EVT VT,
4118                                                    const SDLoc &SL,
4119                                                    bool RawReg) const {
4120   MachineFunction &MF = DAG.getMachineFunction();
4121   MachineRegisterInfo &MRI = MF.getRegInfo();
4122   unsigned VReg;
4123 
4124   if (!MRI.isLiveIn(Reg)) {
4125     VReg = MRI.createVirtualRegister(RC);
4126     MRI.addLiveIn(Reg, VReg);
4127   } else {
4128     VReg = MRI.getLiveInVirtReg(Reg);
4129   }
4130 
4131   if (RawReg)
4132     return DAG.getRegister(VReg, VT);
4133 
4134   return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT);
4135 }
4136 
4137 SDValue AMDGPUTargetLowering::loadStackInputValue(SelectionDAG &DAG,
4138                                                   EVT VT,
4139                                                   const SDLoc &SL,
4140                                                   int64_t Offset) const {
4141   MachineFunction &MF = DAG.getMachineFunction();
4142   MachineFrameInfo &MFI = MF.getFrameInfo();
4143 
4144   int FI = MFI.CreateFixedObject(VT.getStoreSize(), Offset, true);
4145   auto SrcPtrInfo = MachinePointerInfo::getStack(MF, Offset);
4146   SDValue Ptr = DAG.getFrameIndex(FI, MVT::i32);
4147 
4148   return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, 4,
4149                      MachineMemOperand::MODereferenceable |
4150                      MachineMemOperand::MOInvariant);
4151 }
4152 
4153 SDValue AMDGPUTargetLowering::storeStackInputValue(SelectionDAG &DAG,
4154                                                    const SDLoc &SL,
4155                                                    SDValue Chain,
4156                                                    SDValue ArgVal,
4157                                                    int64_t Offset) const {
4158   MachineFunction &MF = DAG.getMachineFunction();
4159   MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset);
4160 
4161   SDValue Ptr = DAG.getConstant(Offset, SL, MVT::i32);
4162   SDValue Store = DAG.getStore(Chain, SL, ArgVal, Ptr, DstInfo, 4,
4163                                MachineMemOperand::MODereferenceable);
4164   return Store;
4165 }
4166 
4167 SDValue AMDGPUTargetLowering::loadInputValue(SelectionDAG &DAG,
4168                                              const TargetRegisterClass *RC,
4169                                              EVT VT, const SDLoc &SL,
4170                                              const ArgDescriptor &Arg) const {
4171   assert(Arg && "Attempting to load missing argument");
4172 
4173   SDValue V = Arg.isRegister() ?
4174     CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL) :
4175     loadStackInputValue(DAG, VT, SL, Arg.getStackOffset());
4176 
4177   if (!Arg.isMasked())
4178     return V;
4179 
4180   unsigned Mask = Arg.getMask();
4181   unsigned Shift = countTrailingZeros<unsigned>(Mask);
4182   V = DAG.getNode(ISD::SRL, SL, VT, V,
4183                   DAG.getShiftAmountConstant(Shift, VT, SL));
4184   return DAG.getNode(ISD::AND, SL, VT, V,
4185                      DAG.getConstant(Mask >> Shift, SL, VT));
4186 }
4187 
4188 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
4189     const MachineFunction &MF, const ImplicitParameter Param) const {
4190   const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
4191   const AMDGPUSubtarget &ST =
4192       AMDGPUSubtarget::get(getTargetMachine(), MF.getFunction());
4193   unsigned ExplicitArgOffset = ST.getExplicitKernelArgOffset(MF.getFunction());
4194   unsigned Alignment = ST.getAlignmentForImplicitArgPtr();
4195   uint64_t ArgOffset = alignTo(MFI->getExplicitKernArgSize(), Alignment) +
4196                        ExplicitArgOffset;
4197   switch (Param) {
4198   case GRID_DIM:
4199     return ArgOffset;
4200   case GRID_OFFSET:
4201     return ArgOffset + 4;
4202   }
4203   llvm_unreachable("unexpected implicit parameter type");
4204 }
4205 
4206 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
4207 
4208 const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
4209   switch ((AMDGPUISD::NodeType)Opcode) {
4210   case AMDGPUISD::FIRST_NUMBER: break;
4211   // AMDIL DAG nodes
4212   NODE_NAME_CASE(UMUL);
4213   NODE_NAME_CASE(BRANCH_COND);
4214 
4215   // AMDGPU DAG nodes
4216   NODE_NAME_CASE(IF)
4217   NODE_NAME_CASE(ELSE)
4218   NODE_NAME_CASE(LOOP)
4219   NODE_NAME_CASE(CALL)
4220   NODE_NAME_CASE(TC_RETURN)
4221   NODE_NAME_CASE(TRAP)
4222   NODE_NAME_CASE(RET_FLAG)
4223   NODE_NAME_CASE(RETURN_TO_EPILOG)
4224   NODE_NAME_CASE(ENDPGM)
4225   NODE_NAME_CASE(DWORDADDR)
4226   NODE_NAME_CASE(FRACT)
4227   NODE_NAME_CASE(SETCC)
4228   NODE_NAME_CASE(SETREG)
4229   NODE_NAME_CASE(DENORM_MODE)
4230   NODE_NAME_CASE(FMA_W_CHAIN)
4231   NODE_NAME_CASE(FMUL_W_CHAIN)
4232   NODE_NAME_CASE(CLAMP)
4233   NODE_NAME_CASE(COS_HW)
4234   NODE_NAME_CASE(SIN_HW)
4235   NODE_NAME_CASE(FMAX_LEGACY)
4236   NODE_NAME_CASE(FMIN_LEGACY)
4237   NODE_NAME_CASE(FMAX3)
4238   NODE_NAME_CASE(SMAX3)
4239   NODE_NAME_CASE(UMAX3)
4240   NODE_NAME_CASE(FMIN3)
4241   NODE_NAME_CASE(SMIN3)
4242   NODE_NAME_CASE(UMIN3)
4243   NODE_NAME_CASE(FMED3)
4244   NODE_NAME_CASE(SMED3)
4245   NODE_NAME_CASE(UMED3)
4246   NODE_NAME_CASE(FDOT2)
4247   NODE_NAME_CASE(URECIP)
4248   NODE_NAME_CASE(DIV_SCALE)
4249   NODE_NAME_CASE(DIV_FMAS)
4250   NODE_NAME_CASE(DIV_FIXUP)
4251   NODE_NAME_CASE(FMAD_FTZ)
4252   NODE_NAME_CASE(TRIG_PREOP)
4253   NODE_NAME_CASE(RCP)
4254   NODE_NAME_CASE(RSQ)
4255   NODE_NAME_CASE(RCP_LEGACY)
4256   NODE_NAME_CASE(RSQ_LEGACY)
4257   NODE_NAME_CASE(RCP_IFLAG)
4258   NODE_NAME_CASE(FMUL_LEGACY)
4259   NODE_NAME_CASE(RSQ_CLAMP)
4260   NODE_NAME_CASE(LDEXP)
4261   NODE_NAME_CASE(FP_CLASS)
4262   NODE_NAME_CASE(DOT4)
4263   NODE_NAME_CASE(CARRY)
4264   NODE_NAME_CASE(BORROW)
4265   NODE_NAME_CASE(BFE_U32)
4266   NODE_NAME_CASE(BFE_I32)
4267   NODE_NAME_CASE(BFI)
4268   NODE_NAME_CASE(BFM)
4269   NODE_NAME_CASE(FFBH_U32)
4270   NODE_NAME_CASE(FFBH_I32)
4271   NODE_NAME_CASE(FFBL_B32)
4272   NODE_NAME_CASE(MUL_U24)
4273   NODE_NAME_CASE(MUL_I24)
4274   NODE_NAME_CASE(MULHI_U24)
4275   NODE_NAME_CASE(MULHI_I24)
4276   NODE_NAME_CASE(MUL_LOHI_U24)
4277   NODE_NAME_CASE(MUL_LOHI_I24)
4278   NODE_NAME_CASE(MAD_U24)
4279   NODE_NAME_CASE(MAD_I24)
4280   NODE_NAME_CASE(MAD_I64_I32)
4281   NODE_NAME_CASE(MAD_U64_U32)
4282   NODE_NAME_CASE(PERM)
4283   NODE_NAME_CASE(TEXTURE_FETCH)
4284   NODE_NAME_CASE(EXPORT)
4285   NODE_NAME_CASE(EXPORT_DONE)
4286   NODE_NAME_CASE(R600_EXPORT)
4287   NODE_NAME_CASE(CONST_ADDRESS)
4288   NODE_NAME_CASE(REGISTER_LOAD)
4289   NODE_NAME_CASE(REGISTER_STORE)
4290   NODE_NAME_CASE(SAMPLE)
4291   NODE_NAME_CASE(SAMPLEB)
4292   NODE_NAME_CASE(SAMPLED)
4293   NODE_NAME_CASE(SAMPLEL)
4294   NODE_NAME_CASE(CVT_F32_UBYTE0)
4295   NODE_NAME_CASE(CVT_F32_UBYTE1)
4296   NODE_NAME_CASE(CVT_F32_UBYTE2)
4297   NODE_NAME_CASE(CVT_F32_UBYTE3)
4298   NODE_NAME_CASE(CVT_PKRTZ_F16_F32)
4299   NODE_NAME_CASE(CVT_PKNORM_I16_F32)
4300   NODE_NAME_CASE(CVT_PKNORM_U16_F32)
4301   NODE_NAME_CASE(CVT_PK_I16_I32)
4302   NODE_NAME_CASE(CVT_PK_U16_U32)
4303   NODE_NAME_CASE(FP_TO_FP16)
4304   NODE_NAME_CASE(FP16_ZEXT)
4305   NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
4306   NODE_NAME_CASE(CONST_DATA_PTR)
4307   NODE_NAME_CASE(PC_ADD_REL_OFFSET)
4308   NODE_NAME_CASE(LDS)
4309   NODE_NAME_CASE(KILL)
4310   NODE_NAME_CASE(DUMMY_CHAIN)
4311   case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
4312   NODE_NAME_CASE(INIT_EXEC)
4313   NODE_NAME_CASE(INIT_EXEC_FROM_INPUT)
4314   NODE_NAME_CASE(SENDMSG)
4315   NODE_NAME_CASE(SENDMSGHALT)
4316   NODE_NAME_CASE(INTERP_MOV)
4317   NODE_NAME_CASE(INTERP_P1)
4318   NODE_NAME_CASE(INTERP_P2)
4319   NODE_NAME_CASE(INTERP_P1LL_F16)
4320   NODE_NAME_CASE(INTERP_P1LV_F16)
4321   NODE_NAME_CASE(INTERP_P2_F16)
4322   NODE_NAME_CASE(LOAD_D16_HI)
4323   NODE_NAME_CASE(LOAD_D16_LO)
4324   NODE_NAME_CASE(LOAD_D16_HI_I8)
4325   NODE_NAME_CASE(LOAD_D16_HI_U8)
4326   NODE_NAME_CASE(LOAD_D16_LO_I8)
4327   NODE_NAME_CASE(LOAD_D16_LO_U8)
4328   NODE_NAME_CASE(STORE_MSKOR)
4329   NODE_NAME_CASE(LOAD_CONSTANT)
4330   NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
4331   NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16)
4332   NODE_NAME_CASE(TBUFFER_LOAD_FORMAT)
4333   NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16)
4334   NODE_NAME_CASE(DS_ORDERED_COUNT)
4335   NODE_NAME_CASE(ATOMIC_CMP_SWAP)
4336   NODE_NAME_CASE(ATOMIC_INC)
4337   NODE_NAME_CASE(ATOMIC_DEC)
4338   NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
4339   NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
4340   NODE_NAME_CASE(BUFFER_LOAD)
4341   NODE_NAME_CASE(BUFFER_LOAD_UBYTE)
4342   NODE_NAME_CASE(BUFFER_LOAD_USHORT)
4343   NODE_NAME_CASE(BUFFER_LOAD_BYTE)
4344   NODE_NAME_CASE(BUFFER_LOAD_SHORT)
4345   NODE_NAME_CASE(BUFFER_LOAD_FORMAT)
4346   NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16)
4347   NODE_NAME_CASE(SBUFFER_LOAD)
4348   NODE_NAME_CASE(BUFFER_STORE)
4349   NODE_NAME_CASE(BUFFER_STORE_BYTE)
4350   NODE_NAME_CASE(BUFFER_STORE_SHORT)
4351   NODE_NAME_CASE(BUFFER_STORE_FORMAT)
4352   NODE_NAME_CASE(BUFFER_STORE_FORMAT_D16)
4353   NODE_NAME_CASE(BUFFER_ATOMIC_SWAP)
4354   NODE_NAME_CASE(BUFFER_ATOMIC_ADD)
4355   NODE_NAME_CASE(BUFFER_ATOMIC_SUB)
4356   NODE_NAME_CASE(BUFFER_ATOMIC_SMIN)
4357   NODE_NAME_CASE(BUFFER_ATOMIC_UMIN)
4358   NODE_NAME_CASE(BUFFER_ATOMIC_SMAX)
4359   NODE_NAME_CASE(BUFFER_ATOMIC_UMAX)
4360   NODE_NAME_CASE(BUFFER_ATOMIC_AND)
4361   NODE_NAME_CASE(BUFFER_ATOMIC_OR)
4362   NODE_NAME_CASE(BUFFER_ATOMIC_XOR)
4363   NODE_NAME_CASE(BUFFER_ATOMIC_INC)
4364   NODE_NAME_CASE(BUFFER_ATOMIC_DEC)
4365   NODE_NAME_CASE(BUFFER_ATOMIC_CMPSWAP)
4366   NODE_NAME_CASE(BUFFER_ATOMIC_FADD)
4367   NODE_NAME_CASE(BUFFER_ATOMIC_PK_FADD)
4368   NODE_NAME_CASE(ATOMIC_FADD)
4369   NODE_NAME_CASE(ATOMIC_PK_FADD)
4370 
4371   case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
4372   }
4373   return nullptr;
4374 }
4375 
4376 SDValue AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand,
4377                                               SelectionDAG &DAG, int Enabled,
4378                                               int &RefinementSteps,
4379                                               bool &UseOneConstNR,
4380                                               bool Reciprocal) const {
4381   EVT VT = Operand.getValueType();
4382 
4383   if (VT == MVT::f32) {
4384     RefinementSteps = 0;
4385     return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
4386   }
4387 
4388   // TODO: There is also f64 rsq instruction, but the documentation is less
4389   // clear on its precision.
4390 
4391   return SDValue();
4392 }
4393 
4394 SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
4395                                                SelectionDAG &DAG, int Enabled,
4396                                                int &RefinementSteps) const {
4397   EVT VT = Operand.getValueType();
4398 
4399   if (VT == MVT::f32) {
4400     // Reciprocal, < 1 ulp error.
4401     //
4402     // This reciprocal approximation converges to < 0.5 ulp error with one
4403     // newton rhapson performed with two fused multiple adds (FMAs).
4404 
4405     RefinementSteps = 0;
4406     return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
4407   }
4408 
4409   // TODO: There is also f64 rcp instruction, but the documentation is less
4410   // clear on its precision.
4411 
4412   return SDValue();
4413 }
4414 
4415 void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
4416     const SDValue Op, KnownBits &Known,
4417     const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
4418 
4419   Known.resetAll(); // Don't know anything.
4420 
4421   unsigned Opc = Op.getOpcode();
4422 
4423   switch (Opc) {
4424   default:
4425     break;
4426   case AMDGPUISD::CARRY:
4427   case AMDGPUISD::BORROW: {
4428     Known.Zero = APInt::getHighBitsSet(32, 31);
4429     break;
4430   }
4431 
4432   case AMDGPUISD::BFE_I32:
4433   case AMDGPUISD::BFE_U32: {
4434     ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4435     if (!CWidth)
4436       return;
4437 
4438     uint32_t Width = CWidth->getZExtValue() & 0x1f;
4439 
4440     if (Opc == AMDGPUISD::BFE_U32)
4441       Known.Zero = APInt::getHighBitsSet(32, 32 - Width);
4442 
4443     break;
4444   }
4445   case AMDGPUISD::FP_TO_FP16:
4446   case AMDGPUISD::FP16_ZEXT: {
4447     unsigned BitWidth = Known.getBitWidth();
4448 
4449     // High bits are zero.
4450     Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
4451     break;
4452   }
4453   case AMDGPUISD::MUL_U24:
4454   case AMDGPUISD::MUL_I24: {
4455     KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
4456     KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
4457     unsigned TrailZ = LHSKnown.countMinTrailingZeros() +
4458                       RHSKnown.countMinTrailingZeros();
4459     Known.Zero.setLowBits(std::min(TrailZ, 32u));
4460 
4461     // Truncate to 24 bits.
4462     LHSKnown = LHSKnown.trunc(24);
4463     RHSKnown = RHSKnown.trunc(24);
4464 
4465     bool Negative = false;
4466     if (Opc == AMDGPUISD::MUL_I24) {
4467       unsigned LHSValBits = 24 - LHSKnown.countMinSignBits();
4468       unsigned RHSValBits = 24 - RHSKnown.countMinSignBits();
4469       unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
4470       if (MaxValBits >= 32)
4471         break;
4472       bool LHSNegative = LHSKnown.isNegative();
4473       bool LHSPositive = LHSKnown.isNonNegative();
4474       bool RHSNegative = RHSKnown.isNegative();
4475       bool RHSPositive = RHSKnown.isNonNegative();
4476       if ((!LHSNegative && !LHSPositive) || (!RHSNegative && !RHSPositive))
4477         break;
4478       Negative = (LHSNegative && RHSPositive) || (LHSPositive && RHSNegative);
4479       if (Negative)
4480         Known.One.setHighBits(32 - MaxValBits);
4481       else
4482         Known.Zero.setHighBits(32 - MaxValBits);
4483     } else {
4484       unsigned LHSValBits = 24 - LHSKnown.countMinLeadingZeros();
4485       unsigned RHSValBits = 24 - RHSKnown.countMinLeadingZeros();
4486       unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
4487       if (MaxValBits >= 32)
4488         break;
4489       Known.Zero.setHighBits(32 - MaxValBits);
4490     }
4491     break;
4492   }
4493   case AMDGPUISD::PERM: {
4494     ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4495     if (!CMask)
4496       return;
4497 
4498     KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
4499     KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
4500     unsigned Sel = CMask->getZExtValue();
4501 
4502     for (unsigned I = 0; I < 32; I += 8) {
4503       unsigned SelBits = Sel & 0xff;
4504       if (SelBits < 4) {
4505         SelBits *= 8;
4506         Known.One |= ((RHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
4507         Known.Zero |= ((RHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
4508       } else if (SelBits < 7) {
4509         SelBits = (SelBits & 3) * 8;
4510         Known.One |= ((LHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
4511         Known.Zero |= ((LHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
4512       } else if (SelBits == 0x0c) {
4513         Known.Zero |= 0xFFull << I;
4514       } else if (SelBits > 0x0c) {
4515         Known.One |= 0xFFull << I;
4516       }
4517       Sel >>= 8;
4518     }
4519     break;
4520   }
4521   case AMDGPUISD::BUFFER_LOAD_UBYTE:  {
4522     Known.Zero.setHighBits(24);
4523     break;
4524   }
4525   case AMDGPUISD::BUFFER_LOAD_USHORT: {
4526     Known.Zero.setHighBits(16);
4527     break;
4528   }
4529   case AMDGPUISD::LDS: {
4530     auto GA = cast<GlobalAddressSDNode>(Op.getOperand(0).getNode());
4531     unsigned Align = GA->getGlobal()->getAlignment();
4532 
4533     Known.Zero.setHighBits(16);
4534     if (Align)
4535       Known.Zero.setLowBits(Log2_32(Align));
4536     break;
4537   }
4538   case ISD::INTRINSIC_WO_CHAIN: {
4539     unsigned IID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4540     switch (IID) {
4541     case Intrinsic::amdgcn_mbcnt_lo:
4542     case Intrinsic::amdgcn_mbcnt_hi: {
4543       const GCNSubtarget &ST =
4544           DAG.getMachineFunction().getSubtarget<GCNSubtarget>();
4545       // These return at most the wavefront size - 1.
4546       unsigned Size = Op.getValueType().getSizeInBits();
4547       Known.Zero.setHighBits(Size - ST.getWavefrontSizeLog2());
4548       break;
4549     }
4550     default:
4551       break;
4552     }
4553   }
4554   }
4555 }
4556 
4557 unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
4558     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
4559     unsigned Depth) const {
4560   switch (Op.getOpcode()) {
4561   case AMDGPUISD::BFE_I32: {
4562     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4563     if (!Width)
4564       return 1;
4565 
4566     unsigned SignBits = 32 - Width->getZExtValue() + 1;
4567     if (!isNullConstant(Op.getOperand(1)))
4568       return SignBits;
4569 
4570     // TODO: Could probably figure something out with non-0 offsets.
4571     unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4572     return std::max(SignBits, Op0SignBits);
4573   }
4574 
4575   case AMDGPUISD::BFE_U32: {
4576     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4577     return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
4578   }
4579 
4580   case AMDGPUISD::CARRY:
4581   case AMDGPUISD::BORROW:
4582     return 31;
4583   case AMDGPUISD::BUFFER_LOAD_BYTE:
4584     return 25;
4585   case AMDGPUISD::BUFFER_LOAD_SHORT:
4586     return 17;
4587   case AMDGPUISD::BUFFER_LOAD_UBYTE:
4588     return 24;
4589   case AMDGPUISD::BUFFER_LOAD_USHORT:
4590     return 16;
4591   case AMDGPUISD::FP_TO_FP16:
4592   case AMDGPUISD::FP16_ZEXT:
4593     return 16;
4594   default:
4595     return 1;
4596   }
4597 }
4598 
4599 bool AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
4600                                                         const SelectionDAG &DAG,
4601                                                         bool SNaN,
4602                                                         unsigned Depth) const {
4603   unsigned Opcode = Op.getOpcode();
4604   switch (Opcode) {
4605   case AMDGPUISD::FMIN_LEGACY:
4606   case AMDGPUISD::FMAX_LEGACY: {
4607     if (SNaN)
4608       return true;
4609 
4610     // TODO: Can check no nans on one of the operands for each one, but which
4611     // one?
4612     return false;
4613   }
4614   case AMDGPUISD::FMUL_LEGACY:
4615   case AMDGPUISD::CVT_PKRTZ_F16_F32: {
4616     if (SNaN)
4617       return true;
4618     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4619            DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4620   }
4621   case AMDGPUISD::FMED3:
4622   case AMDGPUISD::FMIN3:
4623   case AMDGPUISD::FMAX3:
4624   case AMDGPUISD::FMAD_FTZ: {
4625     if (SNaN)
4626       return true;
4627     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4628            DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4629            DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4630   }
4631   case AMDGPUISD::CVT_F32_UBYTE0:
4632   case AMDGPUISD::CVT_F32_UBYTE1:
4633   case AMDGPUISD::CVT_F32_UBYTE2:
4634   case AMDGPUISD::CVT_F32_UBYTE3:
4635     return true;
4636 
4637   case AMDGPUISD::RCP:
4638   case AMDGPUISD::RSQ:
4639   case AMDGPUISD::RCP_LEGACY:
4640   case AMDGPUISD::RSQ_LEGACY:
4641   case AMDGPUISD::RSQ_CLAMP: {
4642     if (SNaN)
4643       return true;
4644 
4645     // TODO: Need is known positive check.
4646     return false;
4647   }
4648   case AMDGPUISD::LDEXP:
4649   case AMDGPUISD::FRACT: {
4650     if (SNaN)
4651       return true;
4652     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4653   }
4654   case AMDGPUISD::DIV_SCALE:
4655   case AMDGPUISD::DIV_FMAS:
4656   case AMDGPUISD::DIV_FIXUP:
4657   case AMDGPUISD::TRIG_PREOP:
4658     // TODO: Refine on operands.
4659     return SNaN;
4660   case AMDGPUISD::SIN_HW:
4661   case AMDGPUISD::COS_HW: {
4662     // TODO: Need check for infinity
4663     return SNaN;
4664   }
4665   case ISD::INTRINSIC_WO_CHAIN: {
4666     unsigned IntrinsicID
4667       = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4668     // TODO: Handle more intrinsics
4669     switch (IntrinsicID) {
4670     case Intrinsic::amdgcn_cubeid:
4671       return true;
4672 
4673     case Intrinsic::amdgcn_frexp_mant: {
4674       if (SNaN)
4675         return true;
4676       return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4677     }
4678     case Intrinsic::amdgcn_cvt_pkrtz: {
4679       if (SNaN)
4680         return true;
4681       return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4682              DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4683     }
4684     case Intrinsic::amdgcn_fdot2:
4685       // TODO: Refine on operand
4686       return SNaN;
4687     default:
4688       return false;
4689     }
4690   }
4691   default:
4692     return false;
4693   }
4694 }
4695 
4696 TargetLowering::AtomicExpansionKind
4697 AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
4698   switch (RMW->getOperation()) {
4699   case AtomicRMWInst::Nand:
4700   case AtomicRMWInst::FAdd:
4701   case AtomicRMWInst::FSub:
4702     return AtomicExpansionKind::CmpXChg;
4703   default:
4704     return AtomicExpansionKind::None;
4705   }
4706 }
4707