1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Custom DAG lowering for SI
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "SIISelLowering.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUInstrInfo.h"
17 #include "AMDGPUTargetMachine.h"
18 #include "SIMachineFunctionInfo.h"
19 #include "SIRegisterInfo.h"
20 #include "llvm/ADT/FloatingPointMode.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Analysis/LegacyDivergenceAnalysis.h"
23 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
24 #include "llvm/BinaryFormat/ELF.h"
25 #include "llvm/CodeGen/Analysis.h"
26 #include "llvm/CodeGen/FunctionLoweringInfo.h"
27 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
28 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
29 #include "llvm/CodeGen/MachineFrameInfo.h"
30 #include "llvm/CodeGen/MachineFunction.h"
31 #include "llvm/CodeGen/MachineLoopInfo.h"
32 #include "llvm/IR/DiagnosticInfo.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/IR/IntrinsicsAMDGPU.h"
35 #include "llvm/IR/IntrinsicsR600.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/KnownBits.h"
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "si-lower"
42 
43 STATISTIC(NumTailCalls, "Number of tail calls");
44 
45 static cl::opt<bool> DisableLoopAlignment(
46   "amdgpu-disable-loop-alignment",
47   cl::desc("Do not align and prefetch loops"),
48   cl::init(false));
49 
50 static cl::opt<bool> UseDivergentRegisterIndexing(
51   "amdgpu-use-divergent-register-indexing",
52   cl::Hidden,
53   cl::desc("Use indirect register addressing for divergent indexes"),
54   cl::init(false));
55 
56 static bool hasFP32Denormals(const MachineFunction &MF) {
57   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
58   return Info->getMode().allFP32Denormals();
59 }
60 
61 static bool hasFP64FP16Denormals(const MachineFunction &MF) {
62   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
63   return Info->getMode().allFP64FP16Denormals();
64 }
65 
66 static unsigned findFirstFreeSGPR(CCState &CCInfo) {
67   unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
68   for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
69     if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
70       return AMDGPU::SGPR0 + Reg;
71     }
72   }
73   llvm_unreachable("Cannot allocate sgpr");
74 }
75 
76 SITargetLowering::SITargetLowering(const TargetMachine &TM,
77                                    const GCNSubtarget &STI)
78     : AMDGPUTargetLowering(TM, STI),
79       Subtarget(&STI) {
80   addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
81   addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
82 
83   addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass);
84   addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);
85 
86   addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
87 
88   const SIRegisterInfo *TRI = STI.getRegisterInfo();
89   const TargetRegisterClass *V64RegClass = TRI->getVGPR64Class();
90 
91   addRegisterClass(MVT::f64, V64RegClass);
92   addRegisterClass(MVT::v2f32, V64RegClass);
93 
94   addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
95   addRegisterClass(MVT::v3f32, TRI->getVGPRClassForBitWidth(96));
96 
97   addRegisterClass(MVT::v2i64, &AMDGPU::SGPR_128RegClass);
98   addRegisterClass(MVT::v2f64, &AMDGPU::SGPR_128RegClass);
99 
100   addRegisterClass(MVT::v4i32, &AMDGPU::SGPR_128RegClass);
101   addRegisterClass(MVT::v4f32, TRI->getVGPRClassForBitWidth(128));
102 
103   addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
104   addRegisterClass(MVT::v5f32, TRI->getVGPRClassForBitWidth(160));
105 
106   addRegisterClass(MVT::v6i32, &AMDGPU::SGPR_192RegClass);
107   addRegisterClass(MVT::v6f32, TRI->getVGPRClassForBitWidth(192));
108 
109   addRegisterClass(MVT::v3i64, &AMDGPU::SGPR_192RegClass);
110   addRegisterClass(MVT::v3f64, TRI->getVGPRClassForBitWidth(192));
111 
112   addRegisterClass(MVT::v7i32, &AMDGPU::SGPR_224RegClass);
113   addRegisterClass(MVT::v7f32, TRI->getVGPRClassForBitWidth(224));
114 
115   addRegisterClass(MVT::v8i32, &AMDGPU::SGPR_256RegClass);
116   addRegisterClass(MVT::v8f32, TRI->getVGPRClassForBitWidth(256));
117 
118   addRegisterClass(MVT::v4i64, &AMDGPU::SGPR_256RegClass);
119   addRegisterClass(MVT::v4f64, TRI->getVGPRClassForBitWidth(256));
120 
121   addRegisterClass(MVT::v16i32, &AMDGPU::SGPR_512RegClass);
122   addRegisterClass(MVT::v16f32, TRI->getVGPRClassForBitWidth(512));
123 
124   addRegisterClass(MVT::v8i64, &AMDGPU::SGPR_512RegClass);
125   addRegisterClass(MVT::v8f64, TRI->getVGPRClassForBitWidth(512));
126 
127   addRegisterClass(MVT::v16i64, &AMDGPU::SGPR_1024RegClass);
128   addRegisterClass(MVT::v16f64, TRI->getVGPRClassForBitWidth(1024));
129 
130   if (Subtarget->has16BitInsts()) {
131     addRegisterClass(MVT::i16, &AMDGPU::SReg_32RegClass);
132     addRegisterClass(MVT::f16, &AMDGPU::SReg_32RegClass);
133 
134     // Unless there are also VOP3P operations, not operations are really legal.
135     addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32RegClass);
136     addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32RegClass);
137     addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
138     addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
139     addRegisterClass(MVT::v8i16, &AMDGPU::SGPR_128RegClass);
140     addRegisterClass(MVT::v8f16, &AMDGPU::SGPR_128RegClass);
141   }
142 
143   addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass);
144   addRegisterClass(MVT::v32f32, TRI->getVGPRClassForBitWidth(1024));
145 
146   computeRegisterProperties(Subtarget->getRegisterInfo());
147 
148   // The boolean content concept here is too inflexible. Compares only ever
149   // really produce a 1-bit result. Any copy/extend from these will turn into a
150   // select, and zext/1 or sext/-1 are equally cheap. Arbitrarily choose 0/1, as
151   // it's what most targets use.
152   setBooleanContents(ZeroOrOneBooleanContent);
153   setBooleanVectorContents(ZeroOrOneBooleanContent);
154 
155   // We need to custom lower vector stores from local memory
156   setOperationAction(ISD::LOAD,
157                      {MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32,
158                       MVT::v6i32, MVT::v7i32, MVT::v8i32, MVT::v16i32, MVT::i1,
159                       MVT::v32i32},
160                      Custom);
161 
162   setOperationAction(ISD::STORE,
163                      {MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32,
164                       MVT::v6i32, MVT::v7i32, MVT::v8i32, MVT::v16i32, MVT::i1,
165                       MVT::v32i32},
166                      Custom);
167 
168   setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
169   setTruncStoreAction(MVT::v3i32, MVT::v3i16, Expand);
170   setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
171   setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
172   setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
173   setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
174   setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
175   setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
176   setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
177   setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
178   setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);
179   setTruncStoreAction(MVT::v2i16, MVT::v2i8, Expand);
180   setTruncStoreAction(MVT::v4i16, MVT::v4i8, Expand);
181   setTruncStoreAction(MVT::v8i16, MVT::v8i8, Expand);
182   setTruncStoreAction(MVT::v16i16, MVT::v16i8, Expand);
183   setTruncStoreAction(MVT::v32i16, MVT::v32i8, Expand);
184 
185   setTruncStoreAction(MVT::v3i64, MVT::v3i16, Expand);
186   setTruncStoreAction(MVT::v3i64, MVT::v3i32, Expand);
187   setTruncStoreAction(MVT::v4i64, MVT::v4i8, Expand);
188   setTruncStoreAction(MVT::v8i64, MVT::v8i8, Expand);
189   setTruncStoreAction(MVT::v8i64, MVT::v8i16, Expand);
190   setTruncStoreAction(MVT::v8i64, MVT::v8i32, Expand);
191   setTruncStoreAction(MVT::v16i64, MVT::v16i32, Expand);
192 
193   setOperationAction(ISD::GlobalAddress, {MVT::i32, MVT::i64}, Custom);
194 
195   setOperationAction(ISD::SELECT, MVT::i1, Promote);
196   setOperationAction(ISD::SELECT, MVT::i64, Custom);
197   setOperationAction(ISD::SELECT, MVT::f64, Promote);
198   AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);
199 
200   setOperationAction(ISD::SELECT_CC,
201                      {MVT::f32, MVT::i32, MVT::i64, MVT::f64, MVT::i1}, Expand);
202 
203   setOperationAction(ISD::SETCC, MVT::i1, Promote);
204   setOperationAction(ISD::SETCC, {MVT::v2i1, MVT::v4i1}, Expand);
205   AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
206 
207   setOperationAction(ISD::TRUNCATE,
208                      {MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32,
209                       MVT::v6i32, MVT::v7i32, MVT::v8i32, MVT::v16i32},
210                      Expand);
211   setOperationAction(ISD::FP_ROUND,
212                      {MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32,
213                       MVT::v6f32, MVT::v7f32, MVT::v8f32, MVT::v16f32},
214                      Expand);
215 
216   setOperationAction(ISD::SIGN_EXTEND_INREG,
217                      {MVT::v2i1, MVT::v4i1, MVT::v2i8, MVT::v4i8, MVT::v2i16,
218                       MVT::v3i16, MVT::v4i16, MVT::Other},
219                      Custom);
220 
221   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
222   setOperationAction(ISD::BR_CC,
223                      {MVT::i1, MVT::i32, MVT::i64, MVT::f32, MVT::f64}, Expand);
224 
225   setOperationAction({ISD::UADDO, ISD::USUBO}, MVT::i32, Legal);
226 
227   setOperationAction({ISD::ADDCARRY, ISD::SUBCARRY}, MVT::i32, Legal);
228 
229   setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, MVT::i64,
230                      Expand);
231 
232 #if 0
233   setOperationAction({ISD::ADDCARRY, ISD::SUBCARRY}, MVT::i64, Legal);
234 #endif
235 
236   // We only support LOAD/STORE and vector manipulation ops for vectors
237   // with > 4 elements.
238   for (MVT VT : { MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
239                   MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16,
240                   MVT::v3i64, MVT::v3f64, MVT::v6i32, MVT::v6f32,
241                   MVT::v4i64, MVT::v4f64, MVT::v8i64, MVT::v8f64,
242                   MVT::v8i16, MVT::v8f16, MVT::v16i64, MVT::v16f64,
243                   MVT::v32i32, MVT::v32f32 }) {
244     for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
245       switch (Op) {
246       case ISD::LOAD:
247       case ISD::STORE:
248       case ISD::BUILD_VECTOR:
249       case ISD::BITCAST:
250       case ISD::EXTRACT_VECTOR_ELT:
251       case ISD::INSERT_VECTOR_ELT:
252       case ISD::EXTRACT_SUBVECTOR:
253       case ISD::SCALAR_TO_VECTOR:
254         break;
255       case ISD::INSERT_SUBVECTOR:
256       case ISD::CONCAT_VECTORS:
257         setOperationAction(Op, VT, Custom);
258         break;
259       default:
260         setOperationAction(Op, VT, Expand);
261         break;
262       }
263     }
264   }
265 
266   setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);
267 
268   // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
269   // is expanded to avoid having two separate loops in case the index is a VGPR.
270 
271   // Most operations are naturally 32-bit vector operations. We only support
272   // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
273   for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
274     setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
275     AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);
276 
277     setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
278     AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);
279 
280     setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
281     AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);
282 
283     setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
284     AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
285   }
286 
287   for (MVT Vec64 : { MVT::v3i64, MVT::v3f64 }) {
288     setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
289     AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v6i32);
290 
291     setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
292     AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v6i32);
293 
294     setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
295     AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v6i32);
296 
297     setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
298     AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v6i32);
299   }
300 
301   for (MVT Vec64 : { MVT::v4i64, MVT::v4f64 }) {
302     setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
303     AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v8i32);
304 
305     setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
306     AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v8i32);
307 
308     setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
309     AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v8i32);
310 
311     setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
312     AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v8i32);
313   }
314 
315   for (MVT Vec64 : { MVT::v8i64, MVT::v8f64 }) {
316     setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
317     AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v16i32);
318 
319     setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
320     AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v16i32);
321 
322     setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
323     AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v16i32);
324 
325     setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
326     AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v16i32);
327   }
328 
329   for (MVT Vec64 : { MVT::v16i64, MVT::v16f64 }) {
330     setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
331     AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v32i32);
332 
333     setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
334     AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v32i32);
335 
336     setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
337     AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v32i32);
338 
339     setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
340     AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v32i32);
341   }
342 
343   setOperationAction(ISD::VECTOR_SHUFFLE,
344                      {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32},
345                      Expand);
346 
347   setOperationAction(ISD::BUILD_VECTOR, {MVT::v4f16, MVT::v4i16}, Custom);
348 
349   // Avoid stack access for these.
350   // TODO: Generalize to more vector types.
351   setOperationAction({ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT},
352                      {MVT::v2i16, MVT::v2f16, MVT::v2i8, MVT::v4i8, MVT::v8i8,
353                       MVT::v4i16, MVT::v4f16},
354                      Custom);
355 
356   // Deal with vec3 vector operations when widened to vec4.
357   setOperationAction(ISD::INSERT_SUBVECTOR,
358                      {MVT::v3i32, MVT::v3f32, MVT::v4i32, MVT::v4f32}, Custom);
359 
360   // Deal with vec5/6/7 vector operations when widened to vec8.
361   setOperationAction(ISD::INSERT_SUBVECTOR,
362                      {MVT::v5i32, MVT::v5f32, MVT::v6i32, MVT::v6f32,
363                       MVT::v7i32, MVT::v7f32, MVT::v8i32, MVT::v8f32},
364                      Custom);
365 
366   // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
367   // and output demarshalling
368   setOperationAction(ISD::ATOMIC_CMP_SWAP, {MVT::i32, MVT::i64}, Custom);
369 
370   // We can't return success/failure, only the old value,
371   // let LLVM add the comparison
372   setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, {MVT::i32, MVT::i64},
373                      Expand);
374 
375   if (Subtarget->hasFlatAddressSpace())
376     setOperationAction(ISD::ADDRSPACECAST, {MVT::i32, MVT::i64}, Custom);
377 
378   setOperationAction(ISD::BITREVERSE, {MVT::i32, MVT::i64}, Legal);
379 
380   // FIXME: This should be narrowed to i32, but that only happens if i64 is
381   // illegal.
382   // FIXME: Should lower sub-i32 bswaps to bit-ops without v_perm_b32.
383   setOperationAction(ISD::BSWAP, {MVT::i64, MVT::i32}, Legal);
384 
385   // On SI this is s_memtime and s_memrealtime on VI.
386   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
387   setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Custom);
388 
389   if (Subtarget->has16BitInsts()) {
390     setOperationAction({ISD::FPOW, ISD::FPOWI}, MVT::f16, Promote);
391     setOperationAction({ISD::FLOG, ISD::FEXP, ISD::FLOG10}, MVT::f16, Custom);
392   }
393 
394   if (Subtarget->hasMadMacF32Insts())
395     setOperationAction(ISD::FMAD, MVT::f32, Legal);
396 
397   if (!Subtarget->hasBFI())
398     // fcopysign can be done in a single instruction with BFI.
399     setOperationAction(ISD::FCOPYSIGN, {MVT::f32, MVT::f64}, Expand);
400 
401   if (!Subtarget->hasBCNT(32))
402     setOperationAction(ISD::CTPOP, MVT::i32, Expand);
403 
404   if (!Subtarget->hasBCNT(64))
405     setOperationAction(ISD::CTPOP, MVT::i64, Expand);
406 
407   if (Subtarget->hasFFBH())
408     setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, MVT::i32, Custom);
409 
410   if (Subtarget->hasFFBL())
411     setOperationAction({ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF}, MVT::i32, Custom);
412 
413   // We only really have 32-bit BFE instructions (and 16-bit on VI).
414   //
415   // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
416   // effort to match them now. We want this to be false for i64 cases when the
417   // extraction isn't restricted to the upper or lower half. Ideally we would
418   // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
419   // span the midpoint are probably relatively rare, so don't worry about them
420   // for now.
421   if (Subtarget->hasBFE())
422     setHasExtractBitsInsn(true);
423 
424   // Clamp modifier on add/sub
425   if (Subtarget->hasIntClamp())
426     setOperationAction({ISD::UADDSAT, ISD::USUBSAT}, MVT::i32, Legal);
427 
428   if (Subtarget->hasAddNoCarry())
429     setOperationAction({ISD::SADDSAT, ISD::SSUBSAT}, {MVT::i16, MVT::i32},
430                        Legal);
431 
432   setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, {MVT::f32, MVT::f64},
433                      Custom);
434 
435   // These are really only legal for ieee_mode functions. We should be avoiding
436   // them for functions that don't have ieee_mode enabled, so just say they are
437   // legal.
438   setOperationAction({ISD::FMINNUM_IEEE, ISD::FMAXNUM_IEEE},
439                      {MVT::f32, MVT::f64}, Legal);
440 
441   if (Subtarget->haveRoundOpsF64())
442     setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FRINT}, MVT::f64, Legal);
443   else
444     setOperationAction({ISD::FCEIL, ISD::FTRUNC, ISD::FRINT, ISD::FFLOOR},
445                        MVT::f64, Custom);
446 
447   setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
448 
449   setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FDIV}, MVT::f32, Custom);
450   setOperationAction(ISD::FDIV, MVT::f64, Custom);
451 
452   if (Subtarget->has16BitInsts()) {
453     setOperationAction({ISD::Constant, ISD::SMIN, ISD::SMAX, ISD::UMIN,
454                         ISD::UMAX, ISD::UADDSAT, ISD::USUBSAT},
455                        MVT::i16, Legal);
456 
457     AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);
458 
459     setOperationAction({ISD::ROTR, ISD::ROTL, ISD::SELECT_CC, ISD::BR_CC},
460                        MVT::i16, Expand);
461 
462     setOperationAction({ISD::SIGN_EXTEND, ISD::SDIV, ISD::UDIV, ISD::SREM,
463                         ISD::UREM, ISD::BITREVERSE, ISD::CTTZ,
464                         ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF,
465                         ISD::CTPOP},
466                        MVT::i16, Promote);
467 
468     setOperationAction(ISD::LOAD, MVT::i16, Custom);
469 
470     setTruncStoreAction(MVT::i64, MVT::i16, Expand);
471 
472     setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
473     AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
474     setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
475     AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);
476 
477     setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT}, MVT::i16, Custom);
478 
479     // F16 - Constant Actions.
480     setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
481 
482     // F16 - Load/Store Actions.
483     setOperationAction(ISD::LOAD, MVT::f16, Promote);
484     AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
485     setOperationAction(ISD::STORE, MVT::f16, Promote);
486     AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);
487 
488     // F16 - VOP1 Actions.
489     setOperationAction(
490         {ISD::FP_ROUND, ISD::FCOS, ISD::FSIN, ISD::FROUND, ISD::FPTRUNC_ROUND},
491         MVT::f16, Custom);
492 
493     setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP}, MVT::i16, Custom);
494 
495     setOperationAction(
496         {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, ISD::UINT_TO_FP},
497         MVT::f16, Promote);
498 
499     // F16 - VOP2 Actions.
500     setOperationAction({ISD::BR_CC, ISD::SELECT_CC}, MVT::f16, Expand);
501 
502     setOperationAction(ISD::FDIV, MVT::f16, Custom);
503 
504     // F16 - VOP3 Actions.
505     setOperationAction(ISD::FMA, MVT::f16, Legal);
506     if (STI.hasMadF16())
507       setOperationAction(ISD::FMAD, MVT::f16, Legal);
508 
509     for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16, MVT::v8i16,
510                    MVT::v8f16}) {
511       for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
512         switch (Op) {
513         case ISD::LOAD:
514         case ISD::STORE:
515         case ISD::BUILD_VECTOR:
516         case ISD::BITCAST:
517         case ISD::EXTRACT_VECTOR_ELT:
518         case ISD::INSERT_VECTOR_ELT:
519         case ISD::INSERT_SUBVECTOR:
520         case ISD::EXTRACT_SUBVECTOR:
521         case ISD::SCALAR_TO_VECTOR:
522           break;
523         case ISD::CONCAT_VECTORS:
524           setOperationAction(Op, VT, Custom);
525           break;
526         default:
527           setOperationAction(Op, VT, Expand);
528           break;
529         }
530       }
531     }
532 
533     // v_perm_b32 can handle either of these.
534     setOperationAction(ISD::BSWAP, {MVT::i16, MVT::v2i16}, Legal);
535     setOperationAction(ISD::BSWAP, MVT::v4i16, Custom);
536 
537     // XXX - Do these do anything? Vector constants turn into build_vector.
538     setOperationAction(ISD::Constant, {MVT::v2i16, MVT::v2f16}, Legal);
539 
540     setOperationAction(ISD::UNDEF, {MVT::v2i16, MVT::v2f16}, Legal);
541 
542     setOperationAction(ISD::STORE, MVT::v2i16, Promote);
543     AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
544     setOperationAction(ISD::STORE, MVT::v2f16, Promote);
545     AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);
546 
547     setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
548     AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
549     setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
550     AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);
551 
552     setOperationAction(ISD::AND, MVT::v2i16, Promote);
553     AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
554     setOperationAction(ISD::OR, MVT::v2i16, Promote);
555     AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
556     setOperationAction(ISD::XOR, MVT::v2i16, Promote);
557     AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
558 
559     setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
560     AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
561     setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
562     AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);
563 
564     setOperationAction(ISD::STORE, MVT::v4i16, Promote);
565     AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
566     setOperationAction(ISD::STORE, MVT::v4f16, Promote);
567     AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);
568 
569     setOperationAction(ISD::LOAD, MVT::v8i16, Promote);
570     AddPromotedToType(ISD::LOAD, MVT::v8i16, MVT::v4i32);
571     setOperationAction(ISD::LOAD, MVT::v8f16, Promote);
572     AddPromotedToType(ISD::LOAD, MVT::v8f16, MVT::v4i32);
573 
574     setOperationAction(ISD::STORE, MVT::v4i16, Promote);
575     AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
576     setOperationAction(ISD::STORE, MVT::v4f16, Promote);
577     AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);
578 
579     setOperationAction(ISD::STORE, MVT::v8i16, Promote);
580     AddPromotedToType(ISD::STORE, MVT::v8i16, MVT::v4i32);
581     setOperationAction(ISD::STORE, MVT::v8f16, Promote);
582     AddPromotedToType(ISD::STORE, MVT::v8f16, MVT::v4i32);
583 
584     setOperationAction({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND},
585                        MVT::v2i32, Expand);
586     setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
587 
588     setOperationAction({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND},
589                        MVT::v4i32, Expand);
590 
591     setOperationAction({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND},
592                        MVT::v8i32, Expand);
593 
594     if (!Subtarget->hasVOP3PInsts())
595       setOperationAction(ISD::BUILD_VECTOR, {MVT::v2i16, MVT::v2f16}, Custom);
596 
597     setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
598     // This isn't really legal, but this avoids the legalizer unrolling it (and
599     // allows matching fneg (fabs x) patterns)
600     setOperationAction(ISD::FABS, MVT::v2f16, Legal);
601 
602     setOperationAction({ISD::FMAXNUM, ISD::FMINNUM}, MVT::f16, Custom);
603     setOperationAction({ISD::FMAXNUM_IEEE, ISD::FMINNUM_IEEE}, MVT::f16, Legal);
604 
605     setOperationAction({ISD::FMINNUM_IEEE, ISD::FMAXNUM_IEEE},
606                        {MVT::v4f16, MVT::v8f16}, Custom);
607 
608     setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, {MVT::v4f16, MVT::v8f16},
609                        Expand);
610 
611     for (MVT Vec16 : { MVT::v8i16, MVT::v8f16 }) {
612       setOperationAction(
613           {ISD::BUILD_VECTOR, ISD::EXTRACT_VECTOR_ELT, ISD::SCALAR_TO_VECTOR},
614           Vec16, Custom);
615       setOperationAction(ISD::INSERT_VECTOR_ELT, Vec16, Expand);
616     }
617   }
618 
619   if (Subtarget->hasVOP3PInsts()) {
620     setOperationAction({ISD::ADD, ISD::SUB, ISD::MUL, ISD::SHL, ISD::SRL,
621                         ISD::SRA, ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX,
622                         ISD::UADDSAT, ISD::USUBSAT, ISD::SADDSAT, ISD::SSUBSAT},
623                        MVT::v2i16, Legal);
624 
625     setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA, ISD::FMINNUM_IEEE,
626                         ISD::FMAXNUM_IEEE, ISD::FCANONICALIZE},
627                        MVT::v2f16, Legal);
628 
629     setOperationAction(ISD::EXTRACT_VECTOR_ELT, {MVT::v2i16, MVT::v2f16},
630                        Custom);
631 
632     setOperationAction(ISD::VECTOR_SHUFFLE,
633                        {MVT::v4f16, MVT::v4i16, MVT::v8f16, MVT::v8i16},
634                        Custom);
635 
636     for (MVT VT : {MVT::v4i16, MVT::v8i16})
637       // Split vector operations.
638       setOperationAction({ISD::SHL, ISD::SRA, ISD::SRL, ISD::ADD, ISD::SUB,
639                           ISD::MUL, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX,
640                           ISD::UADDSAT, ISD::SADDSAT, ISD::USUBSAT,
641                           ISD::SSUBSAT},
642                          VT, Custom);
643 
644     for (MVT VT : {MVT::v4f16, MVT::v8f16})
645       // Split vector operations.
646       setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA, ISD::FCANONICALIZE},
647                          VT, Custom);
648 
649     setOperationAction({ISD::FMAXNUM, ISD::FMINNUM}, {MVT::v2f16, MVT::v4f16},
650                        Custom);
651 
652     setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
653     setOperationAction(ISD::SELECT, {MVT::v4i16, MVT::v4f16}, Custom);
654 
655     if (Subtarget->hasPackedFP32Ops()) {
656       setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA, ISD::FNEG},
657                          MVT::v2f32, Legal);
658       setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA},
659                          {MVT::v4f32, MVT::v8f32, MVT::v16f32, MVT::v32f32},
660                          Custom);
661     }
662   }
663 
664   setOperationAction({ISD::FNEG, ISD::FABS}, MVT::v4f16, Custom);
665 
666   if (Subtarget->has16BitInsts()) {
667     setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
668     AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
669     setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
670     AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
671   } else {
672     // Legalization hack.
673     setOperationAction(ISD::SELECT, {MVT::v2i16, MVT::v2f16}, Custom);
674 
675     setOperationAction({ISD::FNEG, ISD::FABS}, MVT::v2f16, Custom);
676   }
677 
678   setOperationAction(ISD::SELECT,
679                      {MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8,
680                       MVT::v8i16, MVT::v8f16},
681                      Custom);
682 
683   setOperationAction({ISD::SMULO, ISD::UMULO}, MVT::i64, Custom);
684 
685   if (Subtarget->hasMad64_32())
686     setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, MVT::i32, Custom);
687 
688   setOperationAction(ISD::INTRINSIC_WO_CHAIN,
689                      {MVT::Other, MVT::f32, MVT::v4f32, MVT::i16, MVT::f16,
690                       MVT::v2i16, MVT::v2f16},
691                      Custom);
692 
693   setOperationAction(ISD::INTRINSIC_W_CHAIN,
694                      {MVT::v2f16, MVT::v2i16, MVT::v3f16, MVT::v3i16,
695                       MVT::v4f16, MVT::v4i16, MVT::v8f16, MVT::Other, MVT::f16,
696                       MVT::i16, MVT::i8},
697                      Custom);
698 
699   setOperationAction(ISD::INTRINSIC_VOID,
700                      {MVT::Other, MVT::v2i16, MVT::v2f16, MVT::v3i16,
701                       MVT::v3f16, MVT::v4f16, MVT::v4i16, MVT::f16, MVT::i16,
702                       MVT::i8},
703                      Custom);
704 
705   setTargetDAGCombine({ISD::ADD,
706                        ISD::ADDCARRY,
707                        ISD::SUB,
708                        ISD::SUBCARRY,
709                        ISD::FADD,
710                        ISD::FSUB,
711                        ISD::FMINNUM,
712                        ISD::FMAXNUM,
713                        ISD::FMINNUM_IEEE,
714                        ISD::FMAXNUM_IEEE,
715                        ISD::FMA,
716                        ISD::SMIN,
717                        ISD::SMAX,
718                        ISD::UMIN,
719                        ISD::UMAX,
720                        ISD::SETCC,
721                        ISD::AND,
722                        ISD::OR,
723                        ISD::XOR,
724                        ISD::SINT_TO_FP,
725                        ISD::UINT_TO_FP,
726                        ISD::FCANONICALIZE,
727                        ISD::SCALAR_TO_VECTOR,
728                        ISD::ZERO_EXTEND,
729                        ISD::SIGN_EXTEND_INREG,
730                        ISD::EXTRACT_VECTOR_ELT,
731                        ISD::INSERT_VECTOR_ELT});
732 
733   // All memory operations. Some folding on the pointer operand is done to help
734   // matching the constant offsets in the addressing modes.
735   setTargetDAGCombine({ISD::LOAD,
736                        ISD::STORE,
737                        ISD::ATOMIC_LOAD,
738                        ISD::ATOMIC_STORE,
739                        ISD::ATOMIC_CMP_SWAP,
740                        ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
741                        ISD::ATOMIC_SWAP,
742                        ISD::ATOMIC_LOAD_ADD,
743                        ISD::ATOMIC_LOAD_SUB,
744                        ISD::ATOMIC_LOAD_AND,
745                        ISD::ATOMIC_LOAD_OR,
746                        ISD::ATOMIC_LOAD_XOR,
747                        ISD::ATOMIC_LOAD_NAND,
748                        ISD::ATOMIC_LOAD_MIN,
749                        ISD::ATOMIC_LOAD_MAX,
750                        ISD::ATOMIC_LOAD_UMIN,
751                        ISD::ATOMIC_LOAD_UMAX,
752                        ISD::ATOMIC_LOAD_FADD,
753                        ISD::INTRINSIC_VOID,
754                        ISD::INTRINSIC_W_CHAIN});
755 
756   // FIXME: In other contexts we pretend this is a per-function property.
757   setStackPointerRegisterToSaveRestore(AMDGPU::SGPR32);
758 
759   setSchedulingPreference(Sched::RegPressure);
760 }
761 
762 const GCNSubtarget *SITargetLowering::getSubtarget() const {
763   return Subtarget;
764 }
765 
766 //===----------------------------------------------------------------------===//
767 // TargetLowering queries
768 //===----------------------------------------------------------------------===//
769 
770 // v_mad_mix* support a conversion from f16 to f32.
771 //
772 // There is only one special case when denormals are enabled we don't currently,
773 // where this is OK to use.
774 bool SITargetLowering::isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
775                                        EVT DestVT, EVT SrcVT) const {
776   return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
777           (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
778     DestVT.getScalarType() == MVT::f32 &&
779     SrcVT.getScalarType() == MVT::f16 &&
780     // TODO: This probably only requires no input flushing?
781     !hasFP32Denormals(DAG.getMachineFunction());
782 }
783 
784 bool SITargetLowering::isFPExtFoldable(const MachineInstr &MI, unsigned Opcode,
785                                        LLT DestTy, LLT SrcTy) const {
786   return ((Opcode == TargetOpcode::G_FMAD && Subtarget->hasMadMixInsts()) ||
787           (Opcode == TargetOpcode::G_FMA && Subtarget->hasFmaMixInsts())) &&
788          DestTy.getScalarSizeInBits() == 32 &&
789          SrcTy.getScalarSizeInBits() == 16 &&
790          // TODO: This probably only requires no input flushing?
791          !hasFP32Denormals(*MI.getMF());
792 }
793 
794 bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
795   // SI has some legal vector types, but no legal vector operations. Say no
796   // shuffles are legal in order to prefer scalarizing some vector operations.
797   return false;
798 }
799 
800 MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
801                                                     CallingConv::ID CC,
802                                                     EVT VT) const {
803   if (CC == CallingConv::AMDGPU_KERNEL)
804     return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
805 
806   if (VT.isVector()) {
807     EVT ScalarVT = VT.getScalarType();
808     unsigned Size = ScalarVT.getSizeInBits();
809     if (Size == 16) {
810       if (Subtarget->has16BitInsts())
811         return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
812       return VT.isInteger() ? MVT::i32 : MVT::f32;
813     }
814 
815     if (Size < 16)
816       return Subtarget->has16BitInsts() ? MVT::i16 : MVT::i32;
817     return Size == 32 ? ScalarVT.getSimpleVT() : MVT::i32;
818   }
819 
820   if (VT.getSizeInBits() > 32)
821     return MVT::i32;
822 
823   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
824 }
825 
826 unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
827                                                          CallingConv::ID CC,
828                                                          EVT VT) const {
829   if (CC == CallingConv::AMDGPU_KERNEL)
830     return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
831 
832   if (VT.isVector()) {
833     unsigned NumElts = VT.getVectorNumElements();
834     EVT ScalarVT = VT.getScalarType();
835     unsigned Size = ScalarVT.getSizeInBits();
836 
837     // FIXME: Should probably promote 8-bit vectors to i16.
838     if (Size == 16 && Subtarget->has16BitInsts())
839       return (NumElts + 1) / 2;
840 
841     if (Size <= 32)
842       return NumElts;
843 
844     if (Size > 32)
845       return NumElts * ((Size + 31) / 32);
846   } else if (VT.getSizeInBits() > 32)
847     return (VT.getSizeInBits() + 31) / 32;
848 
849   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
850 }
851 
852 unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
853   LLVMContext &Context, CallingConv::ID CC,
854   EVT VT, EVT &IntermediateVT,
855   unsigned &NumIntermediates, MVT &RegisterVT) const {
856   if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
857     unsigned NumElts = VT.getVectorNumElements();
858     EVT ScalarVT = VT.getScalarType();
859     unsigned Size = ScalarVT.getSizeInBits();
860     // FIXME: We should fix the ABI to be the same on targets without 16-bit
861     // support, but unless we can properly handle 3-vectors, it will be still be
862     // inconsistent.
863     if (Size == 16 && Subtarget->has16BitInsts()) {
864       RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
865       IntermediateVT = RegisterVT;
866       NumIntermediates = (NumElts + 1) / 2;
867       return NumIntermediates;
868     }
869 
870     if (Size == 32) {
871       RegisterVT = ScalarVT.getSimpleVT();
872       IntermediateVT = RegisterVT;
873       NumIntermediates = NumElts;
874       return NumIntermediates;
875     }
876 
877     if (Size < 16 && Subtarget->has16BitInsts()) {
878       // FIXME: Should probably form v2i16 pieces
879       RegisterVT = MVT::i16;
880       IntermediateVT = ScalarVT;
881       NumIntermediates = NumElts;
882       return NumIntermediates;
883     }
884 
885 
886     if (Size != 16 && Size <= 32) {
887       RegisterVT = MVT::i32;
888       IntermediateVT = ScalarVT;
889       NumIntermediates = NumElts;
890       return NumIntermediates;
891     }
892 
893     if (Size > 32) {
894       RegisterVT = MVT::i32;
895       IntermediateVT = RegisterVT;
896       NumIntermediates = NumElts * ((Size + 31) / 32);
897       return NumIntermediates;
898     }
899   }
900 
901   return TargetLowering::getVectorTypeBreakdownForCallingConv(
902     Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
903 }
904 
905 static EVT memVTFromImageData(Type *Ty, unsigned DMaskLanes) {
906   assert(DMaskLanes != 0);
907 
908   if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
909     unsigned NumElts = std::min(DMaskLanes, VT->getNumElements());
910     return EVT::getVectorVT(Ty->getContext(),
911                             EVT::getEVT(VT->getElementType()),
912                             NumElts);
913   }
914 
915   return EVT::getEVT(Ty);
916 }
917 
918 // Peek through TFE struct returns to only use the data size.
919 static EVT memVTFromImageReturn(Type *Ty, unsigned DMaskLanes) {
920   auto *ST = dyn_cast<StructType>(Ty);
921   if (!ST)
922     return memVTFromImageData(Ty, DMaskLanes);
923 
924   // Some intrinsics return an aggregate type - special case to work out the
925   // correct memVT.
926   //
927   // Only limited forms of aggregate type currently expected.
928   if (ST->getNumContainedTypes() != 2 ||
929       !ST->getContainedType(1)->isIntegerTy(32))
930     return EVT();
931   return memVTFromImageData(ST->getContainedType(0), DMaskLanes);
932 }
933 
934 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
935                                           const CallInst &CI,
936                                           MachineFunction &MF,
937                                           unsigned IntrID) const {
938   Info.flags = MachineMemOperand::MONone;
939   if (CI.hasMetadata(LLVMContext::MD_invariant_load))
940     Info.flags |= MachineMemOperand::MOInvariant;
941 
942   if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
943           AMDGPU::lookupRsrcIntrinsic(IntrID)) {
944     AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
945                                                   (Intrinsic::ID)IntrID);
946     if (Attr.hasFnAttr(Attribute::ReadNone))
947       return false;
948 
949     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
950 
951     if (RsrcIntr->IsImage) {
952       Info.ptrVal =
953           MFI->getImagePSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());
954       Info.align.reset();
955     } else {
956       Info.ptrVal =
957           MFI->getBufferPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());
958     }
959 
960     Info.flags |= MachineMemOperand::MODereferenceable;
961     if (Attr.hasFnAttr(Attribute::ReadOnly)) {
962       unsigned DMaskLanes = 4;
963 
964       if (RsrcIntr->IsImage) {
965         const AMDGPU::ImageDimIntrinsicInfo *Intr
966           = AMDGPU::getImageDimIntrinsicInfo(IntrID);
967         const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
968           AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
969 
970         if (!BaseOpcode->Gather4) {
971           // If this isn't a gather, we may have excess loaded elements in the
972           // IR type. Check the dmask for the real number of elements loaded.
973           unsigned DMask
974             = cast<ConstantInt>(CI.getArgOperand(0))->getZExtValue();
975           DMaskLanes = DMask == 0 ? 1 : countPopulation(DMask);
976         }
977 
978         Info.memVT = memVTFromImageReturn(CI.getType(), DMaskLanes);
979       } else
980         Info.memVT = EVT::getEVT(CI.getType());
981 
982       // FIXME: What does alignment mean for an image?
983       Info.opc = ISD::INTRINSIC_W_CHAIN;
984       Info.flags |= MachineMemOperand::MOLoad;
985     } else if (Attr.hasFnAttr(Attribute::WriteOnly)) {
986       Info.opc = ISD::INTRINSIC_VOID;
987 
988       Type *DataTy = CI.getArgOperand(0)->getType();
989       if (RsrcIntr->IsImage) {
990         unsigned DMask = cast<ConstantInt>(CI.getArgOperand(1))->getZExtValue();
991         unsigned DMaskLanes = DMask == 0 ? 1 : countPopulation(DMask);
992         Info.memVT = memVTFromImageData(DataTy, DMaskLanes);
993       } else
994         Info.memVT = EVT::getEVT(DataTy);
995 
996       Info.flags |= MachineMemOperand::MOStore;
997     } else {
998       // Atomic
999       Info.opc = CI.getType()->isVoidTy() ? ISD::INTRINSIC_VOID :
1000                                             ISD::INTRINSIC_W_CHAIN;
1001       Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
1002       Info.flags |= MachineMemOperand::MOLoad |
1003                     MachineMemOperand::MOStore |
1004                     MachineMemOperand::MODereferenceable;
1005 
1006       // XXX - Should this be volatile without known ordering?
1007       Info.flags |= MachineMemOperand::MOVolatile;
1008 
1009       switch (IntrID) {
1010       default:
1011         break;
1012       case Intrinsic::amdgcn_raw_buffer_load_lds:
1013       case Intrinsic::amdgcn_struct_buffer_load_lds: {
1014         unsigned Width = cast<ConstantInt>(CI.getArgOperand(2))->getZExtValue();
1015         Info.memVT = EVT::getIntegerVT(CI.getContext(), Width * 8);
1016         return true;
1017       }
1018       }
1019     }
1020     return true;
1021   }
1022 
1023   switch (IntrID) {
1024   case Intrinsic::amdgcn_atomic_inc:
1025   case Intrinsic::amdgcn_atomic_dec:
1026   case Intrinsic::amdgcn_ds_ordered_add:
1027   case Intrinsic::amdgcn_ds_ordered_swap:
1028   case Intrinsic::amdgcn_ds_fadd:
1029   case Intrinsic::amdgcn_ds_fmin:
1030   case Intrinsic::amdgcn_ds_fmax: {
1031     Info.opc = ISD::INTRINSIC_W_CHAIN;
1032     Info.memVT = MVT::getVT(CI.getType());
1033     Info.ptrVal = CI.getOperand(0);
1034     Info.align.reset();
1035     Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1036 
1037     const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4));
1038     if (!Vol->isZero())
1039       Info.flags |= MachineMemOperand::MOVolatile;
1040 
1041     return true;
1042   }
1043   case Intrinsic::amdgcn_buffer_atomic_fadd: {
1044     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1045 
1046     Info.opc = ISD::INTRINSIC_W_CHAIN;
1047     Info.memVT = MVT::getVT(CI.getOperand(0)->getType());
1048     Info.ptrVal =
1049         MFI->getBufferPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());
1050     Info.align.reset();
1051     Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1052 
1053     const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
1054     if (!Vol || !Vol->isZero())
1055       Info.flags |= MachineMemOperand::MOVolatile;
1056 
1057     return true;
1058   }
1059   case Intrinsic::amdgcn_ds_append:
1060   case Intrinsic::amdgcn_ds_consume: {
1061     Info.opc = ISD::INTRINSIC_W_CHAIN;
1062     Info.memVT = MVT::getVT(CI.getType());
1063     Info.ptrVal = CI.getOperand(0);
1064     Info.align.reset();
1065     Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1066 
1067     const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1));
1068     if (!Vol->isZero())
1069       Info.flags |= MachineMemOperand::MOVolatile;
1070 
1071     return true;
1072   }
1073   case Intrinsic::amdgcn_global_atomic_csub: {
1074     Info.opc = ISD::INTRINSIC_W_CHAIN;
1075     Info.memVT = MVT::getVT(CI.getType());
1076     Info.ptrVal = CI.getOperand(0);
1077     Info.align.reset();
1078     Info.flags |= MachineMemOperand::MOLoad |
1079                   MachineMemOperand::MOStore |
1080                   MachineMemOperand::MOVolatile;
1081     return true;
1082   }
1083   case Intrinsic::amdgcn_image_bvh_intersect_ray: {
1084     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1085     Info.opc = ISD::INTRINSIC_W_CHAIN;
1086     Info.memVT = MVT::getVT(CI.getType()); // XXX: what is correct VT?
1087     Info.ptrVal =
1088         MFI->getImagePSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());
1089     Info.align.reset();
1090     Info.flags |= MachineMemOperand::MOLoad |
1091                   MachineMemOperand::MODereferenceable;
1092     return true;
1093   }
1094   case Intrinsic::amdgcn_global_atomic_fadd:
1095   case Intrinsic::amdgcn_global_atomic_fmin:
1096   case Intrinsic::amdgcn_global_atomic_fmax:
1097   case Intrinsic::amdgcn_flat_atomic_fadd:
1098   case Intrinsic::amdgcn_flat_atomic_fmin:
1099   case Intrinsic::amdgcn_flat_atomic_fmax:
1100   case Intrinsic::amdgcn_global_atomic_fadd_v2bf16:
1101   case Intrinsic::amdgcn_flat_atomic_fadd_v2bf16: {
1102     Info.opc = ISD::INTRINSIC_W_CHAIN;
1103     Info.memVT = MVT::getVT(CI.getType());
1104     Info.ptrVal = CI.getOperand(0);
1105     Info.align.reset();
1106     Info.flags |= MachineMemOperand::MOLoad |
1107                   MachineMemOperand::MOStore |
1108                   MachineMemOperand::MODereferenceable |
1109                   MachineMemOperand::MOVolatile;
1110     return true;
1111   }
1112   case Intrinsic::amdgcn_ds_gws_init:
1113   case Intrinsic::amdgcn_ds_gws_barrier:
1114   case Intrinsic::amdgcn_ds_gws_sema_v:
1115   case Intrinsic::amdgcn_ds_gws_sema_br:
1116   case Intrinsic::amdgcn_ds_gws_sema_p:
1117   case Intrinsic::amdgcn_ds_gws_sema_release_all: {
1118     Info.opc = ISD::INTRINSIC_VOID;
1119 
1120     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1121     Info.ptrVal =
1122         MFI->getGWSPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());
1123 
1124     // This is an abstract access, but we need to specify a type and size.
1125     Info.memVT = MVT::i32;
1126     Info.size = 4;
1127     Info.align = Align(4);
1128 
1129     if (IntrID == Intrinsic::amdgcn_ds_gws_barrier)
1130       Info.flags |= MachineMemOperand::MOLoad;
1131     else
1132       Info.flags |= MachineMemOperand::MOStore;
1133     return true;
1134   }
1135   case Intrinsic::amdgcn_global_load_lds: {
1136     Info.opc = ISD::INTRINSIC_VOID;
1137     unsigned Width = cast<ConstantInt>(CI.getArgOperand(2))->getZExtValue();
1138     Info.memVT = EVT::getIntegerVT(CI.getContext(), Width * 8);
1139     Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
1140                   MachineMemOperand::MOVolatile;
1141     return true;
1142   }
1143   default:
1144     return false;
1145   }
1146 }
1147 
1148 bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
1149                                             SmallVectorImpl<Value*> &Ops,
1150                                             Type *&AccessTy) const {
1151   switch (II->getIntrinsicID()) {
1152   case Intrinsic::amdgcn_atomic_inc:
1153   case Intrinsic::amdgcn_atomic_dec:
1154   case Intrinsic::amdgcn_ds_ordered_add:
1155   case Intrinsic::amdgcn_ds_ordered_swap:
1156   case Intrinsic::amdgcn_ds_append:
1157   case Intrinsic::amdgcn_ds_consume:
1158   case Intrinsic::amdgcn_ds_fadd:
1159   case Intrinsic::amdgcn_ds_fmin:
1160   case Intrinsic::amdgcn_ds_fmax:
1161   case Intrinsic::amdgcn_global_atomic_fadd:
1162   case Intrinsic::amdgcn_flat_atomic_fadd:
1163   case Intrinsic::amdgcn_flat_atomic_fmin:
1164   case Intrinsic::amdgcn_flat_atomic_fmax:
1165   case Intrinsic::amdgcn_global_atomic_fadd_v2bf16:
1166   case Intrinsic::amdgcn_flat_atomic_fadd_v2bf16:
1167   case Intrinsic::amdgcn_global_atomic_csub: {
1168     Value *Ptr = II->getArgOperand(0);
1169     AccessTy = II->getType();
1170     Ops.push_back(Ptr);
1171     return true;
1172   }
1173   default:
1174     return false;
1175   }
1176 }
1177 
1178 bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
1179   if (!Subtarget->hasFlatInstOffsets()) {
1180     // Flat instructions do not have offsets, and only have the register
1181     // address.
1182     return AM.BaseOffs == 0 && AM.Scale == 0;
1183   }
1184 
1185   return AM.Scale == 0 &&
1186          (AM.BaseOffs == 0 ||
1187           Subtarget->getInstrInfo()->isLegalFLATOffset(
1188               AM.BaseOffs, AMDGPUAS::FLAT_ADDRESS, SIInstrFlags::FLAT));
1189 }
1190 
1191 bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
1192   if (Subtarget->hasFlatGlobalInsts())
1193     return AM.Scale == 0 &&
1194            (AM.BaseOffs == 0 || Subtarget->getInstrInfo()->isLegalFLATOffset(
1195                                     AM.BaseOffs, AMDGPUAS::GLOBAL_ADDRESS,
1196                                     SIInstrFlags::FlatGlobal));
1197 
1198   if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
1199       // Assume the we will use FLAT for all global memory accesses
1200       // on VI.
1201       // FIXME: This assumption is currently wrong.  On VI we still use
1202       // MUBUF instructions for the r + i addressing mode.  As currently
1203       // implemented, the MUBUF instructions only work on buffer < 4GB.
1204       // It may be possible to support > 4GB buffers with MUBUF instructions,
1205       // by setting the stride value in the resource descriptor which would
1206       // increase the size limit to (stride * 4GB).  However, this is risky,
1207       // because it has never been validated.
1208     return isLegalFlatAddressingMode(AM);
1209   }
1210 
1211   return isLegalMUBUFAddressingMode(AM);
1212 }
1213 
1214 bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
1215   // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
1216   // additionally can do r + r + i with addr64. 32-bit has more addressing
1217   // mode options. Depending on the resource constant, it can also do
1218   // (i64 r0) + (i32 r1) * (i14 i).
1219   //
1220   // Private arrays end up using a scratch buffer most of the time, so also
1221   // assume those use MUBUF instructions. Scratch loads / stores are currently
1222   // implemented as mubuf instructions with offen bit set, so slightly
1223   // different than the normal addr64.
1224   if (!SIInstrInfo::isLegalMUBUFImmOffset(AM.BaseOffs))
1225     return false;
1226 
1227   // FIXME: Since we can split immediate into soffset and immediate offset,
1228   // would it make sense to allow any immediate?
1229 
1230   switch (AM.Scale) {
1231   case 0: // r + i or just i, depending on HasBaseReg.
1232     return true;
1233   case 1:
1234     return true; // We have r + r or r + i.
1235   case 2:
1236     if (AM.HasBaseReg) {
1237       // Reject 2 * r + r.
1238       return false;
1239     }
1240 
1241     // Allow 2 * r as r + r
1242     // Or  2 * r + i is allowed as r + r + i.
1243     return true;
1244   default: // Don't allow n * r
1245     return false;
1246   }
1247 }
1248 
1249 bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
1250                                              const AddrMode &AM, Type *Ty,
1251                                              unsigned AS, Instruction *I) const {
1252   // No global is ever allowed as a base.
1253   if (AM.BaseGV)
1254     return false;
1255 
1256   if (AS == AMDGPUAS::GLOBAL_ADDRESS)
1257     return isLegalGlobalAddressingMode(AM);
1258 
1259   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
1260       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
1261       AS == AMDGPUAS::BUFFER_FAT_POINTER) {
1262     // If the offset isn't a multiple of 4, it probably isn't going to be
1263     // correctly aligned.
1264     // FIXME: Can we get the real alignment here?
1265     if (AM.BaseOffs % 4 != 0)
1266       return isLegalMUBUFAddressingMode(AM);
1267 
1268     // There are no SMRD extloads, so if we have to do a small type access we
1269     // will use a MUBUF load.
1270     // FIXME?: We also need to do this if unaligned, but we don't know the
1271     // alignment here.
1272     if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
1273       return isLegalGlobalAddressingMode(AM);
1274 
1275     if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
1276       // SMRD instructions have an 8-bit, dword offset on SI.
1277       if (!isUInt<8>(AM.BaseOffs / 4))
1278         return false;
1279     } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
1280       // On CI+, this can also be a 32-bit literal constant offset. If it fits
1281       // in 8-bits, it can use a smaller encoding.
1282       if (!isUInt<32>(AM.BaseOffs / 4))
1283         return false;
1284     } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
1285       // On VI, these use the SMEM format and the offset is 20-bit in bytes.
1286       if (!isUInt<20>(AM.BaseOffs))
1287         return false;
1288     } else
1289       llvm_unreachable("unhandled generation");
1290 
1291     if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1292       return true;
1293 
1294     if (AM.Scale == 1 && AM.HasBaseReg)
1295       return true;
1296 
1297     return false;
1298 
1299   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
1300     return isLegalMUBUFAddressingMode(AM);
1301   } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
1302              AS == AMDGPUAS::REGION_ADDRESS) {
1303     // Basic, single offset DS instructions allow a 16-bit unsigned immediate
1304     // field.
1305     // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
1306     // an 8-bit dword offset but we don't know the alignment here.
1307     if (!isUInt<16>(AM.BaseOffs))
1308       return false;
1309 
1310     if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1311       return true;
1312 
1313     if (AM.Scale == 1 && AM.HasBaseReg)
1314       return true;
1315 
1316     return false;
1317   } else if (AS == AMDGPUAS::FLAT_ADDRESS ||
1318              AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) {
1319     // For an unknown address space, this usually means that this is for some
1320     // reason being used for pure arithmetic, and not based on some addressing
1321     // computation. We don't have instructions that compute pointers with any
1322     // addressing modes, so treat them as having no offset like flat
1323     // instructions.
1324     return isLegalFlatAddressingMode(AM);
1325   }
1326 
1327   // Assume a user alias of global for unknown address spaces.
1328   return isLegalGlobalAddressingMode(AM);
1329 }
1330 
1331 bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
1332                                         const MachineFunction &MF) const {
1333   if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
1334     return (MemVT.getSizeInBits() <= 4 * 32);
1335   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
1336     unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
1337     return (MemVT.getSizeInBits() <= MaxPrivateBits);
1338   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
1339     return (MemVT.getSizeInBits() <= 2 * 32);
1340   }
1341   return true;
1342 }
1343 
1344 bool SITargetLowering::allowsMisalignedMemoryAccessesImpl(
1345     unsigned Size, unsigned AddrSpace, Align Alignment,
1346     MachineMemOperand::Flags Flags, bool *IsFast) const {
1347   if (IsFast)
1348     *IsFast = false;
1349 
1350   if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1351       AddrSpace == AMDGPUAS::REGION_ADDRESS) {
1352     // Check if alignment requirements for ds_read/write instructions are
1353     // disabled.
1354     if (!Subtarget->hasUnalignedDSAccessEnabled() && Alignment < Align(4))
1355       return false;
1356 
1357     Align RequiredAlignment(PowerOf2Ceil(Size/8)); // Natural alignment.
1358     if (Subtarget->hasLDSMisalignedBug() && Size > 32 &&
1359         Alignment < RequiredAlignment)
1360       return false;
1361 
1362     // Either, the alignment requirements are "enabled", or there is an
1363     // unaligned LDS access related hardware bug though alignment requirements
1364     // are "disabled". In either case, we need to check for proper alignment
1365     // requirements.
1366     //
1367     switch (Size) {
1368     case 64:
1369       // SI has a hardware bug in the LDS / GDS bounds checking: if the base
1370       // address is negative, then the instruction is incorrectly treated as
1371       // out-of-bounds even if base + offsets is in bounds. Split vectorized
1372       // loads here to avoid emitting ds_read2_b32. We may re-combine the
1373       // load later in the SILoadStoreOptimizer.
1374       if (!Subtarget->hasUsableDSOffset() && Alignment < Align(8))
1375         return false;
1376 
1377       // 8 byte accessing via ds_read/write_b64 require 8-byte alignment, but we
1378       // can do a 4 byte aligned, 8 byte access in a single operation using
1379       // ds_read2/write2_b32 with adjacent offsets.
1380       RequiredAlignment = Align(4);
1381 
1382       if (Subtarget->hasUnalignedDSAccessEnabled()) {
1383         // We will either select ds_read_b64/ds_write_b64 or ds_read2_b32/
1384         // ds_write2_b32 depending on the alignment. In either case with either
1385         // alignment there is no faster way of doing this.
1386         if (IsFast)
1387           *IsFast = true;
1388         return true;
1389       }
1390 
1391       break;
1392     case 96:
1393       if (!Subtarget->hasDS96AndDS128())
1394         return false;
1395 
1396       // 12 byte accessing via ds_read/write_b96 require 16-byte alignment on
1397       // gfx8 and older.
1398 
1399       if (Subtarget->hasUnalignedDSAccessEnabled()) {
1400         // Naturally aligned access is fastest. However, also report it is Fast
1401         // if memory is aligned less than DWORD. A narrow load or store will be
1402         // be equally slow as a single ds_read_b96/ds_write_b96, but there will
1403         // be more of them, so overall we will pay less penalty issuing a single
1404         // instruction.
1405         if (IsFast)
1406           *IsFast = Alignment >= RequiredAlignment || Alignment < Align(4);
1407         return true;
1408       }
1409 
1410       break;
1411     case 128:
1412       if (!Subtarget->hasDS96AndDS128() || !Subtarget->useDS128())
1413         return false;
1414 
1415       // 16 byte accessing via ds_read/write_b128 require 16-byte alignment on
1416       // gfx8 and older, but  we can do a 8 byte aligned, 16 byte access in a
1417       // single operation using ds_read2/write2_b64.
1418       RequiredAlignment = Align(8);
1419 
1420       if (Subtarget->hasUnalignedDSAccessEnabled()) {
1421         // Naturally aligned access is fastest. However, also report it is Fast
1422         // if memory is aligned less than DWORD. A narrow load or store will be
1423         // be equally slow as a single ds_read_b128/ds_write_b128, but there
1424         // will be more of them, so overall we will pay less penalty issuing a
1425         // single instruction.
1426         if (IsFast)
1427           *IsFast = Alignment >= RequiredAlignment || Alignment < Align(4);
1428         return true;
1429       }
1430 
1431       break;
1432     default:
1433       if (Size > 32)
1434         return false;
1435 
1436       break;
1437     }
1438 
1439     if (IsFast)
1440       *IsFast = Alignment >= RequiredAlignment;
1441 
1442     return Alignment >= RequiredAlignment ||
1443            Subtarget->hasUnalignedDSAccessEnabled();
1444   }
1445 
1446   if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
1447     bool AlignedBy4 = Alignment >= Align(4);
1448     if (IsFast)
1449       *IsFast = AlignedBy4;
1450 
1451     return AlignedBy4 ||
1452            Subtarget->enableFlatScratch() ||
1453            Subtarget->hasUnalignedScratchAccess();
1454   }
1455 
1456   // FIXME: We have to be conservative here and assume that flat operations
1457   // will access scratch.  If we had access to the IR function, then we
1458   // could determine if any private memory was used in the function.
1459   if (AddrSpace == AMDGPUAS::FLAT_ADDRESS &&
1460       !Subtarget->hasUnalignedScratchAccess()) {
1461     bool AlignedBy4 = Alignment >= Align(4);
1462     if (IsFast)
1463       *IsFast = AlignedBy4;
1464 
1465     return AlignedBy4;
1466   }
1467 
1468   if (Subtarget->hasUnalignedBufferAccessEnabled()) {
1469     // If we have a uniform constant load, it still requires using a slow
1470     // buffer instruction if unaligned.
1471     if (IsFast) {
1472       // Accesses can really be issued as 1-byte aligned or 4-byte aligned, so
1473       // 2-byte alignment is worse than 1 unless doing a 2-byte access.
1474       *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
1475                  AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
1476         Alignment >= Align(4) : Alignment != Align(2);
1477     }
1478 
1479     return true;
1480   }
1481 
1482   // Smaller than dword value must be aligned.
1483   if (Size < 32)
1484     return false;
1485 
1486   // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
1487   // byte-address are ignored, thus forcing Dword alignment.
1488   // This applies to private, global, and constant memory.
1489   if (IsFast)
1490     *IsFast = true;
1491 
1492   return Size >= 32 && Alignment >= Align(4);
1493 }
1494 
1495 bool SITargetLowering::allowsMisalignedMemoryAccesses(
1496     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
1497     bool *IsFast) const {
1498   bool Allow = allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AddrSpace,
1499                                                   Alignment, Flags, IsFast);
1500 
1501   if (Allow && IsFast && Subtarget->hasUnalignedDSAccessEnabled() &&
1502       (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1503        AddrSpace == AMDGPUAS::REGION_ADDRESS)) {
1504     // Lie it is fast if +unaligned-access-mode is passed so that DS accesses
1505     // get vectorized. We could use ds_read2_b*/ds_write2_b* instructions on a
1506     // misaligned data which is faster than a pair of ds_read_b*/ds_write_b*
1507     // which would be equally misaligned.
1508     // This is only used by the common passes, selection always calls the
1509     // allowsMisalignedMemoryAccessesImpl version.
1510     *IsFast = true;
1511   }
1512 
1513   return Allow;
1514 }
1515 
1516 EVT SITargetLowering::getOptimalMemOpType(
1517     const MemOp &Op, const AttributeList &FuncAttributes) const {
1518   // FIXME: Should account for address space here.
1519 
1520   // The default fallback uses the private pointer size as a guess for a type to
1521   // use. Make sure we switch these to 64-bit accesses.
1522 
1523   if (Op.size() >= 16 &&
1524       Op.isDstAligned(Align(4))) // XXX: Should only do for global
1525     return MVT::v4i32;
1526 
1527   if (Op.size() >= 8 && Op.isDstAligned(Align(4)))
1528     return MVT::v2i32;
1529 
1530   // Use the default.
1531   return MVT::Other;
1532 }
1533 
1534 bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
1535   const MemSDNode *MemNode = cast<MemSDNode>(N);
1536   return MemNode->getMemOperand()->getFlags() & MONoClobber;
1537 }
1538 
1539 bool SITargetLowering::isNonGlobalAddrSpace(unsigned AS) {
1540   return AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS ||
1541          AS == AMDGPUAS::PRIVATE_ADDRESS;
1542 }
1543 
1544 bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS,
1545                                            unsigned DestAS) const {
1546   // Flat -> private/local is a simple truncate.
1547   // Flat -> global is no-op
1548   if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
1549     return true;
1550 
1551   const GCNTargetMachine &TM =
1552       static_cast<const GCNTargetMachine &>(getTargetMachine());
1553   return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
1554 }
1555 
1556 bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
1557   const MemSDNode *MemNode = cast<MemSDNode>(N);
1558 
1559   return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
1560 }
1561 
1562 TargetLoweringBase::LegalizeTypeAction
1563 SITargetLowering::getPreferredVectorAction(MVT VT) const {
1564   if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
1565       VT.getScalarType().bitsLE(MVT::i16))
1566     return VT.isPow2VectorType() ? TypeSplitVector : TypeWidenVector;
1567   return TargetLoweringBase::getPreferredVectorAction(VT);
1568 }
1569 
1570 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
1571                                                          Type *Ty) const {
1572   // FIXME: Could be smarter if called for vector constants.
1573   return true;
1574 }
1575 
1576 bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
1577   if (Subtarget->has16BitInsts() && VT == MVT::i16) {
1578     switch (Op) {
1579     case ISD::LOAD:
1580     case ISD::STORE:
1581 
1582     // These operations are done with 32-bit instructions anyway.
1583     case ISD::AND:
1584     case ISD::OR:
1585     case ISD::XOR:
1586     case ISD::SELECT:
1587       // TODO: Extensions?
1588       return true;
1589     default:
1590       return false;
1591     }
1592   }
1593 
1594   // SimplifySetCC uses this function to determine whether or not it should
1595   // create setcc with i1 operands.  We don't have instructions for i1 setcc.
1596   if (VT == MVT::i1 && Op == ISD::SETCC)
1597     return false;
1598 
1599   return TargetLowering::isTypeDesirableForOp(Op, VT);
1600 }
1601 
1602 SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1603                                                    const SDLoc &SL,
1604                                                    SDValue Chain,
1605                                                    uint64_t Offset) const {
1606   const DataLayout &DL = DAG.getDataLayout();
1607   MachineFunction &MF = DAG.getMachineFunction();
1608   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1609 
1610   const ArgDescriptor *InputPtrReg;
1611   const TargetRegisterClass *RC;
1612   LLT ArgTy;
1613   MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
1614 
1615   std::tie(InputPtrReg, RC, ArgTy) =
1616       Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
1617 
1618   // We may not have the kernarg segment argument if we have no kernel
1619   // arguments.
1620   if (!InputPtrReg)
1621     return DAG.getConstant(0, SL, PtrVT);
1622 
1623   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
1624   SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
1625     MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1626 
1627   return DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Offset));
1628 }
1629 
1630 SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1631                                             const SDLoc &SL) const {
1632   uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
1633                                                FIRST_IMPLICIT);
1634   return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1635 }
1636 
1637 SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1638                                          const SDLoc &SL, SDValue Val,
1639                                          bool Signed,
1640                                          const ISD::InputArg *Arg) const {
1641   // First, if it is a widened vector, narrow it.
1642   if (VT.isVector() &&
1643       VT.getVectorNumElements() != MemVT.getVectorNumElements()) {
1644     EVT NarrowedVT =
1645         EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(),
1646                          VT.getVectorNumElements());
1647     Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val,
1648                       DAG.getConstant(0, SL, MVT::i32));
1649   }
1650 
1651   // Then convert the vector elements or scalar value.
1652   if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1653       VT.bitsLT(MemVT)) {
1654     unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1655     Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1656   }
1657 
1658   if (MemVT.isFloatingPoint())
1659     Val = getFPExtOrFPRound(DAG, Val, SL, VT);
1660   else if (Signed)
1661     Val = DAG.getSExtOrTrunc(Val, SL, VT);
1662   else
1663     Val = DAG.getZExtOrTrunc(Val, SL, VT);
1664 
1665   return Val;
1666 }
1667 
1668 SDValue SITargetLowering::lowerKernargMemParameter(
1669     SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &SL, SDValue Chain,
1670     uint64_t Offset, Align Alignment, bool Signed,
1671     const ISD::InputArg *Arg) const {
1672   MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
1673 
1674   // Try to avoid using an extload by loading earlier than the argument address,
1675   // and extracting the relevant bits. The load should hopefully be merged with
1676   // the previous argument.
1677   if (MemVT.getStoreSize() < 4 && Alignment < 4) {
1678     // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
1679     int64_t AlignDownOffset = alignDown(Offset, 4);
1680     int64_t OffsetDiff = Offset - AlignDownOffset;
1681 
1682     EVT IntVT = MemVT.changeTypeToInteger();
1683 
1684     // TODO: If we passed in the base kernel offset we could have a better
1685     // alignment than 4, but we don't really need it.
1686     SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
1687     SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, Align(4),
1688                                MachineMemOperand::MODereferenceable |
1689                                    MachineMemOperand::MOInvariant);
1690 
1691     SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
1692     SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
1693 
1694     SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
1695     ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
1696     ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);
1697 
1698 
1699     return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
1700   }
1701 
1702   SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1703   SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Alignment,
1704                              MachineMemOperand::MODereferenceable |
1705                                  MachineMemOperand::MOInvariant);
1706 
1707   SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
1708   return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
1709 }
1710 
1711 SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1712                                               const SDLoc &SL, SDValue Chain,
1713                                               const ISD::InputArg &Arg) const {
1714   MachineFunction &MF = DAG.getMachineFunction();
1715   MachineFrameInfo &MFI = MF.getFrameInfo();
1716 
1717   if (Arg.Flags.isByVal()) {
1718     unsigned Size = Arg.Flags.getByValSize();
1719     int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1720     return DAG.getFrameIndex(FrameIdx, MVT::i32);
1721   }
1722 
1723   unsigned ArgOffset = VA.getLocMemOffset();
1724   unsigned ArgSize = VA.getValVT().getStoreSize();
1725 
1726   int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1727 
1728   // Create load nodes to retrieve arguments from the stack.
1729   SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1730   SDValue ArgValue;
1731 
1732   // For NON_EXTLOAD, generic code in getLoad assert(ValVT == MemVT)
1733   ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
1734   MVT MemVT = VA.getValVT();
1735 
1736   switch (VA.getLocInfo()) {
1737   default:
1738     break;
1739   case CCValAssign::BCvt:
1740     MemVT = VA.getLocVT();
1741     break;
1742   case CCValAssign::SExt:
1743     ExtType = ISD::SEXTLOAD;
1744     break;
1745   case CCValAssign::ZExt:
1746     ExtType = ISD::ZEXTLOAD;
1747     break;
1748   case CCValAssign::AExt:
1749     ExtType = ISD::EXTLOAD;
1750     break;
1751   }
1752 
1753   ArgValue = DAG.getExtLoad(
1754     ExtType, SL, VA.getLocVT(), Chain, FIN,
1755     MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
1756     MemVT);
1757   return ArgValue;
1758 }
1759 
1760 SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1761   const SIMachineFunctionInfo &MFI,
1762   EVT VT,
1763   AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
1764   const ArgDescriptor *Reg;
1765   const TargetRegisterClass *RC;
1766   LLT Ty;
1767 
1768   std::tie(Reg, RC, Ty) = MFI.getPreloadedValue(PVID);
1769   if (!Reg) {
1770     if (PVID == AMDGPUFunctionArgInfo::PreloadedValue::KERNARG_SEGMENT_PTR) {
1771       // It's possible for a kernarg intrinsic call to appear in a kernel with
1772       // no allocated segment, in which case we do not add the user sgpr
1773       // argument, so just return null.
1774       return DAG.getConstant(0, SDLoc(), VT);
1775     }
1776 
1777     // It's undefined behavior if a function marked with the amdgpu-no-*
1778     // attributes uses the corresponding intrinsic.
1779     return DAG.getUNDEF(VT);
1780   }
1781 
1782   return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1783 }
1784 
1785 static void processPSInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
1786                                CallingConv::ID CallConv,
1787                                ArrayRef<ISD::InputArg> Ins, BitVector &Skipped,
1788                                FunctionType *FType,
1789                                SIMachineFunctionInfo *Info) {
1790   for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
1791     const ISD::InputArg *Arg = &Ins[I];
1792 
1793     assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
1794            "vector type argument should have been split");
1795 
1796     // First check if it's a PS input addr.
1797     if (CallConv == CallingConv::AMDGPU_PS &&
1798         !Arg->Flags.isInReg() && PSInputNum <= 15) {
1799       bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);
1800 
1801       // Inconveniently only the first part of the split is marked as isSplit,
1802       // so skip to the end. We only want to increment PSInputNum once for the
1803       // entire split argument.
1804       if (Arg->Flags.isSplit()) {
1805         while (!Arg->Flags.isSplitEnd()) {
1806           assert((!Arg->VT.isVector() ||
1807                   Arg->VT.getScalarSizeInBits() == 16) &&
1808                  "unexpected vector split in ps argument type");
1809           if (!SkipArg)
1810             Splits.push_back(*Arg);
1811           Arg = &Ins[++I];
1812         }
1813       }
1814 
1815       if (SkipArg) {
1816         // We can safely skip PS inputs.
1817         Skipped.set(Arg->getOrigArgIndex());
1818         ++PSInputNum;
1819         continue;
1820       }
1821 
1822       Info->markPSInputAllocated(PSInputNum);
1823       if (Arg->Used)
1824         Info->markPSInputEnabled(PSInputNum);
1825 
1826       ++PSInputNum;
1827     }
1828 
1829     Splits.push_back(*Arg);
1830   }
1831 }
1832 
1833 // Allocate special inputs passed in VGPRs.
1834 void SITargetLowering::allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1835                                                       MachineFunction &MF,
1836                                                       const SIRegisterInfo &TRI,
1837                                                       SIMachineFunctionInfo &Info) const {
1838   const LLT S32 = LLT::scalar(32);
1839   MachineRegisterInfo &MRI = MF.getRegInfo();
1840 
1841   if (Info.hasWorkItemIDX()) {
1842     Register Reg = AMDGPU::VGPR0;
1843     MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1844 
1845     CCInfo.AllocateReg(Reg);
1846     unsigned Mask = (Subtarget->hasPackedTID() &&
1847                      Info.hasWorkItemIDY()) ? 0x3ff : ~0u;
1848     Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg, Mask));
1849   }
1850 
1851   if (Info.hasWorkItemIDY()) {
1852     assert(Info.hasWorkItemIDX());
1853     if (Subtarget->hasPackedTID()) {
1854       Info.setWorkItemIDY(ArgDescriptor::createRegister(AMDGPU::VGPR0,
1855                                                         0x3ff << 10));
1856     } else {
1857       unsigned Reg = AMDGPU::VGPR1;
1858       MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1859 
1860       CCInfo.AllocateReg(Reg);
1861       Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
1862     }
1863   }
1864 
1865   if (Info.hasWorkItemIDZ()) {
1866     assert(Info.hasWorkItemIDX() && Info.hasWorkItemIDY());
1867     if (Subtarget->hasPackedTID()) {
1868       Info.setWorkItemIDZ(ArgDescriptor::createRegister(AMDGPU::VGPR0,
1869                                                         0x3ff << 20));
1870     } else {
1871       unsigned Reg = AMDGPU::VGPR2;
1872       MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1873 
1874       CCInfo.AllocateReg(Reg);
1875       Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
1876     }
1877   }
1878 }
1879 
1880 // Try to allocate a VGPR at the end of the argument list, or if no argument
1881 // VGPRs are left allocating a stack slot.
1882 // If \p Mask is is given it indicates bitfield position in the register.
1883 // If \p Arg is given use it with new ]p Mask instead of allocating new.
1884 static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u,
1885                                          ArgDescriptor Arg = ArgDescriptor()) {
1886   if (Arg.isSet())
1887     return ArgDescriptor::createArg(Arg, Mask);
1888 
1889   ArrayRef<MCPhysReg> ArgVGPRs
1890     = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1891   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1892   if (RegIdx == ArgVGPRs.size()) {
1893     // Spill to stack required.
1894     int64_t Offset = CCInfo.AllocateStack(4, Align(4));
1895 
1896     return ArgDescriptor::createStack(Offset, Mask);
1897   }
1898 
1899   unsigned Reg = ArgVGPRs[RegIdx];
1900   Reg = CCInfo.AllocateReg(Reg);
1901   assert(Reg != AMDGPU::NoRegister);
1902 
1903   MachineFunction &MF = CCInfo.getMachineFunction();
1904   Register LiveInVReg = MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1905   MF.getRegInfo().setType(LiveInVReg, LLT::scalar(32));
1906   return ArgDescriptor::createRegister(Reg, Mask);
1907 }
1908 
1909 static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
1910                                              const TargetRegisterClass *RC,
1911                                              unsigned NumArgRegs) {
1912   ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
1913   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1914   if (RegIdx == ArgSGPRs.size())
1915     report_fatal_error("ran out of SGPRs for arguments");
1916 
1917   unsigned Reg = ArgSGPRs[RegIdx];
1918   Reg = CCInfo.AllocateReg(Reg);
1919   assert(Reg != AMDGPU::NoRegister);
1920 
1921   MachineFunction &MF = CCInfo.getMachineFunction();
1922   MF.addLiveIn(Reg, RC);
1923   return ArgDescriptor::createRegister(Reg);
1924 }
1925 
1926 // If this has a fixed position, we still should allocate the register in the
1927 // CCInfo state. Technically we could get away with this for values passed
1928 // outside of the normal argument range.
1929 static void allocateFixedSGPRInputImpl(CCState &CCInfo,
1930                                        const TargetRegisterClass *RC,
1931                                        MCRegister Reg) {
1932   Reg = CCInfo.AllocateReg(Reg);
1933   assert(Reg != AMDGPU::NoRegister);
1934   MachineFunction &MF = CCInfo.getMachineFunction();
1935   MF.addLiveIn(Reg, RC);
1936 }
1937 
1938 static void allocateSGPR32Input(CCState &CCInfo, ArgDescriptor &Arg) {
1939   if (Arg) {
1940     allocateFixedSGPRInputImpl(CCInfo, &AMDGPU::SGPR_32RegClass,
1941                                Arg.getRegister());
1942   } else
1943     Arg = allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1944 }
1945 
1946 static void allocateSGPR64Input(CCState &CCInfo, ArgDescriptor &Arg) {
1947   if (Arg) {
1948     allocateFixedSGPRInputImpl(CCInfo, &AMDGPU::SGPR_64RegClass,
1949                                Arg.getRegister());
1950   } else
1951     Arg = allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1952 }
1953 
1954 /// Allocate implicit function VGPR arguments at the end of allocated user
1955 /// arguments.
1956 void SITargetLowering::allocateSpecialInputVGPRs(
1957   CCState &CCInfo, MachineFunction &MF,
1958   const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const {
1959   const unsigned Mask = 0x3ff;
1960   ArgDescriptor Arg;
1961 
1962   if (Info.hasWorkItemIDX()) {
1963     Arg = allocateVGPR32Input(CCInfo, Mask);
1964     Info.setWorkItemIDX(Arg);
1965   }
1966 
1967   if (Info.hasWorkItemIDY()) {
1968     Arg = allocateVGPR32Input(CCInfo, Mask << 10, Arg);
1969     Info.setWorkItemIDY(Arg);
1970   }
1971 
1972   if (Info.hasWorkItemIDZ())
1973     Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo, Mask << 20, Arg));
1974 }
1975 
1976 /// Allocate implicit function VGPR arguments in fixed registers.
1977 void SITargetLowering::allocateSpecialInputVGPRsFixed(
1978   CCState &CCInfo, MachineFunction &MF,
1979   const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const {
1980   Register Reg = CCInfo.AllocateReg(AMDGPU::VGPR31);
1981   if (!Reg)
1982     report_fatal_error("failed to allocated VGPR for implicit arguments");
1983 
1984   const unsigned Mask = 0x3ff;
1985   Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg, Mask));
1986   Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg, Mask << 10));
1987   Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg, Mask << 20));
1988 }
1989 
1990 void SITargetLowering::allocateSpecialInputSGPRs(
1991   CCState &CCInfo,
1992   MachineFunction &MF,
1993   const SIRegisterInfo &TRI,
1994   SIMachineFunctionInfo &Info) const {
1995   auto &ArgInfo = Info.getArgInfo();
1996 
1997   // TODO: Unify handling with private memory pointers.
1998   if (Info.hasDispatchPtr())
1999     allocateSGPR64Input(CCInfo, ArgInfo.DispatchPtr);
2000 
2001   if (Info.hasQueuePtr() && AMDGPU::getAmdhsaCodeObjectVersion() < 5)
2002     allocateSGPR64Input(CCInfo, ArgInfo.QueuePtr);
2003 
2004   // Implicit arg ptr takes the place of the kernarg segment pointer. This is a
2005   // constant offset from the kernarg segment.
2006   if (Info.hasImplicitArgPtr())
2007     allocateSGPR64Input(CCInfo, ArgInfo.ImplicitArgPtr);
2008 
2009   if (Info.hasDispatchID())
2010     allocateSGPR64Input(CCInfo, ArgInfo.DispatchID);
2011 
2012   // flat_scratch_init is not applicable for non-kernel functions.
2013 
2014   if (Info.hasWorkGroupIDX())
2015     allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDX);
2016 
2017   if (Info.hasWorkGroupIDY())
2018     allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDY);
2019 
2020   if (Info.hasWorkGroupIDZ())
2021     allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDZ);
2022 }
2023 
2024 // Allocate special inputs passed in user SGPRs.
2025 void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo,
2026                                             MachineFunction &MF,
2027                                             const SIRegisterInfo &TRI,
2028                                             SIMachineFunctionInfo &Info) const {
2029   if (Info.hasImplicitBufferPtr()) {
2030     Register ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
2031     MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
2032     CCInfo.AllocateReg(ImplicitBufferPtrReg);
2033   }
2034 
2035   // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
2036   if (Info.hasPrivateSegmentBuffer()) {
2037     Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
2038     MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
2039     CCInfo.AllocateReg(PrivateSegmentBufferReg);
2040   }
2041 
2042   if (Info.hasDispatchPtr()) {
2043     Register DispatchPtrReg = Info.addDispatchPtr(TRI);
2044     MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
2045     CCInfo.AllocateReg(DispatchPtrReg);
2046   }
2047 
2048   if (Info.hasQueuePtr() && AMDGPU::getAmdhsaCodeObjectVersion() < 5) {
2049     Register QueuePtrReg = Info.addQueuePtr(TRI);
2050     MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
2051     CCInfo.AllocateReg(QueuePtrReg);
2052   }
2053 
2054   if (Info.hasKernargSegmentPtr()) {
2055     MachineRegisterInfo &MRI = MF.getRegInfo();
2056     Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
2057     CCInfo.AllocateReg(InputPtrReg);
2058 
2059     Register VReg = MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
2060     MRI.setType(VReg, LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
2061   }
2062 
2063   if (Info.hasDispatchID()) {
2064     Register DispatchIDReg = Info.addDispatchID(TRI);
2065     MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
2066     CCInfo.AllocateReg(DispatchIDReg);
2067   }
2068 
2069   if (Info.hasFlatScratchInit() && !getSubtarget()->isAmdPalOS()) {
2070     Register FlatScratchInitReg = Info.addFlatScratchInit(TRI);
2071     MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
2072     CCInfo.AllocateReg(FlatScratchInitReg);
2073   }
2074 
2075   // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
2076   // these from the dispatch pointer.
2077 }
2078 
2079 // Allocate special input registers that are initialized per-wave.
2080 void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo,
2081                                            MachineFunction &MF,
2082                                            SIMachineFunctionInfo &Info,
2083                                            CallingConv::ID CallConv,
2084                                            bool IsShader) const {
2085   if (Info.hasWorkGroupIDX()) {
2086     Register Reg = Info.addWorkGroupIDX();
2087     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
2088     CCInfo.AllocateReg(Reg);
2089   }
2090 
2091   if (Info.hasWorkGroupIDY()) {
2092     Register Reg = Info.addWorkGroupIDY();
2093     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
2094     CCInfo.AllocateReg(Reg);
2095   }
2096 
2097   if (Info.hasWorkGroupIDZ()) {
2098     Register Reg = Info.addWorkGroupIDZ();
2099     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
2100     CCInfo.AllocateReg(Reg);
2101   }
2102 
2103   if (Info.hasWorkGroupInfo()) {
2104     Register Reg = Info.addWorkGroupInfo();
2105     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
2106     CCInfo.AllocateReg(Reg);
2107   }
2108 
2109   if (Info.hasPrivateSegmentWaveByteOffset()) {
2110     // Scratch wave offset passed in system SGPR.
2111     unsigned PrivateSegmentWaveByteOffsetReg;
2112 
2113     if (IsShader) {
2114       PrivateSegmentWaveByteOffsetReg =
2115         Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
2116 
2117       // This is true if the scratch wave byte offset doesn't have a fixed
2118       // location.
2119       if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
2120         PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
2121         Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
2122       }
2123     } else
2124       PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
2125 
2126     MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
2127     CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
2128   }
2129 }
2130 
2131 static void reservePrivateMemoryRegs(const TargetMachine &TM,
2132                                      MachineFunction &MF,
2133                                      const SIRegisterInfo &TRI,
2134                                      SIMachineFunctionInfo &Info) {
2135   // Now that we've figured out where the scratch register inputs are, see if
2136   // should reserve the arguments and use them directly.
2137   MachineFrameInfo &MFI = MF.getFrameInfo();
2138   bool HasStackObjects = MFI.hasStackObjects();
2139   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
2140 
2141   // Record that we know we have non-spill stack objects so we don't need to
2142   // check all stack objects later.
2143   if (HasStackObjects)
2144     Info.setHasNonSpillStackObjects(true);
2145 
2146   // Everything live out of a block is spilled with fast regalloc, so it's
2147   // almost certain that spilling will be required.
2148   if (TM.getOptLevel() == CodeGenOpt::None)
2149     HasStackObjects = true;
2150 
2151   // For now assume stack access is needed in any callee functions, so we need
2152   // the scratch registers to pass in.
2153   bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
2154 
2155   if (!ST.enableFlatScratch()) {
2156     if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) {
2157       // If we have stack objects, we unquestionably need the private buffer
2158       // resource. For the Code Object V2 ABI, this will be the first 4 user
2159       // SGPR inputs. We can reserve those and use them directly.
2160 
2161       Register PrivateSegmentBufferReg =
2162           Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
2163       Info.setScratchRSrcReg(PrivateSegmentBufferReg);
2164     } else {
2165       unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
2166       // We tentatively reserve the last registers (skipping the last registers
2167       // which may contain VCC, FLAT_SCR, and XNACK). After register allocation,
2168       // we'll replace these with the ones immediately after those which were
2169       // really allocated. In the prologue copies will be inserted from the
2170       // argument to these reserved registers.
2171 
2172       // Without HSA, relocations are used for the scratch pointer and the
2173       // buffer resource setup is always inserted in the prologue. Scratch wave
2174       // offset is still in an input SGPR.
2175       Info.setScratchRSrcReg(ReservedBufferReg);
2176     }
2177   }
2178 
2179   MachineRegisterInfo &MRI = MF.getRegInfo();
2180 
2181   // For entry functions we have to set up the stack pointer if we use it,
2182   // whereas non-entry functions get this "for free". This means there is no
2183   // intrinsic advantage to using S32 over S34 in cases where we do not have
2184   // calls but do need a frame pointer (i.e. if we are requested to have one
2185   // because frame pointer elimination is disabled). To keep things simple we
2186   // only ever use S32 as the call ABI stack pointer, and so using it does not
2187   // imply we need a separate frame pointer.
2188   //
2189   // Try to use s32 as the SP, but move it if it would interfere with input
2190   // arguments. This won't work with calls though.
2191   //
2192   // FIXME: Move SP to avoid any possible inputs, or find a way to spill input
2193   // registers.
2194   if (!MRI.isLiveIn(AMDGPU::SGPR32)) {
2195     Info.setStackPtrOffsetReg(AMDGPU::SGPR32);
2196   } else {
2197     assert(AMDGPU::isShader(MF.getFunction().getCallingConv()));
2198 
2199     if (MFI.hasCalls())
2200       report_fatal_error("call in graphics shader with too many input SGPRs");
2201 
2202     for (unsigned Reg : AMDGPU::SGPR_32RegClass) {
2203       if (!MRI.isLiveIn(Reg)) {
2204         Info.setStackPtrOffsetReg(Reg);
2205         break;
2206       }
2207     }
2208 
2209     if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG)
2210       report_fatal_error("failed to find register for SP");
2211   }
2212 
2213   // hasFP should be accurate for entry functions even before the frame is
2214   // finalized, because it does not rely on the known stack size, only
2215   // properties like whether variable sized objects are present.
2216   if (ST.getFrameLowering()->hasFP(MF)) {
2217     Info.setFrameOffsetReg(AMDGPU::SGPR33);
2218   }
2219 }
2220 
2221 bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
2222   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
2223   return !Info->isEntryFunction();
2224 }
2225 
2226 void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
2227 
2228 }
2229 
2230 void SITargetLowering::insertCopiesSplitCSR(
2231   MachineBasicBlock *Entry,
2232   const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
2233   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2234 
2235   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
2236   if (!IStart)
2237     return;
2238 
2239   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2240   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
2241   MachineBasicBlock::iterator MBBI = Entry->begin();
2242   for (const MCPhysReg *I = IStart; *I; ++I) {
2243     const TargetRegisterClass *RC = nullptr;
2244     if (AMDGPU::SReg_64RegClass.contains(*I))
2245       RC = &AMDGPU::SGPR_64RegClass;
2246     else if (AMDGPU::SReg_32RegClass.contains(*I))
2247       RC = &AMDGPU::SGPR_32RegClass;
2248     else
2249       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2250 
2251     Register NewVR = MRI->createVirtualRegister(RC);
2252     // Create copy from CSR to a virtual register.
2253     Entry->addLiveIn(*I);
2254     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
2255       .addReg(*I);
2256 
2257     // Insert the copy-back instructions right before the terminator.
2258     for (auto *Exit : Exits)
2259       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
2260               TII->get(TargetOpcode::COPY), *I)
2261         .addReg(NewVR);
2262   }
2263 }
2264 
2265 SDValue SITargetLowering::LowerFormalArguments(
2266     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
2267     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2268     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2269   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2270 
2271   MachineFunction &MF = DAG.getMachineFunction();
2272   const Function &Fn = MF.getFunction();
2273   FunctionType *FType = MF.getFunction().getFunctionType();
2274   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2275 
2276   if (Subtarget->isAmdHsaOS() && AMDGPU::isGraphics(CallConv)) {
2277     DiagnosticInfoUnsupported NoGraphicsHSA(
2278         Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
2279     DAG.getContext()->diagnose(NoGraphicsHSA);
2280     return DAG.getEntryNode();
2281   }
2282 
2283   Info->allocateModuleLDSGlobal(Fn);
2284 
2285   SmallVector<ISD::InputArg, 16> Splits;
2286   SmallVector<CCValAssign, 16> ArgLocs;
2287   BitVector Skipped(Ins.size());
2288   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
2289                  *DAG.getContext());
2290 
2291   bool IsGraphics = AMDGPU::isGraphics(CallConv);
2292   bool IsKernel = AMDGPU::isKernel(CallConv);
2293   bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
2294 
2295   if (IsGraphics) {
2296     assert(!Info->hasDispatchPtr() && !Info->hasKernargSegmentPtr() &&
2297            (!Info->hasFlatScratchInit() || Subtarget->enableFlatScratch()) &&
2298            !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
2299            !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
2300            !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
2301            !Info->hasWorkItemIDZ());
2302   }
2303 
2304   if (CallConv == CallingConv::AMDGPU_PS) {
2305     processPSInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
2306 
2307     // At least one interpolation mode must be enabled or else the GPU will
2308     // hang.
2309     //
2310     // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
2311     // set PSInputAddr, the user wants to enable some bits after the compilation
2312     // based on run-time states. Since we can't know what the final PSInputEna
2313     // will look like, so we shouldn't do anything here and the user should take
2314     // responsibility for the correct programming.
2315     //
2316     // Otherwise, the following restrictions apply:
2317     // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
2318     // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
2319     //   enabled too.
2320     if ((Info->getPSInputAddr() & 0x7F) == 0 ||
2321         ((Info->getPSInputAddr() & 0xF) == 0 && Info->isPSInputAllocated(11))) {
2322       CCInfo.AllocateReg(AMDGPU::VGPR0);
2323       CCInfo.AllocateReg(AMDGPU::VGPR1);
2324       Info->markPSInputAllocated(0);
2325       Info->markPSInputEnabled(0);
2326     }
2327     if (Subtarget->isAmdPalOS()) {
2328       // For isAmdPalOS, the user does not enable some bits after compilation
2329       // based on run-time states; the register values being generated here are
2330       // the final ones set in hardware. Therefore we need to apply the
2331       // workaround to PSInputAddr and PSInputEnable together.  (The case where
2332       // a bit is set in PSInputAddr but not PSInputEnable is where the
2333       // frontend set up an input arg for a particular interpolation mode, but
2334       // nothing uses that input arg. Really we should have an earlier pass
2335       // that removes such an arg.)
2336       unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
2337       if ((PsInputBits & 0x7F) == 0 ||
2338           ((PsInputBits & 0xF) == 0 && (PsInputBits >> 11 & 1)))
2339         Info->markPSInputEnabled(
2340             countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
2341     }
2342   } else if (IsKernel) {
2343     assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
2344   } else {
2345     Splits.append(Ins.begin(), Ins.end());
2346   }
2347 
2348   if (IsEntryFunc) {
2349     allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
2350     allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
2351   } else if (!IsGraphics) {
2352     // For the fixed ABI, pass workitem IDs in the last argument register.
2353     allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);
2354   }
2355 
2356   if (IsKernel) {
2357     analyzeFormalArgumentsCompute(CCInfo, Ins);
2358   } else {
2359     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
2360     CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
2361   }
2362 
2363   SmallVector<SDValue, 16> Chains;
2364 
2365   // FIXME: This is the minimum kernel argument alignment. We should improve
2366   // this to the maximum alignment of the arguments.
2367   //
2368   // FIXME: Alignment of explicit arguments totally broken with non-0 explicit
2369   // kern arg offset.
2370   const Align KernelArgBaseAlign = Align(16);
2371 
2372   for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
2373     const ISD::InputArg &Arg = Ins[i];
2374     if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
2375       InVals.push_back(DAG.getUNDEF(Arg.VT));
2376       continue;
2377     }
2378 
2379     CCValAssign &VA = ArgLocs[ArgIdx++];
2380     MVT VT = VA.getLocVT();
2381 
2382     if (IsEntryFunc && VA.isMemLoc()) {
2383       VT = Ins[i].VT;
2384       EVT MemVT = VA.getLocVT();
2385 
2386       const uint64_t Offset = VA.getLocMemOffset();
2387       Align Alignment = commonAlignment(KernelArgBaseAlign, Offset);
2388 
2389       if (Arg.Flags.isByRef()) {
2390         SDValue Ptr = lowerKernArgParameterPtr(DAG, DL, Chain, Offset);
2391 
2392         const GCNTargetMachine &TM =
2393             static_cast<const GCNTargetMachine &>(getTargetMachine());
2394         if (!TM.isNoopAddrSpaceCast(AMDGPUAS::CONSTANT_ADDRESS,
2395                                     Arg.Flags.getPointerAddrSpace())) {
2396           Ptr = DAG.getAddrSpaceCast(DL, VT, Ptr, AMDGPUAS::CONSTANT_ADDRESS,
2397                                      Arg.Flags.getPointerAddrSpace());
2398         }
2399 
2400         InVals.push_back(Ptr);
2401         continue;
2402       }
2403 
2404       SDValue Arg = lowerKernargMemParameter(
2405         DAG, VT, MemVT, DL, Chain, Offset, Alignment, Ins[i].Flags.isSExt(), &Ins[i]);
2406       Chains.push_back(Arg.getValue(1));
2407 
2408       auto *ParamTy =
2409         dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
2410       if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
2411           ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
2412                       ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) {
2413         // On SI local pointers are just offsets into LDS, so they are always
2414         // less than 16-bits.  On CI and newer they could potentially be
2415         // real pointers, so we can't guarantee their size.
2416         Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
2417                           DAG.getValueType(MVT::i16));
2418       }
2419 
2420       InVals.push_back(Arg);
2421       continue;
2422     } else if (!IsEntryFunc && VA.isMemLoc()) {
2423       SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
2424       InVals.push_back(Val);
2425       if (!Arg.Flags.isByVal())
2426         Chains.push_back(Val.getValue(1));
2427       continue;
2428     }
2429 
2430     assert(VA.isRegLoc() && "Parameter must be in a register!");
2431 
2432     Register Reg = VA.getLocReg();
2433     const TargetRegisterClass *RC = nullptr;
2434     if (AMDGPU::VGPR_32RegClass.contains(Reg))
2435       RC = &AMDGPU::VGPR_32RegClass;
2436     else if (AMDGPU::SGPR_32RegClass.contains(Reg))
2437       RC = &AMDGPU::SGPR_32RegClass;
2438     else
2439       llvm_unreachable("Unexpected register class in LowerFormalArguments!");
2440     EVT ValVT = VA.getValVT();
2441 
2442     Reg = MF.addLiveIn(Reg, RC);
2443     SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
2444 
2445     if (Arg.Flags.isSRet()) {
2446       // The return object should be reasonably addressable.
2447 
2448       // FIXME: This helps when the return is a real sret. If it is a
2449       // automatically inserted sret (i.e. CanLowerReturn returns false), an
2450       // extra copy is inserted in SelectionDAGBuilder which obscures this.
2451       unsigned NumBits
2452         = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex();
2453       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2454         DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
2455     }
2456 
2457     // If this is an 8 or 16-bit value, it is really passed promoted
2458     // to 32 bits. Insert an assert[sz]ext to capture this, then
2459     // truncate to the right size.
2460     switch (VA.getLocInfo()) {
2461     case CCValAssign::Full:
2462       break;
2463     case CCValAssign::BCvt:
2464       Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
2465       break;
2466     case CCValAssign::SExt:
2467       Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
2468                         DAG.getValueType(ValVT));
2469       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2470       break;
2471     case CCValAssign::ZExt:
2472       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2473                         DAG.getValueType(ValVT));
2474       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2475       break;
2476     case CCValAssign::AExt:
2477       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2478       break;
2479     default:
2480       llvm_unreachable("Unknown loc info!");
2481     }
2482 
2483     InVals.push_back(Val);
2484   }
2485 
2486   // Start adding system SGPRs.
2487   if (IsEntryFunc) {
2488     allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsGraphics);
2489   } else {
2490     CCInfo.AllocateReg(Info->getScratchRSrcReg());
2491     if (!IsGraphics)
2492       allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
2493   }
2494 
2495   auto &ArgUsageInfo =
2496     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2497   ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
2498 
2499   unsigned StackArgSize = CCInfo.getNextStackOffset();
2500   Info->setBytesInStackArgArea(StackArgSize);
2501 
2502   return Chains.empty() ? Chain :
2503     DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
2504 }
2505 
2506 // TODO: If return values can't fit in registers, we should return as many as
2507 // possible in registers before passing on stack.
2508 bool SITargetLowering::CanLowerReturn(
2509   CallingConv::ID CallConv,
2510   MachineFunction &MF, bool IsVarArg,
2511   const SmallVectorImpl<ISD::OutputArg> &Outs,
2512   LLVMContext &Context) const {
2513   // Replacing returns with sret/stack usage doesn't make sense for shaders.
2514   // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
2515   // for shaders. Vector types should be explicitly handled by CC.
2516   if (AMDGPU::isEntryFunctionCC(CallConv))
2517     return true;
2518 
2519   SmallVector<CCValAssign, 16> RVLocs;
2520   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2521   return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
2522 }
2523 
2524 SDValue
2525 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2526                               bool isVarArg,
2527                               const SmallVectorImpl<ISD::OutputArg> &Outs,
2528                               const SmallVectorImpl<SDValue> &OutVals,
2529                               const SDLoc &DL, SelectionDAG &DAG) const {
2530   MachineFunction &MF = DAG.getMachineFunction();
2531   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2532 
2533   if (AMDGPU::isKernel(CallConv)) {
2534     return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
2535                                              OutVals, DL, DAG);
2536   }
2537 
2538   bool IsShader = AMDGPU::isShader(CallConv);
2539 
2540   Info->setIfReturnsVoid(Outs.empty());
2541   bool IsWaveEnd = Info->returnsVoid() && IsShader;
2542 
2543   // CCValAssign - represent the assignment of the return value to a location.
2544   SmallVector<CCValAssign, 48> RVLocs;
2545   SmallVector<ISD::OutputArg, 48> Splits;
2546 
2547   // CCState - Info about the registers and stack slots.
2548   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2549                  *DAG.getContext());
2550 
2551   // Analyze outgoing return values.
2552   CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2553 
2554   SDValue Flag;
2555   SmallVector<SDValue, 48> RetOps;
2556   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2557 
2558   // Copy the result values into the output registers.
2559   for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
2560        ++I, ++RealRVLocIdx) {
2561     CCValAssign &VA = RVLocs[I];
2562     assert(VA.isRegLoc() && "Can only return in registers!");
2563     // TODO: Partially return in registers if return values don't fit.
2564     SDValue Arg = OutVals[RealRVLocIdx];
2565 
2566     // Copied from other backends.
2567     switch (VA.getLocInfo()) {
2568     case CCValAssign::Full:
2569       break;
2570     case CCValAssign::BCvt:
2571       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2572       break;
2573     case CCValAssign::SExt:
2574       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2575       break;
2576     case CCValAssign::ZExt:
2577       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2578       break;
2579     case CCValAssign::AExt:
2580       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2581       break;
2582     default:
2583       llvm_unreachable("Unknown loc info!");
2584     }
2585 
2586     Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2587     Flag = Chain.getValue(1);
2588     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2589   }
2590 
2591   // FIXME: Does sret work properly?
2592   if (!Info->isEntryFunction()) {
2593     const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2594     const MCPhysReg *I =
2595       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2596     if (I) {
2597       for (; *I; ++I) {
2598         if (AMDGPU::SReg_64RegClass.contains(*I))
2599           RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2600         else if (AMDGPU::SReg_32RegClass.contains(*I))
2601           RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2602         else
2603           llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2604       }
2605     }
2606   }
2607 
2608   // Update chain and glue.
2609   RetOps[0] = Chain;
2610   if (Flag.getNode())
2611     RetOps.push_back(Flag);
2612 
2613   unsigned Opc = AMDGPUISD::ENDPGM;
2614   if (!IsWaveEnd)
2615     Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
2616   return DAG.getNode(Opc, DL, MVT::Other, RetOps);
2617 }
2618 
2619 SDValue SITargetLowering::LowerCallResult(
2620     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2621     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2622     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2623     SDValue ThisVal) const {
2624   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2625 
2626   // Assign locations to each value returned by this call.
2627   SmallVector<CCValAssign, 16> RVLocs;
2628   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2629                  *DAG.getContext());
2630   CCInfo.AnalyzeCallResult(Ins, RetCC);
2631 
2632   // Copy all of the result registers out of their specified physreg.
2633   for (unsigned i = 0; i != RVLocs.size(); ++i) {
2634     CCValAssign VA = RVLocs[i];
2635     SDValue Val;
2636 
2637     if (VA.isRegLoc()) {
2638       Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2639       Chain = Val.getValue(1);
2640       InFlag = Val.getValue(2);
2641     } else if (VA.isMemLoc()) {
2642       report_fatal_error("TODO: return values in memory");
2643     } else
2644       llvm_unreachable("unknown argument location type");
2645 
2646     switch (VA.getLocInfo()) {
2647     case CCValAssign::Full:
2648       break;
2649     case CCValAssign::BCvt:
2650       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2651       break;
2652     case CCValAssign::ZExt:
2653       Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2654                         DAG.getValueType(VA.getValVT()));
2655       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2656       break;
2657     case CCValAssign::SExt:
2658       Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2659                         DAG.getValueType(VA.getValVT()));
2660       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2661       break;
2662     case CCValAssign::AExt:
2663       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2664       break;
2665     default:
2666       llvm_unreachable("Unknown loc info!");
2667     }
2668 
2669     InVals.push_back(Val);
2670   }
2671 
2672   return Chain;
2673 }
2674 
2675 // Add code to pass special inputs required depending on used features separate
2676 // from the explicit user arguments present in the IR.
2677 void SITargetLowering::passSpecialInputs(
2678     CallLoweringInfo &CLI,
2679     CCState &CCInfo,
2680     const SIMachineFunctionInfo &Info,
2681     SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2682     SmallVectorImpl<SDValue> &MemOpChains,
2683     SDValue Chain) const {
2684   // If we don't have a call site, this was a call inserted by
2685   // legalization. These can never use special inputs.
2686   if (!CLI.CB)
2687     return;
2688 
2689   SelectionDAG &DAG = CLI.DAG;
2690   const SDLoc &DL = CLI.DL;
2691   const Function &F = DAG.getMachineFunction().getFunction();
2692 
2693   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2694   const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2695 
2696   const AMDGPUFunctionArgInfo *CalleeArgInfo
2697     = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;
2698   if (const Function *CalleeFunc = CLI.CB->getCalledFunction()) {
2699     auto &ArgUsageInfo =
2700       DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2701     CalleeArgInfo = &ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2702   }
2703 
2704   // TODO: Unify with private memory register handling. This is complicated by
2705   // the fact that at least in kernels, the input argument is not necessarily
2706   // in the same location as the input.
2707   static constexpr std::pair<AMDGPUFunctionArgInfo::PreloadedValue,
2708                              StringLiteral> ImplicitAttrs[] = {
2709     {AMDGPUFunctionArgInfo::DISPATCH_PTR, "amdgpu-no-dispatch-ptr"},
2710     {AMDGPUFunctionArgInfo::QUEUE_PTR, "amdgpu-no-queue-ptr" },
2711     {AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR, "amdgpu-no-implicitarg-ptr"},
2712     {AMDGPUFunctionArgInfo::DISPATCH_ID, "amdgpu-no-dispatch-id"},
2713     {AMDGPUFunctionArgInfo::WORKGROUP_ID_X, "amdgpu-no-workgroup-id-x"},
2714     {AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,"amdgpu-no-workgroup-id-y"},
2715     {AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,"amdgpu-no-workgroup-id-z"}
2716   };
2717 
2718   for (auto Attr : ImplicitAttrs) {
2719     const ArgDescriptor *OutgoingArg;
2720     const TargetRegisterClass *ArgRC;
2721     LLT ArgTy;
2722 
2723     AMDGPUFunctionArgInfo::PreloadedValue InputID = Attr.first;
2724 
2725     // If the callee does not use the attribute value, skip copying the value.
2726     if (CLI.CB->hasFnAttr(Attr.second))
2727       continue;
2728 
2729     std::tie(OutgoingArg, ArgRC, ArgTy) =
2730         CalleeArgInfo->getPreloadedValue(InputID);
2731     if (!OutgoingArg)
2732       continue;
2733 
2734     const ArgDescriptor *IncomingArg;
2735     const TargetRegisterClass *IncomingArgRC;
2736     LLT Ty;
2737     std::tie(IncomingArg, IncomingArgRC, Ty) =
2738         CallerArgInfo.getPreloadedValue(InputID);
2739     assert(IncomingArgRC == ArgRC);
2740 
2741     // All special arguments are ints for now.
2742     EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
2743     SDValue InputReg;
2744 
2745     if (IncomingArg) {
2746       InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2747     } else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) {
2748       // The implicit arg ptr is special because it doesn't have a corresponding
2749       // input for kernels, and is computed from the kernarg segment pointer.
2750       InputReg = getImplicitArgPtr(DAG, DL);
2751     } else {
2752       // We may have proven the input wasn't needed, although the ABI is
2753       // requiring it. We just need to allocate the register appropriately.
2754       InputReg = DAG.getUNDEF(ArgVT);
2755     }
2756 
2757     if (OutgoingArg->isRegister()) {
2758       RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2759       if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
2760         report_fatal_error("failed to allocate implicit input argument");
2761     } else {
2762       unsigned SpecialArgOffset =
2763           CCInfo.AllocateStack(ArgVT.getStoreSize(), Align(4));
2764       SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2765                                               SpecialArgOffset);
2766       MemOpChains.push_back(ArgStore);
2767     }
2768   }
2769 
2770   // Pack workitem IDs into a single register or pass it as is if already
2771   // packed.
2772   const ArgDescriptor *OutgoingArg;
2773   const TargetRegisterClass *ArgRC;
2774   LLT Ty;
2775 
2776   std::tie(OutgoingArg, ArgRC, Ty) =
2777       CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
2778   if (!OutgoingArg)
2779     std::tie(OutgoingArg, ArgRC, Ty) =
2780         CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
2781   if (!OutgoingArg)
2782     std::tie(OutgoingArg, ArgRC, Ty) =
2783         CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
2784   if (!OutgoingArg)
2785     return;
2786 
2787   const ArgDescriptor *IncomingArgX = std::get<0>(
2788       CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X));
2789   const ArgDescriptor *IncomingArgY = std::get<0>(
2790       CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y));
2791   const ArgDescriptor *IncomingArgZ = std::get<0>(
2792       CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z));
2793 
2794   SDValue InputReg;
2795   SDLoc SL;
2796 
2797   const bool NeedWorkItemIDX = !CLI.CB->hasFnAttr("amdgpu-no-workitem-id-x");
2798   const bool NeedWorkItemIDY = !CLI.CB->hasFnAttr("amdgpu-no-workitem-id-y");
2799   const bool NeedWorkItemIDZ = !CLI.CB->hasFnAttr("amdgpu-no-workitem-id-z");
2800 
2801   // If incoming ids are not packed we need to pack them.
2802   if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX &&
2803       NeedWorkItemIDX) {
2804     if (Subtarget->getMaxWorkitemID(F, 0) != 0) {
2805       InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX);
2806     } else {
2807       InputReg = DAG.getConstant(0, DL, MVT::i32);
2808     }
2809   }
2810 
2811   if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY &&
2812       NeedWorkItemIDY && Subtarget->getMaxWorkitemID(F, 1) != 0) {
2813     SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY);
2814     Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y,
2815                     DAG.getShiftAmountConstant(10, MVT::i32, SL));
2816     InputReg = InputReg.getNode() ?
2817                  DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Y) : Y;
2818   }
2819 
2820   if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ &&
2821       NeedWorkItemIDZ && Subtarget->getMaxWorkitemID(F, 2) != 0) {
2822     SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ);
2823     Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z,
2824                     DAG.getShiftAmountConstant(20, MVT::i32, SL));
2825     InputReg = InputReg.getNode() ?
2826                  DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Z) : Z;
2827   }
2828 
2829   if (!InputReg && (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) {
2830     if (!IncomingArgX && !IncomingArgY && !IncomingArgZ) {
2831       // We're in a situation where the outgoing function requires the workitem
2832       // ID, but the calling function does not have it (e.g a graphics function
2833       // calling a C calling convention function). This is illegal, but we need
2834       // to produce something.
2835       InputReg = DAG.getUNDEF(MVT::i32);
2836     } else {
2837       // Workitem ids are already packed, any of present incoming arguments
2838       // will carry all required fields.
2839       ArgDescriptor IncomingArg = ArgDescriptor::createArg(
2840         IncomingArgX ? *IncomingArgX :
2841         IncomingArgY ? *IncomingArgY :
2842         *IncomingArgZ, ~0u);
2843       InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg);
2844     }
2845   }
2846 
2847   if (OutgoingArg->isRegister()) {
2848     if (InputReg)
2849       RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2850 
2851     CCInfo.AllocateReg(OutgoingArg->getRegister());
2852   } else {
2853     unsigned SpecialArgOffset = CCInfo.AllocateStack(4, Align(4));
2854     if (InputReg) {
2855       SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2856                                               SpecialArgOffset);
2857       MemOpChains.push_back(ArgStore);
2858     }
2859   }
2860 }
2861 
2862 static bool canGuaranteeTCO(CallingConv::ID CC) {
2863   return CC == CallingConv::Fast;
2864 }
2865 
2866 /// Return true if we might ever do TCO for calls with this calling convention.
2867 static bool mayTailCallThisCC(CallingConv::ID CC) {
2868   switch (CC) {
2869   case CallingConv::C:
2870   case CallingConv::AMDGPU_Gfx:
2871     return true;
2872   default:
2873     return canGuaranteeTCO(CC);
2874   }
2875 }
2876 
2877 bool SITargetLowering::isEligibleForTailCallOptimization(
2878     SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2879     const SmallVectorImpl<ISD::OutputArg> &Outs,
2880     const SmallVectorImpl<SDValue> &OutVals,
2881     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2882   if (!mayTailCallThisCC(CalleeCC))
2883     return false;
2884 
2885   // For a divergent call target, we need to do a waterfall loop over the
2886   // possible callees which precludes us from using a simple jump.
2887   if (Callee->isDivergent())
2888     return false;
2889 
2890   MachineFunction &MF = DAG.getMachineFunction();
2891   const Function &CallerF = MF.getFunction();
2892   CallingConv::ID CallerCC = CallerF.getCallingConv();
2893   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2894   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2895 
2896   // Kernels aren't callable, and don't have a live in return address so it
2897   // doesn't make sense to do a tail call with entry functions.
2898   if (!CallerPreserved)
2899     return false;
2900 
2901   bool CCMatch = CallerCC == CalleeCC;
2902 
2903   if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
2904     if (canGuaranteeTCO(CalleeCC) && CCMatch)
2905       return true;
2906     return false;
2907   }
2908 
2909   // TODO: Can we handle var args?
2910   if (IsVarArg)
2911     return false;
2912 
2913   for (const Argument &Arg : CallerF.args()) {
2914     if (Arg.hasByValAttr())
2915       return false;
2916   }
2917 
2918   LLVMContext &Ctx = *DAG.getContext();
2919 
2920   // Check that the call results are passed in the same way.
2921   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2922                                   CCAssignFnForCall(CalleeCC, IsVarArg),
2923                                   CCAssignFnForCall(CallerCC, IsVarArg)))
2924     return false;
2925 
2926   // The callee has to preserve all registers the caller needs to preserve.
2927   if (!CCMatch) {
2928     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2929     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2930       return false;
2931   }
2932 
2933   // Nothing more to check if the callee is taking no arguments.
2934   if (Outs.empty())
2935     return true;
2936 
2937   SmallVector<CCValAssign, 16> ArgLocs;
2938   CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2939 
2940   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2941 
2942   const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2943   // If the stack arguments for this call do not fit into our own save area then
2944   // the call cannot be made tail.
2945   // TODO: Is this really necessary?
2946   if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2947     return false;
2948 
2949   const MachineRegisterInfo &MRI = MF.getRegInfo();
2950   return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2951 }
2952 
2953 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2954   if (!CI->isTailCall())
2955     return false;
2956 
2957   const Function *ParentFn = CI->getParent()->getParent();
2958   if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2959     return false;
2960   return true;
2961 }
2962 
2963 // The wave scratch offset register is used as the global base pointer.
2964 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2965                                     SmallVectorImpl<SDValue> &InVals) const {
2966   SelectionDAG &DAG = CLI.DAG;
2967   const SDLoc &DL = CLI.DL;
2968   SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2969   SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2970   SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2971   SDValue Chain = CLI.Chain;
2972   SDValue Callee = CLI.Callee;
2973   bool &IsTailCall = CLI.IsTailCall;
2974   CallingConv::ID CallConv = CLI.CallConv;
2975   bool IsVarArg = CLI.IsVarArg;
2976   bool IsSibCall = false;
2977   bool IsThisReturn = false;
2978   MachineFunction &MF = DAG.getMachineFunction();
2979 
2980   if (Callee.isUndef() || isNullConstant(Callee)) {
2981     if (!CLI.IsTailCall) {
2982       for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
2983         InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
2984     }
2985 
2986     return Chain;
2987   }
2988 
2989   if (IsVarArg) {
2990     return lowerUnhandledCall(CLI, InVals,
2991                               "unsupported call to variadic function ");
2992   }
2993 
2994   if (!CLI.CB)
2995     report_fatal_error("unsupported libcall legalization");
2996 
2997   if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2998     return lowerUnhandledCall(CLI, InVals,
2999                               "unsupported required tail call to function ");
3000   }
3001 
3002   if (AMDGPU::isShader(CallConv)) {
3003     // Note the issue is with the CC of the called function, not of the call
3004     // itself.
3005     return lowerUnhandledCall(CLI, InVals,
3006                               "unsupported call to a shader function ");
3007   }
3008 
3009   if (AMDGPU::isShader(MF.getFunction().getCallingConv()) &&
3010       CallConv != CallingConv::AMDGPU_Gfx) {
3011     // Only allow calls with specific calling conventions.
3012     return lowerUnhandledCall(CLI, InVals,
3013                               "unsupported calling convention for call from "
3014                               "graphics shader of function ");
3015   }
3016 
3017   if (IsTailCall) {
3018     IsTailCall = isEligibleForTailCallOptimization(
3019       Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
3020     if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall()) {
3021       report_fatal_error("failed to perform tail call elimination on a call "
3022                          "site marked musttail");
3023     }
3024 
3025     bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
3026 
3027     // A sibling call is one where we're under the usual C ABI and not planning
3028     // to change that but can still do a tail call:
3029     if (!TailCallOpt && IsTailCall)
3030       IsSibCall = true;
3031 
3032     if (IsTailCall)
3033       ++NumTailCalls;
3034   }
3035 
3036   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
3037   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
3038   SmallVector<SDValue, 8> MemOpChains;
3039 
3040   // Analyze operands of the call, assigning locations to each operand.
3041   SmallVector<CCValAssign, 16> ArgLocs;
3042   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
3043   CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
3044 
3045   if (CallConv != CallingConv::AMDGPU_Gfx) {
3046     // With a fixed ABI, allocate fixed registers before user arguments.
3047     passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain);
3048   }
3049 
3050   CCInfo.AnalyzeCallOperands(Outs, AssignFn);
3051 
3052   // Get a count of how many bytes are to be pushed on the stack.
3053   unsigned NumBytes = CCInfo.getNextStackOffset();
3054 
3055   if (IsSibCall) {
3056     // Since we're not changing the ABI to make this a tail call, the memory
3057     // operands are already available in the caller's incoming argument space.
3058     NumBytes = 0;
3059   }
3060 
3061   // FPDiff is the byte offset of the call's argument area from the callee's.
3062   // Stores to callee stack arguments will be placed in FixedStackSlots offset
3063   // by this amount for a tail call. In a sibling call it must be 0 because the
3064   // caller will deallocate the entire stack and the callee still expects its
3065   // arguments to begin at SP+0. Completely unused for non-tail calls.
3066   int32_t FPDiff = 0;
3067   MachineFrameInfo &MFI = MF.getFrameInfo();
3068 
3069   // Adjust the stack pointer for the new arguments...
3070   // These operations are automatically eliminated by the prolog/epilog pass
3071   if (!IsSibCall) {
3072     Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
3073 
3074     if (!Subtarget->enableFlatScratch()) {
3075       SmallVector<SDValue, 4> CopyFromChains;
3076 
3077       // In the HSA case, this should be an identity copy.
3078       SDValue ScratchRSrcReg
3079         = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
3080       RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
3081       CopyFromChains.push_back(ScratchRSrcReg.getValue(1));
3082       Chain = DAG.getTokenFactor(DL, CopyFromChains);
3083     }
3084   }
3085 
3086   MVT PtrVT = MVT::i32;
3087 
3088   // Walk the register/memloc assignments, inserting copies/loads.
3089   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3090     CCValAssign &VA = ArgLocs[i];
3091     SDValue Arg = OutVals[i];
3092 
3093     // Promote the value if needed.
3094     switch (VA.getLocInfo()) {
3095     case CCValAssign::Full:
3096       break;
3097     case CCValAssign::BCvt:
3098       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
3099       break;
3100     case CCValAssign::ZExt:
3101       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
3102       break;
3103     case CCValAssign::SExt:
3104       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
3105       break;
3106     case CCValAssign::AExt:
3107       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
3108       break;
3109     case CCValAssign::FPExt:
3110       Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
3111       break;
3112     default:
3113       llvm_unreachable("Unknown loc info!");
3114     }
3115 
3116     if (VA.isRegLoc()) {
3117       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3118     } else {
3119       assert(VA.isMemLoc());
3120 
3121       SDValue DstAddr;
3122       MachinePointerInfo DstInfo;
3123 
3124       unsigned LocMemOffset = VA.getLocMemOffset();
3125       int32_t Offset = LocMemOffset;
3126 
3127       SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT);
3128       MaybeAlign Alignment;
3129 
3130       if (IsTailCall) {
3131         ISD::ArgFlagsTy Flags = Outs[i].Flags;
3132         unsigned OpSize = Flags.isByVal() ?
3133           Flags.getByValSize() : VA.getValVT().getStoreSize();
3134 
3135         // FIXME: We can have better than the minimum byval required alignment.
3136         Alignment =
3137             Flags.isByVal()
3138                 ? Flags.getNonZeroByValAlign()
3139                 : commonAlignment(Subtarget->getStackAlignment(), Offset);
3140 
3141         Offset = Offset + FPDiff;
3142         int FI = MFI.CreateFixedObject(OpSize, Offset, true);
3143 
3144         DstAddr = DAG.getFrameIndex(FI, PtrVT);
3145         DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
3146 
3147         // Make sure any stack arguments overlapping with where we're storing
3148         // are loaded before this eventual operation. Otherwise they'll be
3149         // clobbered.
3150 
3151         // FIXME: Why is this really necessary? This seems to just result in a
3152         // lot of code to copy the stack and write them back to the same
3153         // locations, which are supposed to be immutable?
3154         Chain = addTokenForArgument(Chain, DAG, MFI, FI);
3155       } else {
3156         // Stores to the argument stack area are relative to the stack pointer.
3157         SDValue SP = DAG.getCopyFromReg(Chain, DL, Info->getStackPtrOffsetReg(),
3158                                         MVT::i32);
3159         DstAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, SP, PtrOff);
3160         DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
3161         Alignment =
3162             commonAlignment(Subtarget->getStackAlignment(), LocMemOffset);
3163       }
3164 
3165       if (Outs[i].Flags.isByVal()) {
3166         SDValue SizeNode =
3167             DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
3168         SDValue Cpy =
3169             DAG.getMemcpy(Chain, DL, DstAddr, Arg, SizeNode,
3170                           Outs[i].Flags.getNonZeroByValAlign(),
3171                           /*isVol = */ false, /*AlwaysInline = */ true,
3172                           /*isTailCall = */ false, DstInfo,
3173                           MachinePointerInfo(AMDGPUAS::PRIVATE_ADDRESS));
3174 
3175         MemOpChains.push_back(Cpy);
3176       } else {
3177         SDValue Store =
3178             DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Alignment);
3179         MemOpChains.push_back(Store);
3180       }
3181     }
3182   }
3183 
3184   if (!MemOpChains.empty())
3185     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
3186 
3187   // Build a sequence of copy-to-reg nodes chained together with token chain
3188   // and flag operands which copy the outgoing args into the appropriate regs.
3189   SDValue InFlag;
3190   for (auto &RegToPass : RegsToPass) {
3191     Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
3192                              RegToPass.second, InFlag);
3193     InFlag = Chain.getValue(1);
3194   }
3195 
3196 
3197   // We don't usually want to end the call-sequence here because we would tidy
3198   // the frame up *after* the call, however in the ABI-changing tail-call case
3199   // we've carefully laid out the parameters so that when sp is reset they'll be
3200   // in the correct location.
3201   if (IsTailCall && !IsSibCall) {
3202     Chain = DAG.getCALLSEQ_END(Chain,
3203                                DAG.getTargetConstant(NumBytes, DL, MVT::i32),
3204                                DAG.getTargetConstant(0, DL, MVT::i32),
3205                                InFlag, DL);
3206     InFlag = Chain.getValue(1);
3207   }
3208 
3209   std::vector<SDValue> Ops;
3210   Ops.push_back(Chain);
3211   Ops.push_back(Callee);
3212   // Add a redundant copy of the callee global which will not be legalized, as
3213   // we need direct access to the callee later.
3214   if (GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(Callee)) {
3215     const GlobalValue *GV = GSD->getGlobal();
3216     Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64));
3217   } else {
3218     Ops.push_back(DAG.getTargetConstant(0, DL, MVT::i64));
3219   }
3220 
3221   if (IsTailCall) {
3222     // Each tail call may have to adjust the stack by a different amount, so
3223     // this information must travel along with the operation for eventual
3224     // consumption by emitEpilogue.
3225     Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
3226   }
3227 
3228   // Add argument registers to the end of the list so that they are known live
3229   // into the call.
3230   for (auto &RegToPass : RegsToPass) {
3231     Ops.push_back(DAG.getRegister(RegToPass.first,
3232                                   RegToPass.second.getValueType()));
3233   }
3234 
3235   // Add a register mask operand representing the call-preserved registers.
3236 
3237   auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
3238   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
3239   assert(Mask && "Missing call preserved mask for calling convention");
3240   Ops.push_back(DAG.getRegisterMask(Mask));
3241 
3242   if (InFlag.getNode())
3243     Ops.push_back(InFlag);
3244 
3245   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3246 
3247   // If we're doing a tall call, use a TC_RETURN here rather than an
3248   // actual call instruction.
3249   if (IsTailCall) {
3250     MFI.setHasTailCall();
3251     return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
3252   }
3253 
3254   // Returns a chain and a flag for retval copy to use.
3255   SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
3256   Chain = Call.getValue(0);
3257   InFlag = Call.getValue(1);
3258 
3259   uint64_t CalleePopBytes = NumBytes;
3260   Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
3261                              DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
3262                              InFlag, DL);
3263   if (!Ins.empty())
3264     InFlag = Chain.getValue(1);
3265 
3266   // Handle result values, copying them out of physregs into vregs that we
3267   // return.
3268   return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
3269                          InVals, IsThisReturn,
3270                          IsThisReturn ? OutVals[0] : SDValue());
3271 }
3272 
3273 // This is identical to the default implementation in ExpandDYNAMIC_STACKALLOC,
3274 // except for applying the wave size scale to the increment amount.
3275 SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(
3276     SDValue Op, SelectionDAG &DAG) const {
3277   const MachineFunction &MF = DAG.getMachineFunction();
3278   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
3279 
3280   SDLoc dl(Op);
3281   EVT VT = Op.getValueType();
3282   SDValue Tmp1 = Op;
3283   SDValue Tmp2 = Op.getValue(1);
3284   SDValue Tmp3 = Op.getOperand(2);
3285   SDValue Chain = Tmp1.getOperand(0);
3286 
3287   Register SPReg = Info->getStackPtrOffsetReg();
3288 
3289   // Chain the dynamic stack allocation so that it doesn't modify the stack
3290   // pointer when other instructions are using the stack.
3291   Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
3292 
3293   SDValue Size  = Tmp2.getOperand(1);
3294   SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
3295   Chain = SP.getValue(1);
3296   MaybeAlign Alignment = cast<ConstantSDNode>(Tmp3)->getMaybeAlignValue();
3297   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
3298   const TargetFrameLowering *TFL = ST.getFrameLowering();
3299   unsigned Opc =
3300     TFL->getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp ?
3301     ISD::ADD : ISD::SUB;
3302 
3303   SDValue ScaledSize = DAG.getNode(
3304       ISD::SHL, dl, VT, Size,
3305       DAG.getConstant(ST.getWavefrontSizeLog2(), dl, MVT::i32));
3306 
3307   Align StackAlign = TFL->getStackAlign();
3308   Tmp1 = DAG.getNode(Opc, dl, VT, SP, ScaledSize); // Value
3309   if (Alignment && *Alignment > StackAlign) {
3310     Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
3311                        DAG.getConstant(-(uint64_t)Alignment->value()
3312                                            << ST.getWavefrontSizeLog2(),
3313                                        dl, VT));
3314   }
3315 
3316   Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1);    // Output chain
3317   Tmp2 = DAG.getCALLSEQ_END(
3318       Chain, DAG.getIntPtrConstant(0, dl, true),
3319       DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
3320 
3321   return DAG.getMergeValues({Tmp1, Tmp2}, dl);
3322 }
3323 
3324 SDValue SITargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
3325                                                   SelectionDAG &DAG) const {
3326   // We only handle constant sizes here to allow non-entry block, static sized
3327   // allocas. A truly dynamic value is more difficult to support because we
3328   // don't know if the size value is uniform or not. If the size isn't uniform,
3329   // we would need to do a wave reduction to get the maximum size to know how
3330   // much to increment the uniform stack pointer.
3331   SDValue Size = Op.getOperand(1);
3332   if (isa<ConstantSDNode>(Size))
3333       return lowerDYNAMIC_STACKALLOCImpl(Op, DAG); // Use "generic" expansion.
3334 
3335   return AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(Op, DAG);
3336 }
3337 
3338 Register SITargetLowering::getRegisterByName(const char* RegName, LLT VT,
3339                                              const MachineFunction &MF) const {
3340   Register Reg = StringSwitch<Register>(RegName)
3341     .Case("m0", AMDGPU::M0)
3342     .Case("exec", AMDGPU::EXEC)
3343     .Case("exec_lo", AMDGPU::EXEC_LO)
3344     .Case("exec_hi", AMDGPU::EXEC_HI)
3345     .Case("flat_scratch", AMDGPU::FLAT_SCR)
3346     .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
3347     .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
3348     .Default(Register());
3349 
3350   if (Reg == AMDGPU::NoRegister) {
3351     report_fatal_error(Twine("invalid register name \""
3352                              + StringRef(RegName)  + "\"."));
3353 
3354   }
3355 
3356   if (!Subtarget->hasFlatScrRegister() &&
3357        Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
3358     report_fatal_error(Twine("invalid register \""
3359                              + StringRef(RegName)  + "\" for subtarget."));
3360   }
3361 
3362   switch (Reg) {
3363   case AMDGPU::M0:
3364   case AMDGPU::EXEC_LO:
3365   case AMDGPU::EXEC_HI:
3366   case AMDGPU::FLAT_SCR_LO:
3367   case AMDGPU::FLAT_SCR_HI:
3368     if (VT.getSizeInBits() == 32)
3369       return Reg;
3370     break;
3371   case AMDGPU::EXEC:
3372   case AMDGPU::FLAT_SCR:
3373     if (VT.getSizeInBits() == 64)
3374       return Reg;
3375     break;
3376   default:
3377     llvm_unreachable("missing register type checking");
3378   }
3379 
3380   report_fatal_error(Twine("invalid type for register \""
3381                            + StringRef(RegName) + "\"."));
3382 }
3383 
3384 // If kill is not the last instruction, split the block so kill is always a
3385 // proper terminator.
3386 MachineBasicBlock *
3387 SITargetLowering::splitKillBlock(MachineInstr &MI,
3388                                  MachineBasicBlock *BB) const {
3389   MachineBasicBlock *SplitBB = BB->splitAt(MI, false /*UpdateLiveIns*/);
3390   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3391   MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
3392   return SplitBB;
3393 }
3394 
3395 // Split block \p MBB at \p MI, as to insert a loop. If \p InstInLoop is true,
3396 // \p MI will be the only instruction in the loop body block. Otherwise, it will
3397 // be the first instruction in the remainder block.
3398 //
3399 /// \returns { LoopBody, Remainder }
3400 static std::pair<MachineBasicBlock *, MachineBasicBlock *>
3401 splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) {
3402   MachineFunction *MF = MBB.getParent();
3403   MachineBasicBlock::iterator I(&MI);
3404 
3405   // To insert the loop we need to split the block. Move everything after this
3406   // point to a new block, and insert a new empty block between the two.
3407   MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
3408   MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
3409   MachineFunction::iterator MBBI(MBB);
3410   ++MBBI;
3411 
3412   MF->insert(MBBI, LoopBB);
3413   MF->insert(MBBI, RemainderBB);
3414 
3415   LoopBB->addSuccessor(LoopBB);
3416   LoopBB->addSuccessor(RemainderBB);
3417 
3418   // Move the rest of the block into a new block.
3419   RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
3420 
3421   if (InstInLoop) {
3422     auto Next = std::next(I);
3423 
3424     // Move instruction to loop body.
3425     LoopBB->splice(LoopBB->begin(), &MBB, I, Next);
3426 
3427     // Move the rest of the block.
3428     RemainderBB->splice(RemainderBB->begin(), &MBB, Next, MBB.end());
3429   } else {
3430     RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
3431   }
3432 
3433   MBB.addSuccessor(LoopBB);
3434 
3435   return std::make_pair(LoopBB, RemainderBB);
3436 }
3437 
3438 /// Insert \p MI into a BUNDLE with an S_WAITCNT 0 immediately following it.
3439 void SITargetLowering::bundleInstWithWaitcnt(MachineInstr &MI) const {
3440   MachineBasicBlock *MBB = MI.getParent();
3441   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3442   auto I = MI.getIterator();
3443   auto E = std::next(I);
3444 
3445   BuildMI(*MBB, E, MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
3446     .addImm(0);
3447 
3448   MIBundleBuilder Bundler(*MBB, I, E);
3449   finalizeBundle(*MBB, Bundler.begin());
3450 }
3451 
3452 MachineBasicBlock *
3453 SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI,
3454                                          MachineBasicBlock *BB) const {
3455   const DebugLoc &DL = MI.getDebugLoc();
3456 
3457   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3458 
3459   MachineBasicBlock *LoopBB;
3460   MachineBasicBlock *RemainderBB;
3461   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3462 
3463   // Apparently kill flags are only valid if the def is in the same block?
3464   if (MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0))
3465     Src->setIsKill(false);
3466 
3467   std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, *BB, true);
3468 
3469   MachineBasicBlock::iterator I = LoopBB->end();
3470 
3471   const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg(
3472     AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1);
3473 
3474   // Clear TRAP_STS.MEM_VIOL
3475   BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::S_SETREG_IMM32_B32))
3476     .addImm(0)
3477     .addImm(EncodedReg);
3478 
3479   bundleInstWithWaitcnt(MI);
3480 
3481   Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3482 
3483   // Load and check TRAP_STS.MEM_VIOL
3484   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg)
3485     .addImm(EncodedReg);
3486 
3487   // FIXME: Do we need to use an isel pseudo that may clobber scc?
3488   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32))
3489     .addReg(Reg, RegState::Kill)
3490     .addImm(0);
3491   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3492     .addMBB(LoopBB);
3493 
3494   return RemainderBB;
3495 }
3496 
3497 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
3498 // wavefront. If the value is uniform and just happens to be in a VGPR, this
3499 // will only do one iteration. In the worst case, this will loop 64 times.
3500 //
3501 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
3502 static MachineBasicBlock::iterator
3503 emitLoadM0FromVGPRLoop(const SIInstrInfo *TII, MachineRegisterInfo &MRI,
3504                        MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB,
3505                        const DebugLoc &DL, const MachineOperand &Idx,
3506                        unsigned InitReg, unsigned ResultReg, unsigned PhiReg,
3507                        unsigned InitSaveExecReg, int Offset, bool UseGPRIdxMode,
3508                        Register &SGPRIdxReg) {
3509 
3510   MachineFunction *MF = OrigBB.getParent();
3511   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3512   const SIRegisterInfo *TRI = ST.getRegisterInfo();
3513   MachineBasicBlock::iterator I = LoopBB.begin();
3514 
3515   const TargetRegisterClass *BoolRC = TRI->getBoolRC();
3516   Register PhiExec = MRI.createVirtualRegister(BoolRC);
3517   Register NewExec = MRI.createVirtualRegister(BoolRC);
3518   Register CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3519   Register CondReg = MRI.createVirtualRegister(BoolRC);
3520 
3521   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
3522     .addReg(InitReg)
3523     .addMBB(&OrigBB)
3524     .addReg(ResultReg)
3525     .addMBB(&LoopBB);
3526 
3527   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
3528     .addReg(InitSaveExecReg)
3529     .addMBB(&OrigBB)
3530     .addReg(NewExec)
3531     .addMBB(&LoopBB);
3532 
3533   // Read the next variant <- also loop target.
3534   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
3535       .addReg(Idx.getReg(), getUndefRegState(Idx.isUndef()));
3536 
3537   // Compare the just read M0 value to all possible Idx values.
3538   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
3539       .addReg(CurrentIdxReg)
3540       .addReg(Idx.getReg(), 0, Idx.getSubReg());
3541 
3542   // Update EXEC, save the original EXEC value to VCC.
3543   BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32
3544                                                 : AMDGPU::S_AND_SAVEEXEC_B64),
3545           NewExec)
3546     .addReg(CondReg, RegState::Kill);
3547 
3548   MRI.setSimpleHint(NewExec, CondReg);
3549 
3550   if (UseGPRIdxMode) {
3551     if (Offset == 0) {
3552       SGPRIdxReg = CurrentIdxReg;
3553     } else {
3554       SGPRIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3555       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), SGPRIdxReg)
3556           .addReg(CurrentIdxReg, RegState::Kill)
3557           .addImm(Offset);
3558     }
3559   } else {
3560     // Move index from VCC into M0
3561     if (Offset == 0) {
3562       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3563         .addReg(CurrentIdxReg, RegState::Kill);
3564     } else {
3565       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
3566         .addReg(CurrentIdxReg, RegState::Kill)
3567         .addImm(Offset);
3568     }
3569   }
3570 
3571   // Update EXEC, switch all done bits to 0 and all todo bits to 1.
3572   unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3573   MachineInstr *InsertPt =
3574     BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term
3575                                                   : AMDGPU::S_XOR_B64_term), Exec)
3576       .addReg(Exec)
3577       .addReg(NewExec);
3578 
3579   // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
3580   // s_cbranch_scc0?
3581 
3582   // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
3583   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
3584     .addMBB(&LoopBB);
3585 
3586   return InsertPt->getIterator();
3587 }
3588 
3589 // This has slightly sub-optimal regalloc when the source vector is killed by
3590 // the read. The register allocator does not understand that the kill is
3591 // per-workitem, so is kept alive for the whole loop so we end up not re-using a
3592 // subregister from it, using 1 more VGPR than necessary. This was saved when
3593 // this was expanded after register allocation.
3594 static MachineBasicBlock::iterator
3595 loadM0FromVGPR(const SIInstrInfo *TII, MachineBasicBlock &MBB, MachineInstr &MI,
3596                unsigned InitResultReg, unsigned PhiReg, int Offset,
3597                bool UseGPRIdxMode, Register &SGPRIdxReg) {
3598   MachineFunction *MF = MBB.getParent();
3599   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3600   const SIRegisterInfo *TRI = ST.getRegisterInfo();
3601   MachineRegisterInfo &MRI = MF->getRegInfo();
3602   const DebugLoc &DL = MI.getDebugLoc();
3603   MachineBasicBlock::iterator I(&MI);
3604 
3605   const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
3606   Register DstReg = MI.getOperand(0).getReg();
3607   Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
3608   Register TmpExec = MRI.createVirtualRegister(BoolXExecRC);
3609   unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3610   unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
3611 
3612   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
3613 
3614   // Save the EXEC mask
3615   BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec)
3616     .addReg(Exec);
3617 
3618   MachineBasicBlock *LoopBB;
3619   MachineBasicBlock *RemainderBB;
3620   std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, MBB, false);
3621 
3622   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3623 
3624   auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
3625                                       InitResultReg, DstReg, PhiReg, TmpExec,
3626                                       Offset, UseGPRIdxMode, SGPRIdxReg);
3627 
3628   MachineBasicBlock* LandingPad = MF->CreateMachineBasicBlock();
3629   MachineFunction::iterator MBBI(LoopBB);
3630   ++MBBI;
3631   MF->insert(MBBI, LandingPad);
3632   LoopBB->removeSuccessor(RemainderBB);
3633   LandingPad->addSuccessor(RemainderBB);
3634   LoopBB->addSuccessor(LandingPad);
3635   MachineBasicBlock::iterator First = LandingPad->begin();
3636   BuildMI(*LandingPad, First, DL, TII->get(MovExecOpc), Exec)
3637     .addReg(SaveExec);
3638 
3639   return InsPt;
3640 }
3641 
3642 // Returns subreg index, offset
3643 static std::pair<unsigned, int>
3644 computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
3645                             const TargetRegisterClass *SuperRC,
3646                             unsigned VecReg,
3647                             int Offset) {
3648   int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
3649 
3650   // Skip out of bounds offsets, or else we would end up using an undefined
3651   // register.
3652   if (Offset >= NumElts || Offset < 0)
3653     return std::make_pair(AMDGPU::sub0, Offset);
3654 
3655   return std::make_pair(SIRegisterInfo::getSubRegFromChannel(Offset), 0);
3656 }
3657 
3658 static void setM0ToIndexFromSGPR(const SIInstrInfo *TII,
3659                                  MachineRegisterInfo &MRI, MachineInstr &MI,
3660                                  int Offset) {
3661   MachineBasicBlock *MBB = MI.getParent();
3662   const DebugLoc &DL = MI.getDebugLoc();
3663   MachineBasicBlock::iterator I(&MI);
3664 
3665   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3666 
3667   assert(Idx->getReg() != AMDGPU::NoRegister);
3668 
3669   if (Offset == 0) {
3670     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0).add(*Idx);
3671   } else {
3672     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
3673         .add(*Idx)
3674         .addImm(Offset);
3675   }
3676 }
3677 
3678 static Register getIndirectSGPRIdx(const SIInstrInfo *TII,
3679                                    MachineRegisterInfo &MRI, MachineInstr &MI,
3680                                    int Offset) {
3681   MachineBasicBlock *MBB = MI.getParent();
3682   const DebugLoc &DL = MI.getDebugLoc();
3683   MachineBasicBlock::iterator I(&MI);
3684 
3685   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3686 
3687   if (Offset == 0)
3688     return Idx->getReg();
3689 
3690   Register Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3691   BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
3692       .add(*Idx)
3693       .addImm(Offset);
3694   return Tmp;
3695 }
3696 
3697 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
3698                                           MachineBasicBlock &MBB,
3699                                           const GCNSubtarget &ST) {
3700   const SIInstrInfo *TII = ST.getInstrInfo();
3701   const SIRegisterInfo &TRI = TII->getRegisterInfo();
3702   MachineFunction *MF = MBB.getParent();
3703   MachineRegisterInfo &MRI = MF->getRegInfo();
3704 
3705   Register Dst = MI.getOperand(0).getReg();
3706   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3707   Register SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
3708   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3709 
3710   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
3711   const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
3712 
3713   unsigned SubReg;
3714   std::tie(SubReg, Offset)
3715     = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
3716 
3717   const bool UseGPRIdxMode = ST.useVGPRIndexMode();
3718 
3719   // Check for a SGPR index.
3720   if (TII->getRegisterInfo().isSGPRClass(IdxRC)) {
3721     MachineBasicBlock::iterator I(&MI);
3722     const DebugLoc &DL = MI.getDebugLoc();
3723 
3724     if (UseGPRIdxMode) {
3725       // TODO: Look at the uses to avoid the copy. This may require rescheduling
3726       // to avoid interfering with other uses, so probably requires a new
3727       // optimization pass.
3728       Register Idx = getIndirectSGPRIdx(TII, MRI, MI, Offset);
3729 
3730       const MCInstrDesc &GPRIDXDesc =
3731           TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), true);
3732       BuildMI(MBB, I, DL, GPRIDXDesc, Dst)
3733           .addReg(SrcReg)
3734           .addReg(Idx)
3735           .addImm(SubReg);
3736     } else {
3737       setM0ToIndexFromSGPR(TII, MRI, MI, Offset);
3738 
3739       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3740         .addReg(SrcReg, 0, SubReg)
3741         .addReg(SrcReg, RegState::Implicit);
3742     }
3743 
3744     MI.eraseFromParent();
3745 
3746     return &MBB;
3747   }
3748 
3749   // Control flow needs to be inserted if indexing with a VGPR.
3750   const DebugLoc &DL = MI.getDebugLoc();
3751   MachineBasicBlock::iterator I(&MI);
3752 
3753   Register PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3754   Register InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3755 
3756   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
3757 
3758   Register SGPRIdxReg;
3759   auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset,
3760                               UseGPRIdxMode, SGPRIdxReg);
3761 
3762   MachineBasicBlock *LoopBB = InsPt->getParent();
3763 
3764   if (UseGPRIdxMode) {
3765     const MCInstrDesc &GPRIDXDesc =
3766         TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), true);
3767 
3768     BuildMI(*LoopBB, InsPt, DL, GPRIDXDesc, Dst)
3769         .addReg(SrcReg)
3770         .addReg(SGPRIdxReg)
3771         .addImm(SubReg);
3772   } else {
3773     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3774       .addReg(SrcReg, 0, SubReg)
3775       .addReg(SrcReg, RegState::Implicit);
3776   }
3777 
3778   MI.eraseFromParent();
3779 
3780   return LoopBB;
3781 }
3782 
3783 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3784                                           MachineBasicBlock &MBB,
3785                                           const GCNSubtarget &ST) {
3786   const SIInstrInfo *TII = ST.getInstrInfo();
3787   const SIRegisterInfo &TRI = TII->getRegisterInfo();
3788   MachineFunction *MF = MBB.getParent();
3789   MachineRegisterInfo &MRI = MF->getRegInfo();
3790 
3791   Register Dst = MI.getOperand(0).getReg();
3792   const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3793   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3794   const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3795   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3796   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3797   const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
3798 
3799   // This can be an immediate, but will be folded later.
3800   assert(Val->getReg());
3801 
3802   unsigned SubReg;
3803   std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3804                                                          SrcVec->getReg(),
3805                                                          Offset);
3806   const bool UseGPRIdxMode = ST.useVGPRIndexMode();
3807 
3808   if (Idx->getReg() == AMDGPU::NoRegister) {
3809     MachineBasicBlock::iterator I(&MI);
3810     const DebugLoc &DL = MI.getDebugLoc();
3811 
3812     assert(Offset == 0);
3813 
3814     BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
3815         .add(*SrcVec)
3816         .add(*Val)
3817         .addImm(SubReg);
3818 
3819     MI.eraseFromParent();
3820     return &MBB;
3821   }
3822 
3823   // Check for a SGPR index.
3824   if (TII->getRegisterInfo().isSGPRClass(IdxRC)) {
3825     MachineBasicBlock::iterator I(&MI);
3826     const DebugLoc &DL = MI.getDebugLoc();
3827 
3828     if (UseGPRIdxMode) {
3829       Register Idx = getIndirectSGPRIdx(TII, MRI, MI, Offset);
3830 
3831       const MCInstrDesc &GPRIDXDesc =
3832           TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
3833       BuildMI(MBB, I, DL, GPRIDXDesc, Dst)
3834           .addReg(SrcVec->getReg())
3835           .add(*Val)
3836           .addReg(Idx)
3837           .addImm(SubReg);
3838     } else {
3839       setM0ToIndexFromSGPR(TII, MRI, MI, Offset);
3840 
3841       const MCInstrDesc &MovRelDesc = TII->getIndirectRegWriteMovRelPseudo(
3842           TRI.getRegSizeInBits(*VecRC), 32, false);
3843       BuildMI(MBB, I, DL, MovRelDesc, Dst)
3844           .addReg(SrcVec->getReg())
3845           .add(*Val)
3846           .addImm(SubReg);
3847     }
3848     MI.eraseFromParent();
3849     return &MBB;
3850   }
3851 
3852   // Control flow needs to be inserted if indexing with a VGPR.
3853   if (Val->isReg())
3854     MRI.clearKillFlags(Val->getReg());
3855 
3856   const DebugLoc &DL = MI.getDebugLoc();
3857 
3858   Register PhiReg = MRI.createVirtualRegister(VecRC);
3859 
3860   Register SGPRIdxReg;
3861   auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, Offset,
3862                               UseGPRIdxMode, SGPRIdxReg);
3863   MachineBasicBlock *LoopBB = InsPt->getParent();
3864 
3865   if (UseGPRIdxMode) {
3866     const MCInstrDesc &GPRIDXDesc =
3867         TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
3868 
3869     BuildMI(*LoopBB, InsPt, DL, GPRIDXDesc, Dst)
3870         .addReg(PhiReg)
3871         .add(*Val)
3872         .addReg(SGPRIdxReg)
3873         .addImm(AMDGPU::sub0);
3874   } else {
3875     const MCInstrDesc &MovRelDesc = TII->getIndirectRegWriteMovRelPseudo(
3876         TRI.getRegSizeInBits(*VecRC), 32, false);
3877     BuildMI(*LoopBB, InsPt, DL, MovRelDesc, Dst)
3878         .addReg(PhiReg)
3879         .add(*Val)
3880         .addImm(AMDGPU::sub0);
3881   }
3882 
3883   MI.eraseFromParent();
3884   return LoopBB;
3885 }
3886 
3887 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3888   MachineInstr &MI, MachineBasicBlock *BB) const {
3889 
3890   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3891   MachineFunction *MF = BB->getParent();
3892   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3893 
3894   switch (MI.getOpcode()) {
3895   case AMDGPU::S_UADDO_PSEUDO:
3896   case AMDGPU::S_USUBO_PSEUDO: {
3897     const DebugLoc &DL = MI.getDebugLoc();
3898     MachineOperand &Dest0 = MI.getOperand(0);
3899     MachineOperand &Dest1 = MI.getOperand(1);
3900     MachineOperand &Src0 = MI.getOperand(2);
3901     MachineOperand &Src1 = MI.getOperand(3);
3902 
3903     unsigned Opc = (MI.getOpcode() == AMDGPU::S_UADDO_PSEUDO)
3904                        ? AMDGPU::S_ADD_I32
3905                        : AMDGPU::S_SUB_I32;
3906     BuildMI(*BB, MI, DL, TII->get(Opc), Dest0.getReg()).add(Src0).add(Src1);
3907 
3908     BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CSELECT_B64), Dest1.getReg())
3909         .addImm(1)
3910         .addImm(0);
3911 
3912     MI.eraseFromParent();
3913     return BB;
3914   }
3915   case AMDGPU::S_ADD_U64_PSEUDO:
3916   case AMDGPU::S_SUB_U64_PSEUDO: {
3917     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3918     const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3919     const SIRegisterInfo *TRI = ST.getRegisterInfo();
3920     const TargetRegisterClass *BoolRC = TRI->getBoolRC();
3921     const DebugLoc &DL = MI.getDebugLoc();
3922 
3923     MachineOperand &Dest = MI.getOperand(0);
3924     MachineOperand &Src0 = MI.getOperand(1);
3925     MachineOperand &Src1 = MI.getOperand(2);
3926 
3927     Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3928     Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3929 
3930     MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(
3931         MI, MRI, Src0, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass);
3932     MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(
3933         MI, MRI, Src0, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass);
3934 
3935     MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(
3936         MI, MRI, Src1, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass);
3937     MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(
3938         MI, MRI, Src1, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass);
3939 
3940     bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3941 
3942     unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3943     unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3944     BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0).add(Src0Sub0).add(Src1Sub0);
3945     BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1).add(Src0Sub1).add(Src1Sub1);
3946     BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3947         .addReg(DestSub0)
3948         .addImm(AMDGPU::sub0)
3949         .addReg(DestSub1)
3950         .addImm(AMDGPU::sub1);
3951     MI.eraseFromParent();
3952     return BB;
3953   }
3954   case AMDGPU::V_ADD_U64_PSEUDO:
3955   case AMDGPU::V_SUB_U64_PSEUDO: {
3956     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3957     const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3958     const SIRegisterInfo *TRI = ST.getRegisterInfo();
3959     const DebugLoc &DL = MI.getDebugLoc();
3960 
3961     bool IsAdd = (MI.getOpcode() == AMDGPU::V_ADD_U64_PSEUDO);
3962 
3963     MachineOperand &Dest = MI.getOperand(0);
3964     MachineOperand &Src0 = MI.getOperand(1);
3965     MachineOperand &Src1 = MI.getOperand(2);
3966 
3967     if (IsAdd && ST.hasLshlAddB64()) {
3968       auto Add = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_LSHL_ADD_U64_e64),
3969                          Dest.getReg())
3970                      .add(Src0)
3971                      .addImm(0)
3972                      .add(Src1);
3973       TII->legalizeOperands(*Add);
3974       MI.eraseFromParent();
3975       return BB;
3976     }
3977 
3978     const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
3979 
3980     Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3981     Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3982 
3983     Register CarryReg = MRI.createVirtualRegister(CarryRC);
3984     Register DeadCarryReg = MRI.createVirtualRegister(CarryRC);
3985 
3986     const TargetRegisterClass *Src0RC = Src0.isReg()
3987                                             ? MRI.getRegClass(Src0.getReg())
3988                                             : &AMDGPU::VReg_64RegClass;
3989     const TargetRegisterClass *Src1RC = Src1.isReg()
3990                                             ? MRI.getRegClass(Src1.getReg())
3991                                             : &AMDGPU::VReg_64RegClass;
3992 
3993     const TargetRegisterClass *Src0SubRC =
3994         TRI->getSubRegClass(Src0RC, AMDGPU::sub0);
3995     const TargetRegisterClass *Src1SubRC =
3996         TRI->getSubRegClass(Src1RC, AMDGPU::sub1);
3997 
3998     MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm(
3999         MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
4000     MachineOperand SrcReg1Sub0 = TII->buildExtractSubRegOrImm(
4001         MI, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC);
4002 
4003     MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm(
4004         MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
4005     MachineOperand SrcReg1Sub1 = TII->buildExtractSubRegOrImm(
4006         MI, MRI, Src1, Src1RC, AMDGPU::sub1, Src1SubRC);
4007 
4008     unsigned LoOpc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
4009     MachineInstr *LoHalf = BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
4010                                .addReg(CarryReg, RegState::Define)
4011                                .add(SrcReg0Sub0)
4012                                .add(SrcReg1Sub0)
4013                                .addImm(0); // clamp bit
4014 
4015     unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
4016     MachineInstr *HiHalf =
4017         BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
4018             .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
4019             .add(SrcReg0Sub1)
4020             .add(SrcReg1Sub1)
4021             .addReg(CarryReg, RegState::Kill)
4022             .addImm(0); // clamp bit
4023 
4024     BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
4025         .addReg(DestSub0)
4026         .addImm(AMDGPU::sub0)
4027         .addReg(DestSub1)
4028         .addImm(AMDGPU::sub1);
4029     TII->legalizeOperands(*LoHalf);
4030     TII->legalizeOperands(*HiHalf);
4031     MI.eraseFromParent();
4032     return BB;
4033   }
4034   case AMDGPU::S_ADD_CO_PSEUDO:
4035   case AMDGPU::S_SUB_CO_PSEUDO: {
4036     // This pseudo has a chance to be selected
4037     // only from uniform add/subcarry node. All the VGPR operands
4038     // therefore assumed to be splat vectors.
4039     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
4040     const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
4041     const SIRegisterInfo *TRI = ST.getRegisterInfo();
4042     MachineBasicBlock::iterator MII = MI;
4043     const DebugLoc &DL = MI.getDebugLoc();
4044     MachineOperand &Dest = MI.getOperand(0);
4045     MachineOperand &CarryDest = MI.getOperand(1);
4046     MachineOperand &Src0 = MI.getOperand(2);
4047     MachineOperand &Src1 = MI.getOperand(3);
4048     MachineOperand &Src2 = MI.getOperand(4);
4049     unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO)
4050                        ? AMDGPU::S_ADDC_U32
4051                        : AMDGPU::S_SUBB_U32;
4052     if (Src0.isReg() && TRI->isVectorRegister(MRI, Src0.getReg())) {
4053       Register RegOp0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4054       BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp0)
4055           .addReg(Src0.getReg());
4056       Src0.setReg(RegOp0);
4057     }
4058     if (Src1.isReg() && TRI->isVectorRegister(MRI, Src1.getReg())) {
4059       Register RegOp1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4060       BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp1)
4061           .addReg(Src1.getReg());
4062       Src1.setReg(RegOp1);
4063     }
4064     Register RegOp2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4065     if (TRI->isVectorRegister(MRI, Src2.getReg())) {
4066       BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp2)
4067           .addReg(Src2.getReg());
4068       Src2.setReg(RegOp2);
4069     }
4070 
4071     const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg());
4072     unsigned WaveSize = TRI->getRegSizeInBits(*Src2RC);
4073     assert(WaveSize == 64 || WaveSize == 32);
4074 
4075     if (WaveSize == 64) {
4076       if (ST.hasScalarCompareEq64()) {
4077         BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U64))
4078             .addReg(Src2.getReg())
4079             .addImm(0);
4080       } else {
4081         const TargetRegisterClass *SubRC =
4082             TRI->getSubRegClass(Src2RC, AMDGPU::sub0);
4083         MachineOperand Src2Sub0 = TII->buildExtractSubRegOrImm(
4084             MII, MRI, Src2, Src2RC, AMDGPU::sub0, SubRC);
4085         MachineOperand Src2Sub1 = TII->buildExtractSubRegOrImm(
4086             MII, MRI, Src2, Src2RC, AMDGPU::sub1, SubRC);
4087         Register Src2_32 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4088 
4089         BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_OR_B32), Src2_32)
4090             .add(Src2Sub0)
4091             .add(Src2Sub1);
4092 
4093         BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32))
4094             .addReg(Src2_32, RegState::Kill)
4095             .addImm(0);
4096       }
4097     } else {
4098       BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMPK_LG_U32))
4099           .addReg(Src2.getReg())
4100           .addImm(0);
4101     }
4102 
4103     BuildMI(*BB, MII, DL, TII->get(Opc), Dest.getReg()).add(Src0).add(Src1);
4104 
4105     unsigned SelOpc =
4106         (WaveSize == 64) ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
4107 
4108     BuildMI(*BB, MII, DL, TII->get(SelOpc), CarryDest.getReg())
4109         .addImm(-1)
4110         .addImm(0);
4111 
4112     MI.eraseFromParent();
4113     return BB;
4114   }
4115   case AMDGPU::SI_INIT_M0: {
4116     BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
4117             TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
4118         .add(MI.getOperand(0));
4119     MI.eraseFromParent();
4120     return BB;
4121   }
4122   case AMDGPU::GET_GROUPSTATICSIZE: {
4123     assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA ||
4124            getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL);
4125     DebugLoc DL = MI.getDebugLoc();
4126     BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
4127         .add(MI.getOperand(0))
4128         .addImm(MFI->getLDSSize());
4129     MI.eraseFromParent();
4130     return BB;
4131   }
4132   case AMDGPU::SI_INDIRECT_SRC_V1:
4133   case AMDGPU::SI_INDIRECT_SRC_V2:
4134   case AMDGPU::SI_INDIRECT_SRC_V4:
4135   case AMDGPU::SI_INDIRECT_SRC_V8:
4136   case AMDGPU::SI_INDIRECT_SRC_V16:
4137   case AMDGPU::SI_INDIRECT_SRC_V32:
4138     return emitIndirectSrc(MI, *BB, *getSubtarget());
4139   case AMDGPU::SI_INDIRECT_DST_V1:
4140   case AMDGPU::SI_INDIRECT_DST_V2:
4141   case AMDGPU::SI_INDIRECT_DST_V4:
4142   case AMDGPU::SI_INDIRECT_DST_V8:
4143   case AMDGPU::SI_INDIRECT_DST_V16:
4144   case AMDGPU::SI_INDIRECT_DST_V32:
4145     return emitIndirectDst(MI, *BB, *getSubtarget());
4146   case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
4147   case AMDGPU::SI_KILL_I1_PSEUDO:
4148     return splitKillBlock(MI, BB);
4149   case AMDGPU::V_CNDMASK_B64_PSEUDO: {
4150     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
4151     const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
4152     const SIRegisterInfo *TRI = ST.getRegisterInfo();
4153 
4154     Register Dst = MI.getOperand(0).getReg();
4155     Register Src0 = MI.getOperand(1).getReg();
4156     Register Src1 = MI.getOperand(2).getReg();
4157     const DebugLoc &DL = MI.getDebugLoc();
4158     Register SrcCond = MI.getOperand(3).getReg();
4159 
4160     Register DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4161     Register DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4162     const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
4163     Register SrcCondCopy = MRI.createVirtualRegister(CondRC);
4164 
4165     BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
4166       .addReg(SrcCond);
4167     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
4168       .addImm(0)
4169       .addReg(Src0, 0, AMDGPU::sub0)
4170       .addImm(0)
4171       .addReg(Src1, 0, AMDGPU::sub0)
4172       .addReg(SrcCondCopy);
4173     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
4174       .addImm(0)
4175       .addReg(Src0, 0, AMDGPU::sub1)
4176       .addImm(0)
4177       .addReg(Src1, 0, AMDGPU::sub1)
4178       .addReg(SrcCondCopy);
4179 
4180     BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
4181       .addReg(DstLo)
4182       .addImm(AMDGPU::sub0)
4183       .addReg(DstHi)
4184       .addImm(AMDGPU::sub1);
4185     MI.eraseFromParent();
4186     return BB;
4187   }
4188   case AMDGPU::SI_BR_UNDEF: {
4189     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
4190     const DebugLoc &DL = MI.getDebugLoc();
4191     MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
4192                            .add(MI.getOperand(0));
4193     Br->getOperand(1).setIsUndef(true); // read undef SCC
4194     MI.eraseFromParent();
4195     return BB;
4196   }
4197   case AMDGPU::ADJCALLSTACKUP:
4198   case AMDGPU::ADJCALLSTACKDOWN: {
4199     const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4200     MachineInstrBuilder MIB(*MF, &MI);
4201     MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
4202        .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit);
4203     return BB;
4204   }
4205   case AMDGPU::SI_CALL_ISEL: {
4206     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
4207     const DebugLoc &DL = MI.getDebugLoc();
4208 
4209     unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
4210 
4211     MachineInstrBuilder MIB;
4212     MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg);
4213 
4214     for (const MachineOperand &MO : MI.operands())
4215       MIB.add(MO);
4216 
4217     MIB.cloneMemRefs(MI);
4218     MI.eraseFromParent();
4219     return BB;
4220   }
4221   case AMDGPU::V_ADD_CO_U32_e32:
4222   case AMDGPU::V_SUB_CO_U32_e32:
4223   case AMDGPU::V_SUBREV_CO_U32_e32: {
4224     // TODO: Define distinct V_*_I32_Pseudo instructions instead.
4225     const DebugLoc &DL = MI.getDebugLoc();
4226     unsigned Opc = MI.getOpcode();
4227 
4228     bool NeedClampOperand = false;
4229     if (TII->pseudoToMCOpcode(Opc) == -1) {
4230       Opc = AMDGPU::getVOPe64(Opc);
4231       NeedClampOperand = true;
4232     }
4233 
4234     auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg());
4235     if (TII->isVOP3(*I)) {
4236       const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
4237       const SIRegisterInfo *TRI = ST.getRegisterInfo();
4238       I.addReg(TRI->getVCC(), RegState::Define);
4239     }
4240     I.add(MI.getOperand(1))
4241      .add(MI.getOperand(2));
4242     if (NeedClampOperand)
4243       I.addImm(0); // clamp bit for e64 encoding
4244 
4245     TII->legalizeOperands(*I);
4246 
4247     MI.eraseFromParent();
4248     return BB;
4249   }
4250   case AMDGPU::V_ADDC_U32_e32:
4251   case AMDGPU::V_SUBB_U32_e32:
4252   case AMDGPU::V_SUBBREV_U32_e32:
4253     // These instructions have an implicit use of vcc which counts towards the
4254     // constant bus limit.
4255     TII->legalizeOperands(MI);
4256     return BB;
4257   case AMDGPU::DS_GWS_INIT:
4258   case AMDGPU::DS_GWS_SEMA_BR:
4259   case AMDGPU::DS_GWS_BARRIER:
4260     TII->enforceOperandRCAlignment(MI, AMDGPU::OpName::data0);
4261     LLVM_FALLTHROUGH;
4262   case AMDGPU::DS_GWS_SEMA_V:
4263   case AMDGPU::DS_GWS_SEMA_P:
4264   case AMDGPU::DS_GWS_SEMA_RELEASE_ALL:
4265     // A s_waitcnt 0 is required to be the instruction immediately following.
4266     if (getSubtarget()->hasGWSAutoReplay()) {
4267       bundleInstWithWaitcnt(MI);
4268       return BB;
4269     }
4270 
4271     return emitGWSMemViolTestLoop(MI, BB);
4272   case AMDGPU::S_SETREG_B32: {
4273     // Try to optimize cases that only set the denormal mode or rounding mode.
4274     //
4275     // If the s_setreg_b32 fully sets all of the bits in the rounding mode or
4276     // denormal mode to a constant, we can use s_round_mode or s_denorm_mode
4277     // instead.
4278     //
4279     // FIXME: This could be predicates on the immediate, but tablegen doesn't
4280     // allow you to have a no side effect instruction in the output of a
4281     // sideeffecting pattern.
4282     unsigned ID, Offset, Width;
4283     AMDGPU::Hwreg::decodeHwreg(MI.getOperand(1).getImm(), ID, Offset, Width);
4284     if (ID != AMDGPU::Hwreg::ID_MODE)
4285       return BB;
4286 
4287     const unsigned WidthMask = maskTrailingOnes<unsigned>(Width);
4288     const unsigned SetMask = WidthMask << Offset;
4289 
4290     if (getSubtarget()->hasDenormModeInst()) {
4291       unsigned SetDenormOp = 0;
4292       unsigned SetRoundOp = 0;
4293 
4294       // The dedicated instructions can only set the whole denorm or round mode
4295       // at once, not a subset of bits in either.
4296       if (SetMask ==
4297           (AMDGPU::Hwreg::FP_ROUND_MASK | AMDGPU::Hwreg::FP_DENORM_MASK)) {
4298         // If this fully sets both the round and denorm mode, emit the two
4299         // dedicated instructions for these.
4300         SetRoundOp = AMDGPU::S_ROUND_MODE;
4301         SetDenormOp = AMDGPU::S_DENORM_MODE;
4302       } else if (SetMask == AMDGPU::Hwreg::FP_ROUND_MASK) {
4303         SetRoundOp = AMDGPU::S_ROUND_MODE;
4304       } else if (SetMask == AMDGPU::Hwreg::FP_DENORM_MASK) {
4305         SetDenormOp = AMDGPU::S_DENORM_MODE;
4306       }
4307 
4308       if (SetRoundOp || SetDenormOp) {
4309         MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
4310         MachineInstr *Def = MRI.getVRegDef(MI.getOperand(0).getReg());
4311         if (Def && Def->isMoveImmediate() && Def->getOperand(1).isImm()) {
4312           unsigned ImmVal = Def->getOperand(1).getImm();
4313           if (SetRoundOp) {
4314             BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetRoundOp))
4315                 .addImm(ImmVal & 0xf);
4316 
4317             // If we also have the denorm mode, get just the denorm mode bits.
4318             ImmVal >>= 4;
4319           }
4320 
4321           if (SetDenormOp) {
4322             BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetDenormOp))
4323                 .addImm(ImmVal & 0xf);
4324           }
4325 
4326           MI.eraseFromParent();
4327           return BB;
4328         }
4329       }
4330     }
4331 
4332     // If only FP bits are touched, used the no side effects pseudo.
4333     if ((SetMask & (AMDGPU::Hwreg::FP_ROUND_MASK |
4334                     AMDGPU::Hwreg::FP_DENORM_MASK)) == SetMask)
4335       MI.setDesc(TII->get(AMDGPU::S_SETREG_B32_mode));
4336 
4337     return BB;
4338   }
4339   default:
4340     return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
4341   }
4342 }
4343 
4344 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
4345   return isTypeLegal(VT.getScalarType());
4346 }
4347 
4348 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
4349   // This currently forces unfolding various combinations of fsub into fma with
4350   // free fneg'd operands. As long as we have fast FMA (controlled by
4351   // isFMAFasterThanFMulAndFAdd), we should perform these.
4352 
4353   // When fma is quarter rate, for f64 where add / sub are at best half rate,
4354   // most of these combines appear to be cycle neutral but save on instruction
4355   // count / code size.
4356   return true;
4357 }
4358 
4359 bool SITargetLowering::enableAggressiveFMAFusion(LLT Ty) const { return true; }
4360 
4361 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
4362                                          EVT VT) const {
4363   if (!VT.isVector()) {
4364     return MVT::i1;
4365   }
4366   return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
4367 }
4368 
4369 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
4370   // TODO: Should i16 be used always if legal? For now it would force VALU
4371   // shifts.
4372   return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
4373 }
4374 
4375 LLT SITargetLowering::getPreferredShiftAmountTy(LLT Ty) const {
4376   return (Ty.getScalarSizeInBits() <= 16 && Subtarget->has16BitInsts())
4377              ? Ty.changeElementSize(16)
4378              : Ty.changeElementSize(32);
4379 }
4380 
4381 // Answering this is somewhat tricky and depends on the specific device which
4382 // have different rates for fma or all f64 operations.
4383 //
4384 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
4385 // regardless of which device (although the number of cycles differs between
4386 // devices), so it is always profitable for f64.
4387 //
4388 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
4389 // only on full rate devices. Normally, we should prefer selecting v_mad_f32
4390 // which we can always do even without fused FP ops since it returns the same
4391 // result as the separate operations and since it is always full
4392 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
4393 // however does not support denormals, so we do report fma as faster if we have
4394 // a fast fma device and require denormals.
4395 //
4396 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
4397                                                   EVT VT) const {
4398   VT = VT.getScalarType();
4399 
4400   switch (VT.getSimpleVT().SimpleTy) {
4401   case MVT::f32: {
4402     // If mad is not available this depends only on if f32 fma is full rate.
4403     if (!Subtarget->hasMadMacF32Insts())
4404       return Subtarget->hasFastFMAF32();
4405 
4406     // Otherwise f32 mad is always full rate and returns the same result as
4407     // the separate operations so should be preferred over fma.
4408     // However does not support denormals.
4409     if (hasFP32Denormals(MF))
4410       return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
4411 
4412     // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
4413     return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
4414   }
4415   case MVT::f64:
4416     return true;
4417   case MVT::f16:
4418     return Subtarget->has16BitInsts() && hasFP64FP16Denormals(MF);
4419   default:
4420     break;
4421   }
4422 
4423   return false;
4424 }
4425 
4426 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
4427                                                   LLT Ty) const {
4428   switch (Ty.getScalarSizeInBits()) {
4429   case 16:
4430     return isFMAFasterThanFMulAndFAdd(MF, MVT::f16);
4431   case 32:
4432     return isFMAFasterThanFMulAndFAdd(MF, MVT::f32);
4433   case 64:
4434     return isFMAFasterThanFMulAndFAdd(MF, MVT::f64);
4435   default:
4436     break;
4437   }
4438 
4439   return false;
4440 }
4441 
4442 bool SITargetLowering::isFMADLegal(const MachineInstr &MI, LLT Ty) const {
4443   if (!Ty.isScalar())
4444     return false;
4445 
4446   if (Ty.getScalarSizeInBits() == 16)
4447     return Subtarget->hasMadF16() && !hasFP64FP16Denormals(*MI.getMF());
4448   if (Ty.getScalarSizeInBits() == 32)
4449     return Subtarget->hasMadMacF32Insts() && !hasFP32Denormals(*MI.getMF());
4450 
4451   return false;
4452 }
4453 
4454 bool SITargetLowering::isFMADLegal(const SelectionDAG &DAG,
4455                                    const SDNode *N) const {
4456   // TODO: Check future ftz flag
4457   // v_mad_f32/v_mac_f32 do not support denormals.
4458   EVT VT = N->getValueType(0);
4459   if (VT == MVT::f32)
4460     return Subtarget->hasMadMacF32Insts() &&
4461            !hasFP32Denormals(DAG.getMachineFunction());
4462   if (VT == MVT::f16) {
4463     return Subtarget->hasMadF16() &&
4464            !hasFP64FP16Denormals(DAG.getMachineFunction());
4465   }
4466 
4467   return false;
4468 }
4469 
4470 //===----------------------------------------------------------------------===//
4471 // Custom DAG Lowering Operations
4472 //===----------------------------------------------------------------------===//
4473 
4474 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
4475 // wider vector type is legal.
4476 SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
4477                                              SelectionDAG &DAG) const {
4478   unsigned Opc = Op.getOpcode();
4479   EVT VT = Op.getValueType();
4480   assert(VT == MVT::v4f16 || VT == MVT::v4i16);
4481 
4482   SDValue Lo, Hi;
4483   std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
4484 
4485   SDLoc SL(Op);
4486   SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
4487                              Op->getFlags());
4488   SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
4489                              Op->getFlags());
4490 
4491   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
4492 }
4493 
4494 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
4495 // wider vector type is legal.
4496 SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
4497                                               SelectionDAG &DAG) const {
4498   unsigned Opc = Op.getOpcode();
4499   EVT VT = Op.getValueType();
4500   assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4f32 ||
4501          VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8f32 ||
4502          VT == MVT::v16f32 || VT == MVT::v32f32);
4503 
4504   SDValue Lo0, Hi0;
4505   std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
4506   SDValue Lo1, Hi1;
4507   std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
4508 
4509   SDLoc SL(Op);
4510 
4511   SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
4512                              Op->getFlags());
4513   SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
4514                              Op->getFlags());
4515 
4516   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
4517 }
4518 
4519 SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op,
4520                                               SelectionDAG &DAG) const {
4521   unsigned Opc = Op.getOpcode();
4522   EVT VT = Op.getValueType();
4523   assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v8i16 ||
4524          VT == MVT::v8f16 || VT == MVT::v4f32 || VT == MVT::v8f32 ||
4525          VT == MVT::v16f32 || VT == MVT::v32f32);
4526 
4527   SDValue Lo0, Hi0;
4528   SDValue Op0 = Op.getOperand(0);
4529   std::tie(Lo0, Hi0) = Op0.getValueType().isVector()
4530                          ? DAG.SplitVectorOperand(Op.getNode(), 0)
4531                          : std::make_pair(Op0, Op0);
4532   SDValue Lo1, Hi1;
4533   std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
4534   SDValue Lo2, Hi2;
4535   std::tie(Lo2, Hi2) = DAG.SplitVectorOperand(Op.getNode(), 2);
4536 
4537   SDLoc SL(Op);
4538   auto ResVT = DAG.GetSplitDestVTs(VT);
4539 
4540   SDValue OpLo = DAG.getNode(Opc, SL, ResVT.first, Lo0, Lo1, Lo2,
4541                              Op->getFlags());
4542   SDValue OpHi = DAG.getNode(Opc, SL, ResVT.second, Hi0, Hi1, Hi2,
4543                              Op->getFlags());
4544 
4545   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
4546 }
4547 
4548 
4549 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
4550   switch (Op.getOpcode()) {
4551   default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
4552   case ISD::BRCOND: return LowerBRCOND(Op, DAG);
4553   case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
4554   case ISD::LOAD: {
4555     SDValue Result = LowerLOAD(Op, DAG);
4556     assert((!Result.getNode() ||
4557             Result.getNode()->getNumValues() == 2) &&
4558            "Load should return a value and a chain");
4559     return Result;
4560   }
4561 
4562   case ISD::FSIN:
4563   case ISD::FCOS:
4564     return LowerTrig(Op, DAG);
4565   case ISD::SELECT: return LowerSELECT(Op, DAG);
4566   case ISD::FDIV: return LowerFDIV(Op, DAG);
4567   case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
4568   case ISD::STORE: return LowerSTORE(Op, DAG);
4569   case ISD::GlobalAddress: {
4570     MachineFunction &MF = DAG.getMachineFunction();
4571     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
4572     return LowerGlobalAddress(MFI, Op, DAG);
4573   }
4574   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
4575   case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
4576   case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
4577   case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
4578   case ISD::INSERT_SUBVECTOR:
4579     return lowerINSERT_SUBVECTOR(Op, DAG);
4580   case ISD::INSERT_VECTOR_ELT:
4581     return lowerINSERT_VECTOR_ELT(Op, DAG);
4582   case ISD::EXTRACT_VECTOR_ELT:
4583     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
4584   case ISD::VECTOR_SHUFFLE:
4585     return lowerVECTOR_SHUFFLE(Op, DAG);
4586   case ISD::SCALAR_TO_VECTOR:
4587     return lowerSCALAR_TO_VECTOR(Op, DAG);
4588   case ISD::BUILD_VECTOR:
4589     return lowerBUILD_VECTOR(Op, DAG);
4590   case ISD::FP_ROUND:
4591     return lowerFP_ROUND(Op, DAG);
4592   case ISD::FPTRUNC_ROUND: {
4593     unsigned Opc;
4594     SDLoc DL(Op);
4595 
4596     if (Op.getOperand(0)->getValueType(0) != MVT::f32)
4597       return SDValue();
4598 
4599     // Get the rounding mode from the last operand
4600     int RoundMode = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
4601     if (RoundMode == (int)RoundingMode::TowardPositive)
4602       Opc = AMDGPUISD::FPTRUNC_ROUND_UPWARD;
4603     else if (RoundMode == (int)RoundingMode::TowardNegative)
4604       Opc = AMDGPUISD::FPTRUNC_ROUND_DOWNWARD;
4605     else
4606       return SDValue();
4607 
4608     return DAG.getNode(Opc, DL, Op.getNode()->getVTList(), Op->getOperand(0));
4609   }
4610   case ISD::TRAP:
4611     return lowerTRAP(Op, DAG);
4612   case ISD::DEBUGTRAP:
4613     return lowerDEBUGTRAP(Op, DAG);
4614   case ISD::FABS:
4615   case ISD::FNEG:
4616   case ISD::FCANONICALIZE:
4617   case ISD::BSWAP:
4618     return splitUnaryVectorOp(Op, DAG);
4619   case ISD::FMINNUM:
4620   case ISD::FMAXNUM:
4621     return lowerFMINNUM_FMAXNUM(Op, DAG);
4622   case ISD::FMA:
4623     return splitTernaryVectorOp(Op, DAG);
4624   case ISD::FP_TO_SINT:
4625   case ISD::FP_TO_UINT:
4626     return LowerFP_TO_INT(Op, DAG);
4627   case ISD::SHL:
4628   case ISD::SRA:
4629   case ISD::SRL:
4630   case ISD::ADD:
4631   case ISD::SUB:
4632   case ISD::MUL:
4633   case ISD::SMIN:
4634   case ISD::SMAX:
4635   case ISD::UMIN:
4636   case ISD::UMAX:
4637   case ISD::FADD:
4638   case ISD::FMUL:
4639   case ISD::FMINNUM_IEEE:
4640   case ISD::FMAXNUM_IEEE:
4641   case ISD::UADDSAT:
4642   case ISD::USUBSAT:
4643   case ISD::SADDSAT:
4644   case ISD::SSUBSAT:
4645     return splitBinaryVectorOp(Op, DAG);
4646   case ISD::SMULO:
4647   case ISD::UMULO:
4648     return lowerXMULO(Op, DAG);
4649   case ISD::SMUL_LOHI:
4650   case ISD::UMUL_LOHI:
4651     return lowerXMUL_LOHI(Op, DAG);
4652   case ISD::DYNAMIC_STACKALLOC:
4653     return LowerDYNAMIC_STACKALLOC(Op, DAG);
4654   }
4655   return SDValue();
4656 }
4657 
4658 // Used for D16: Casts the result of an instruction into the right vector,
4659 // packs values if loads return unpacked values.
4660 static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
4661                                        const SDLoc &DL,
4662                                        SelectionDAG &DAG, bool Unpacked) {
4663   if (!LoadVT.isVector())
4664     return Result;
4665 
4666   // Cast back to the original packed type or to a larger type that is a
4667   // multiple of 32 bit for D16. Widening the return type is a required for
4668   // legalization.
4669   EVT FittingLoadVT = LoadVT;
4670   if ((LoadVT.getVectorNumElements() % 2) == 1) {
4671     FittingLoadVT =
4672         EVT::getVectorVT(*DAG.getContext(), LoadVT.getVectorElementType(),
4673                          LoadVT.getVectorNumElements() + 1);
4674   }
4675 
4676   if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
4677     // Truncate to v2i16/v4i16.
4678     EVT IntLoadVT = FittingLoadVT.changeTypeToInteger();
4679 
4680     // Workaround legalizer not scalarizing truncate after vector op
4681     // legalization but not creating intermediate vector trunc.
4682     SmallVector<SDValue, 4> Elts;
4683     DAG.ExtractVectorElements(Result, Elts);
4684     for (SDValue &Elt : Elts)
4685       Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
4686 
4687     // Pad illegal v1i16/v3fi6 to v4i16
4688     if ((LoadVT.getVectorNumElements() % 2) == 1)
4689       Elts.push_back(DAG.getUNDEF(MVT::i16));
4690 
4691     Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
4692 
4693     // Bitcast to original type (v2f16/v4f16).
4694     return DAG.getNode(ISD::BITCAST, DL, FittingLoadVT, Result);
4695   }
4696 
4697   // Cast back to the original packed type.
4698   return DAG.getNode(ISD::BITCAST, DL, FittingLoadVT, Result);
4699 }
4700 
4701 SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
4702                                               MemSDNode *M,
4703                                               SelectionDAG &DAG,
4704                                               ArrayRef<SDValue> Ops,
4705                                               bool IsIntrinsic) const {
4706   SDLoc DL(M);
4707 
4708   bool Unpacked = Subtarget->hasUnpackedD16VMem();
4709   EVT LoadVT = M->getValueType(0);
4710 
4711   EVT EquivLoadVT = LoadVT;
4712   if (LoadVT.isVector()) {
4713     if (Unpacked) {
4714       EquivLoadVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
4715                                      LoadVT.getVectorNumElements());
4716     } else if ((LoadVT.getVectorNumElements() % 2) == 1) {
4717       // Widen v3f16 to legal type
4718       EquivLoadVT =
4719           EVT::getVectorVT(*DAG.getContext(), LoadVT.getVectorElementType(),
4720                            LoadVT.getVectorNumElements() + 1);
4721     }
4722   }
4723 
4724   // Change from v4f16/v2f16 to EquivLoadVT.
4725   SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
4726 
4727   SDValue Load
4728     = DAG.getMemIntrinsicNode(
4729       IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
4730       VTList, Ops, M->getMemoryVT(),
4731       M->getMemOperand());
4732 
4733   SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);
4734 
4735   return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
4736 }
4737 
4738 SDValue SITargetLowering::lowerIntrinsicLoad(MemSDNode *M, bool IsFormat,
4739                                              SelectionDAG &DAG,
4740                                              ArrayRef<SDValue> Ops) const {
4741   SDLoc DL(M);
4742   EVT LoadVT = M->getValueType(0);
4743   EVT EltType = LoadVT.getScalarType();
4744   EVT IntVT = LoadVT.changeTypeToInteger();
4745 
4746   bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
4747 
4748   unsigned Opc =
4749       IsFormat ? AMDGPUISD::BUFFER_LOAD_FORMAT : AMDGPUISD::BUFFER_LOAD;
4750 
4751   if (IsD16) {
4752     return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, M, DAG, Ops);
4753   }
4754 
4755   // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
4756   if (!IsD16 && !LoadVT.isVector() && EltType.getSizeInBits() < 32)
4757     return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
4758 
4759   if (isTypeLegal(LoadVT)) {
4760     return getMemIntrinsicNode(Opc, DL, M->getVTList(), Ops, IntVT,
4761                                M->getMemOperand(), DAG);
4762   }
4763 
4764   EVT CastVT = getEquivalentMemType(*DAG.getContext(), LoadVT);
4765   SDVTList VTList = DAG.getVTList(CastVT, MVT::Other);
4766   SDValue MemNode = getMemIntrinsicNode(Opc, DL, VTList, Ops, CastVT,
4767                                         M->getMemOperand(), DAG);
4768   return DAG.getMergeValues(
4769       {DAG.getNode(ISD::BITCAST, DL, LoadVT, MemNode), MemNode.getValue(1)},
4770       DL);
4771 }
4772 
4773 static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
4774                                   SDNode *N, SelectionDAG &DAG) {
4775   EVT VT = N->getValueType(0);
4776   const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
4777   unsigned CondCode = CD->getZExtValue();
4778   if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(CondCode)))
4779     return DAG.getUNDEF(VT);
4780 
4781   ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
4782 
4783   SDValue LHS = N->getOperand(1);
4784   SDValue RHS = N->getOperand(2);
4785 
4786   SDLoc DL(N);
4787 
4788   EVT CmpVT = LHS.getValueType();
4789   if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
4790     unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
4791       ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
4792     LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
4793     RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
4794   }
4795 
4796   ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
4797 
4798   unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
4799   EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
4800 
4801   SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS,
4802                               DAG.getCondCode(CCOpcode));
4803   if (VT.bitsEq(CCVT))
4804     return SetCC;
4805   return DAG.getZExtOrTrunc(SetCC, DL, VT);
4806 }
4807 
4808 static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
4809                                   SDNode *N, SelectionDAG &DAG) {
4810   EVT VT = N->getValueType(0);
4811   const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
4812 
4813   unsigned CondCode = CD->getZExtValue();
4814   if (!FCmpInst::isFPPredicate(static_cast<FCmpInst::Predicate>(CondCode)))
4815     return DAG.getUNDEF(VT);
4816 
4817   SDValue Src0 = N->getOperand(1);
4818   SDValue Src1 = N->getOperand(2);
4819   EVT CmpVT = Src0.getValueType();
4820   SDLoc SL(N);
4821 
4822   if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
4823     Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
4824     Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
4825   }
4826 
4827   FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
4828   ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
4829   unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
4830   EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
4831   SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0,
4832                               Src1, DAG.getCondCode(CCOpcode));
4833   if (VT.bitsEq(CCVT))
4834     return SetCC;
4835   return DAG.getZExtOrTrunc(SetCC, SL, VT);
4836 }
4837 
4838 static SDValue lowerBALLOTIntrinsic(const SITargetLowering &TLI, SDNode *N,
4839                                     SelectionDAG &DAG) {
4840   EVT VT = N->getValueType(0);
4841   SDValue Src = N->getOperand(1);
4842   SDLoc SL(N);
4843 
4844   if (Src.getOpcode() == ISD::SETCC) {
4845     // (ballot (ISD::SETCC ...)) -> (AMDGPUISD::SETCC ...)
4846     return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src.getOperand(0),
4847                        Src.getOperand(1), Src.getOperand(2));
4848   }
4849   if (const ConstantSDNode *Arg = dyn_cast<ConstantSDNode>(Src)) {
4850     // (ballot 0) -> 0
4851     if (Arg->isZero())
4852       return DAG.getConstant(0, SL, VT);
4853 
4854     // (ballot 1) -> EXEC/EXEC_LO
4855     if (Arg->isOne()) {
4856       Register Exec;
4857       if (VT.getScalarSizeInBits() == 32)
4858         Exec = AMDGPU::EXEC_LO;
4859       else if (VT.getScalarSizeInBits() == 64)
4860         Exec = AMDGPU::EXEC;
4861       else
4862         return SDValue();
4863 
4864       return DAG.getCopyFromReg(DAG.getEntryNode(), SL, Exec, VT);
4865     }
4866   }
4867 
4868   // (ballot (i1 $src)) -> (AMDGPUISD::SETCC (i32 (zext $src)) (i32 0)
4869   // ISD::SETNE)
4870   return DAG.getNode(
4871       AMDGPUISD::SETCC, SL, VT, DAG.getZExtOrTrunc(Src, SL, MVT::i32),
4872       DAG.getConstant(0, SL, MVT::i32), DAG.getCondCode(ISD::SETNE));
4873 }
4874 
4875 void SITargetLowering::ReplaceNodeResults(SDNode *N,
4876                                           SmallVectorImpl<SDValue> &Results,
4877                                           SelectionDAG &DAG) const {
4878   switch (N->getOpcode()) {
4879   case ISD::INSERT_VECTOR_ELT: {
4880     if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
4881       Results.push_back(Res);
4882     return;
4883   }
4884   case ISD::EXTRACT_VECTOR_ELT: {
4885     if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
4886       Results.push_back(Res);
4887     return;
4888   }
4889   case ISD::INTRINSIC_WO_CHAIN: {
4890     unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
4891     switch (IID) {
4892     case Intrinsic::amdgcn_cvt_pkrtz: {
4893       SDValue Src0 = N->getOperand(1);
4894       SDValue Src1 = N->getOperand(2);
4895       SDLoc SL(N);
4896       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
4897                                 Src0, Src1);
4898       Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
4899       return;
4900     }
4901     case Intrinsic::amdgcn_cvt_pknorm_i16:
4902     case Intrinsic::amdgcn_cvt_pknorm_u16:
4903     case Intrinsic::amdgcn_cvt_pk_i16:
4904     case Intrinsic::amdgcn_cvt_pk_u16: {
4905       SDValue Src0 = N->getOperand(1);
4906       SDValue Src1 = N->getOperand(2);
4907       SDLoc SL(N);
4908       unsigned Opcode;
4909 
4910       if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
4911         Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
4912       else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
4913         Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
4914       else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
4915         Opcode = AMDGPUISD::CVT_PK_I16_I32;
4916       else
4917         Opcode = AMDGPUISD::CVT_PK_U16_U32;
4918 
4919       EVT VT = N->getValueType(0);
4920       if (isTypeLegal(VT))
4921         Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
4922       else {
4923         SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
4924         Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
4925       }
4926       return;
4927     }
4928     }
4929     break;
4930   }
4931   case ISD::INTRINSIC_W_CHAIN: {
4932     if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
4933       if (Res.getOpcode() == ISD::MERGE_VALUES) {
4934         // FIXME: Hacky
4935         for (unsigned I = 0; I < Res.getNumOperands(); I++) {
4936           Results.push_back(Res.getOperand(I));
4937         }
4938       } else {
4939         Results.push_back(Res);
4940         Results.push_back(Res.getValue(1));
4941       }
4942       return;
4943     }
4944 
4945     break;
4946   }
4947   case ISD::SELECT: {
4948     SDLoc SL(N);
4949     EVT VT = N->getValueType(0);
4950     EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
4951     SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
4952     SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
4953 
4954     EVT SelectVT = NewVT;
4955     if (NewVT.bitsLT(MVT::i32)) {
4956       LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
4957       RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
4958       SelectVT = MVT::i32;
4959     }
4960 
4961     SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
4962                                     N->getOperand(0), LHS, RHS);
4963 
4964     if (NewVT != SelectVT)
4965       NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
4966     Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
4967     return;
4968   }
4969   case ISD::FNEG: {
4970     if (N->getValueType(0) != MVT::v2f16)
4971       break;
4972 
4973     SDLoc SL(N);
4974     SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
4975 
4976     SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
4977                              BC,
4978                              DAG.getConstant(0x80008000, SL, MVT::i32));
4979     Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
4980     return;
4981   }
4982   case ISD::FABS: {
4983     if (N->getValueType(0) != MVT::v2f16)
4984       break;
4985 
4986     SDLoc SL(N);
4987     SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
4988 
4989     SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
4990                              BC,
4991                              DAG.getConstant(0x7fff7fff, SL, MVT::i32));
4992     Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
4993     return;
4994   }
4995   default:
4996     break;
4997   }
4998 }
4999 
5000 /// Helper function for LowerBRCOND
5001 static SDNode *findUser(SDValue Value, unsigned Opcode) {
5002 
5003   SDNode *Parent = Value.getNode();
5004   for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
5005        I != E; ++I) {
5006 
5007     if (I.getUse().get() != Value)
5008       continue;
5009 
5010     if (I->getOpcode() == Opcode)
5011       return *I;
5012   }
5013   return nullptr;
5014 }
5015 
5016 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
5017   if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
5018     switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
5019     case Intrinsic::amdgcn_if:
5020       return AMDGPUISD::IF;
5021     case Intrinsic::amdgcn_else:
5022       return AMDGPUISD::ELSE;
5023     case Intrinsic::amdgcn_loop:
5024       return AMDGPUISD::LOOP;
5025     case Intrinsic::amdgcn_end_cf:
5026       llvm_unreachable("should not occur");
5027     default:
5028       return 0;
5029     }
5030   }
5031 
5032   // break, if_break, else_break are all only used as inputs to loop, not
5033   // directly as branch conditions.
5034   return 0;
5035 }
5036 
5037 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
5038   const Triple &TT = getTargetMachine().getTargetTriple();
5039   return (GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
5040           GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
5041          AMDGPU::shouldEmitConstantsToTextSection(TT);
5042 }
5043 
5044 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
5045   // FIXME: Either avoid relying on address space here or change the default
5046   // address space for functions to avoid the explicit check.
5047   return (GV->getValueType()->isFunctionTy() ||
5048           !isNonGlobalAddrSpace(GV->getAddressSpace())) &&
5049          !shouldEmitFixup(GV) &&
5050          !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
5051 }
5052 
5053 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
5054   return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
5055 }
5056 
5057 bool SITargetLowering::shouldUseLDSConstAddress(const GlobalValue *GV) const {
5058   if (!GV->hasExternalLinkage())
5059     return true;
5060 
5061   const auto OS = getTargetMachine().getTargetTriple().getOS();
5062   return OS == Triple::AMDHSA || OS == Triple::AMDPAL;
5063 }
5064 
5065 /// This transforms the control flow intrinsics to get the branch destination as
5066 /// last parameter, also switches branch target with BR if the need arise
5067 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
5068                                       SelectionDAG &DAG) const {
5069   SDLoc DL(BRCOND);
5070 
5071   SDNode *Intr = BRCOND.getOperand(1).getNode();
5072   SDValue Target = BRCOND.getOperand(2);
5073   SDNode *BR = nullptr;
5074   SDNode *SetCC = nullptr;
5075 
5076   if (Intr->getOpcode() == ISD::SETCC) {
5077     // As long as we negate the condition everything is fine
5078     SetCC = Intr;
5079     Intr = SetCC->getOperand(0).getNode();
5080 
5081   } else {
5082     // Get the target from BR if we don't negate the condition
5083     BR = findUser(BRCOND, ISD::BR);
5084     assert(BR && "brcond missing unconditional branch user");
5085     Target = BR->getOperand(1);
5086   }
5087 
5088   unsigned CFNode = isCFIntrinsic(Intr);
5089   if (CFNode == 0) {
5090     // This is a uniform branch so we don't need to legalize.
5091     return BRCOND;
5092   }
5093 
5094   bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
5095                    Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
5096 
5097   assert(!SetCC ||
5098         (SetCC->getConstantOperandVal(1) == 1 &&
5099          cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
5100                                                              ISD::SETNE));
5101 
5102   // operands of the new intrinsic call
5103   SmallVector<SDValue, 4> Ops;
5104   if (HaveChain)
5105     Ops.push_back(BRCOND.getOperand(0));
5106 
5107   Ops.append(Intr->op_begin() + (HaveChain ?  2 : 1), Intr->op_end());
5108   Ops.push_back(Target);
5109 
5110   ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
5111 
5112   // build the new intrinsic call
5113   SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
5114 
5115   if (!HaveChain) {
5116     SDValue Ops[] =  {
5117       SDValue(Result, 0),
5118       BRCOND.getOperand(0)
5119     };
5120 
5121     Result = DAG.getMergeValues(Ops, DL).getNode();
5122   }
5123 
5124   if (BR) {
5125     // Give the branch instruction our target
5126     SDValue Ops[] = {
5127       BR->getOperand(0),
5128       BRCOND.getOperand(2)
5129     };
5130     SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
5131     DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
5132   }
5133 
5134   SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
5135 
5136   // Copy the intrinsic results to registers
5137   for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
5138     SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
5139     if (!CopyToReg)
5140       continue;
5141 
5142     Chain = DAG.getCopyToReg(
5143       Chain, DL,
5144       CopyToReg->getOperand(1),
5145       SDValue(Result, i - 1),
5146       SDValue());
5147 
5148     DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
5149   }
5150 
5151   // Remove the old intrinsic from the chain
5152   DAG.ReplaceAllUsesOfValueWith(
5153     SDValue(Intr, Intr->getNumValues() - 1),
5154     Intr->getOperand(0));
5155 
5156   return Chain;
5157 }
5158 
5159 SDValue SITargetLowering::LowerRETURNADDR(SDValue Op,
5160                                           SelectionDAG &DAG) const {
5161   MVT VT = Op.getSimpleValueType();
5162   SDLoc DL(Op);
5163   // Checking the depth
5164   if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0)
5165     return DAG.getConstant(0, DL, VT);
5166 
5167   MachineFunction &MF = DAG.getMachineFunction();
5168   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
5169   // Check for kernel and shader functions
5170   if (Info->isEntryFunction())
5171     return DAG.getConstant(0, DL, VT);
5172 
5173   MachineFrameInfo &MFI = MF.getFrameInfo();
5174   // There is a call to @llvm.returnaddress in this function
5175   MFI.setReturnAddressIsTaken(true);
5176 
5177   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
5178   // Get the return address reg and mark it as an implicit live-in
5179   Register Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF), getRegClassFor(VT, Op.getNode()->isDivergent()));
5180 
5181   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
5182 }
5183 
5184 SDValue SITargetLowering::getFPExtOrFPRound(SelectionDAG &DAG,
5185                                             SDValue Op,
5186                                             const SDLoc &DL,
5187                                             EVT VT) const {
5188   return Op.getValueType().bitsLE(VT) ?
5189       DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
5190     DAG.getNode(ISD::FP_ROUND, DL, VT, Op,
5191                 DAG.getTargetConstant(0, DL, MVT::i32));
5192 }
5193 
5194 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
5195   assert(Op.getValueType() == MVT::f16 &&
5196          "Do not know how to custom lower FP_ROUND for non-f16 type");
5197 
5198   SDValue Src = Op.getOperand(0);
5199   EVT SrcVT = Src.getValueType();
5200   if (SrcVT != MVT::f64)
5201     return Op;
5202 
5203   SDLoc DL(Op);
5204 
5205   SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
5206   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
5207   return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
5208 }
5209 
5210 SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
5211                                                SelectionDAG &DAG) const {
5212   EVT VT = Op.getValueType();
5213   const MachineFunction &MF = DAG.getMachineFunction();
5214   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
5215   bool IsIEEEMode = Info->getMode().IEEE;
5216 
5217   // FIXME: Assert during selection that this is only selected for
5218   // ieee_mode. Currently a combine can produce the ieee version for non-ieee
5219   // mode functions, but this happens to be OK since it's only done in cases
5220   // where there is known no sNaN.
5221   if (IsIEEEMode)
5222     return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);
5223 
5224   if (VT == MVT::v4f16 || VT == MVT::v8f16)
5225     return splitBinaryVectorOp(Op, DAG);
5226   return Op;
5227 }
5228 
5229 SDValue SITargetLowering::lowerXMULO(SDValue Op, SelectionDAG &DAG) const {
5230   EVT VT = Op.getValueType();
5231   SDLoc SL(Op);
5232   SDValue LHS = Op.getOperand(0);
5233   SDValue RHS = Op.getOperand(1);
5234   bool isSigned = Op.getOpcode() == ISD::SMULO;
5235 
5236   if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) {
5237     const APInt &C = RHSC->getAPIntValue();
5238     // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X }
5239     if (C.isPowerOf2()) {
5240       // smulo(x, signed_min) is same as umulo(x, signed_min).
5241       bool UseArithShift = isSigned && !C.isMinSignedValue();
5242       SDValue ShiftAmt = DAG.getConstant(C.logBase2(), SL, MVT::i32);
5243       SDValue Result = DAG.getNode(ISD::SHL, SL, VT, LHS, ShiftAmt);
5244       SDValue Overflow = DAG.getSetCC(SL, MVT::i1,
5245           DAG.getNode(UseArithShift ? ISD::SRA : ISD::SRL,
5246                       SL, VT, Result, ShiftAmt),
5247           LHS, ISD::SETNE);
5248       return DAG.getMergeValues({ Result, Overflow }, SL);
5249     }
5250   }
5251 
5252   SDValue Result = DAG.getNode(ISD::MUL, SL, VT, LHS, RHS);
5253   SDValue Top = DAG.getNode(isSigned ? ISD::MULHS : ISD::MULHU,
5254                             SL, VT, LHS, RHS);
5255 
5256   SDValue Sign = isSigned
5257     ? DAG.getNode(ISD::SRA, SL, VT, Result,
5258                   DAG.getConstant(VT.getScalarSizeInBits() - 1, SL, MVT::i32))
5259     : DAG.getConstant(0, SL, VT);
5260   SDValue Overflow = DAG.getSetCC(SL, MVT::i1, Top, Sign, ISD::SETNE);
5261 
5262   return DAG.getMergeValues({ Result, Overflow }, SL);
5263 }
5264 
5265 SDValue SITargetLowering::lowerXMUL_LOHI(SDValue Op, SelectionDAG &DAG) const {
5266   if (Op->isDivergent()) {
5267     // Select to V_MAD_[IU]64_[IU]32.
5268     return Op;
5269   }
5270   if (Subtarget->hasSMulHi()) {
5271     // Expand to S_MUL_I32 + S_MUL_HI_[IU]32.
5272     return SDValue();
5273   }
5274   // The multiply is uniform but we would have to use V_MUL_HI_[IU]32 to
5275   // calculate the high part, so we might as well do the whole thing with
5276   // V_MAD_[IU]64_[IU]32.
5277   return Op;
5278 }
5279 
5280 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
5281   if (!Subtarget->isTrapHandlerEnabled() ||
5282       Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA)
5283     return lowerTrapEndpgm(Op, DAG);
5284 
5285   if (Optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(Subtarget)) {
5286     switch (*HsaAbiVer) {
5287     case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
5288     case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
5289       return lowerTrapHsaQueuePtr(Op, DAG);
5290     case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
5291     case ELF::ELFABIVERSION_AMDGPU_HSA_V5:
5292       return Subtarget->supportsGetDoorbellID() ?
5293           lowerTrapHsa(Op, DAG) : lowerTrapHsaQueuePtr(Op, DAG);
5294     }
5295   }
5296 
5297   llvm_unreachable("Unknown trap handler");
5298 }
5299 
5300 SDValue SITargetLowering::lowerTrapEndpgm(
5301     SDValue Op, SelectionDAG &DAG) const {
5302   SDLoc SL(Op);
5303   SDValue Chain = Op.getOperand(0);
5304   return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
5305 }
5306 
5307 SDValue SITargetLowering::loadImplicitKernelArgument(SelectionDAG &DAG, MVT VT,
5308     const SDLoc &DL, Align Alignment, ImplicitParameter Param) const {
5309   MachineFunction &MF = DAG.getMachineFunction();
5310   uint64_t Offset = getImplicitParameterOffset(MF, Param);
5311   SDValue Ptr = lowerKernArgParameterPtr(DAG, DL, DAG.getEntryNode(), Offset);
5312   MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
5313   return DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, PtrInfo, Alignment,
5314                      MachineMemOperand::MODereferenceable |
5315                          MachineMemOperand::MOInvariant);
5316 }
5317 
5318 SDValue SITargetLowering::lowerTrapHsaQueuePtr(
5319     SDValue Op, SelectionDAG &DAG) const {
5320   SDLoc SL(Op);
5321   SDValue Chain = Op.getOperand(0);
5322 
5323   SDValue QueuePtr;
5324   // For code object version 5, QueuePtr is passed through implicit kernarg.
5325   if (AMDGPU::getAmdhsaCodeObjectVersion() == 5) {
5326     QueuePtr =
5327         loadImplicitKernelArgument(DAG, MVT::i64, SL, Align(8), QUEUE_PTR);
5328   } else {
5329     MachineFunction &MF = DAG.getMachineFunction();
5330     SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
5331     Register UserSGPR = Info->getQueuePtrUserSGPR();
5332 
5333     if (UserSGPR == AMDGPU::NoRegister) {
5334       // We probably are in a function incorrectly marked with
5335       // amdgpu-no-queue-ptr. This is undefined. We don't want to delete the
5336       // trap, so just use a null pointer.
5337       QueuePtr = DAG.getConstant(0, SL, MVT::i64);
5338     } else {
5339       QueuePtr = CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, UserSGPR,
5340                                       MVT::i64);
5341     }
5342   }
5343 
5344   SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
5345   SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
5346                                    QueuePtr, SDValue());
5347 
5348   uint64_t TrapID = static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSATrap);
5349   SDValue Ops[] = {
5350     ToReg,
5351     DAG.getTargetConstant(TrapID, SL, MVT::i16),
5352     SGPR01,
5353     ToReg.getValue(1)
5354   };
5355   return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
5356 }
5357 
5358 SDValue SITargetLowering::lowerTrapHsa(
5359     SDValue Op, SelectionDAG &DAG) const {
5360   SDLoc SL(Op);
5361   SDValue Chain = Op.getOperand(0);
5362 
5363   uint64_t TrapID = static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSATrap);
5364   SDValue Ops[] = {
5365     Chain,
5366     DAG.getTargetConstant(TrapID, SL, MVT::i16)
5367   };
5368   return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
5369 }
5370 
5371 SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
5372   SDLoc SL(Op);
5373   SDValue Chain = Op.getOperand(0);
5374   MachineFunction &MF = DAG.getMachineFunction();
5375 
5376   if (!Subtarget->isTrapHandlerEnabled() ||
5377       Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA) {
5378     DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
5379                                      "debugtrap handler not supported",
5380                                      Op.getDebugLoc(),
5381                                      DS_Warning);
5382     LLVMContext &Ctx = MF.getFunction().getContext();
5383     Ctx.diagnose(NoTrap);
5384     return Chain;
5385   }
5386 
5387   uint64_t TrapID = static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSADebugTrap);
5388   SDValue Ops[] = {
5389     Chain,
5390     DAG.getTargetConstant(TrapID, SL, MVT::i16)
5391   };
5392   return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
5393 }
5394 
5395 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
5396                                              SelectionDAG &DAG) const {
5397   // FIXME: Use inline constants (src_{shared, private}_base) instead.
5398   if (Subtarget->hasApertureRegs()) {
5399     unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
5400         AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
5401         AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
5402     unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
5403         AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
5404         AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
5405     unsigned Encoding =
5406         AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
5407         Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
5408         WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
5409 
5410     SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
5411     SDValue ApertureReg = SDValue(
5412         DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
5413     SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
5414     return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
5415   }
5416 
5417   // For code object version 5, private_base and shared_base are passed through
5418   // implicit kernargs.
5419   if (AMDGPU::getAmdhsaCodeObjectVersion() == 5) {
5420     ImplicitParameter Param =
5421         (AS == AMDGPUAS::LOCAL_ADDRESS) ? SHARED_BASE : PRIVATE_BASE;
5422     return loadImplicitKernelArgument(DAG, MVT::i32, DL, Align(4), Param);
5423   }
5424 
5425   MachineFunction &MF = DAG.getMachineFunction();
5426   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
5427   Register UserSGPR = Info->getQueuePtrUserSGPR();
5428   if (UserSGPR == AMDGPU::NoRegister) {
5429     // We probably are in a function incorrectly marked with
5430     // amdgpu-no-queue-ptr. This is undefined.
5431     return DAG.getUNDEF(MVT::i32);
5432   }
5433 
5434   SDValue QueuePtr = CreateLiveInRegister(
5435     DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
5436 
5437   // Offset into amd_queue_t for group_segment_aperture_base_hi /
5438   // private_segment_aperture_base_hi.
5439   uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
5440 
5441   SDValue Ptr =
5442       DAG.getObjectPtrOffset(DL, QueuePtr, TypeSize::Fixed(StructOffset));
5443 
5444   // TODO: Use custom target PseudoSourceValue.
5445   // TODO: We should use the value from the IR intrinsic call, but it might not
5446   // be available and how do we get it?
5447   MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
5448   return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
5449                      commonAlignment(Align(64), StructOffset),
5450                      MachineMemOperand::MODereferenceable |
5451                          MachineMemOperand::MOInvariant);
5452 }
5453 
5454 /// Return true if the value is a known valid address, such that a null check is
5455 /// not necessary.
5456 static bool isKnownNonNull(SDValue Val, SelectionDAG &DAG,
5457                            const AMDGPUTargetMachine &TM, unsigned AddrSpace) {
5458   if (isa<FrameIndexSDNode>(Val) || isa<GlobalAddressSDNode>(Val) ||
5459       isa<BasicBlockSDNode>(Val))
5460     return true;
5461 
5462   if (auto *ConstVal = dyn_cast<ConstantSDNode>(Val))
5463     return ConstVal->getSExtValue() != TM.getNullPointerValue(AddrSpace);
5464 
5465   // TODO: Search through arithmetic, handle arguments and loads
5466   // marked nonnull.
5467   return false;
5468 }
5469 
5470 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
5471                                              SelectionDAG &DAG) const {
5472   SDLoc SL(Op);
5473   const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
5474 
5475   SDValue Src = ASC->getOperand(0);
5476   SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
5477   unsigned SrcAS = ASC->getSrcAddressSpace();
5478 
5479   const AMDGPUTargetMachine &TM =
5480     static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
5481 
5482   // flat -> local/private
5483   if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
5484     unsigned DestAS = ASC->getDestAddressSpace();
5485 
5486     if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
5487         DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
5488       SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
5489 
5490       if (isKnownNonNull(Src, DAG, TM, SrcAS))
5491         return Ptr;
5492 
5493       unsigned NullVal = TM.getNullPointerValue(DestAS);
5494       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
5495       SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
5496 
5497       return DAG.getNode(ISD::SELECT, SL, MVT::i32, NonNull, Ptr,
5498                          SegmentNullPtr);
5499     }
5500   }
5501 
5502   // local/private -> flat
5503   if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
5504     if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
5505         SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
5506 
5507       SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
5508       SDValue CvtPtr =
5509           DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
5510       CvtPtr = DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr);
5511 
5512       if (isKnownNonNull(Src, DAG, TM, SrcAS))
5513         return CvtPtr;
5514 
5515       unsigned NullVal = TM.getNullPointerValue(SrcAS);
5516       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
5517 
5518       SDValue NonNull
5519         = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
5520 
5521       return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, CvtPtr,
5522                          FlatNullPtr);
5523     }
5524   }
5525 
5526   if (SrcAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
5527       Op.getValueType() == MVT::i64) {
5528     const SIMachineFunctionInfo *Info =
5529         DAG.getMachineFunction().getInfo<SIMachineFunctionInfo>();
5530     SDValue Hi = DAG.getConstant(Info->get32BitAddressHighBits(), SL, MVT::i32);
5531     SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Hi);
5532     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
5533   }
5534 
5535   if (ASC->getDestAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
5536       Src.getValueType() == MVT::i64)
5537     return DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
5538 
5539   // global <-> flat are no-ops and never emitted.
5540 
5541   const MachineFunction &MF = DAG.getMachineFunction();
5542   DiagnosticInfoUnsupported InvalidAddrSpaceCast(
5543     MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
5544   DAG.getContext()->diagnose(InvalidAddrSpaceCast);
5545 
5546   return DAG.getUNDEF(ASC->getValueType(0));
5547 }
5548 
5549 // This lowers an INSERT_SUBVECTOR by extracting the individual elements from
5550 // the small vector and inserting them into the big vector. That is better than
5551 // the default expansion of doing it via a stack slot. Even though the use of
5552 // the stack slot would be optimized away afterwards, the stack slot itself
5553 // remains.
5554 SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
5555                                                 SelectionDAG &DAG) const {
5556   SDValue Vec = Op.getOperand(0);
5557   SDValue Ins = Op.getOperand(1);
5558   SDValue Idx = Op.getOperand(2);
5559   EVT VecVT = Vec.getValueType();
5560   EVT InsVT = Ins.getValueType();
5561   EVT EltVT = VecVT.getVectorElementType();
5562   unsigned InsNumElts = InsVT.getVectorNumElements();
5563   unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
5564   SDLoc SL(Op);
5565 
5566   for (unsigned I = 0; I != InsNumElts; ++I) {
5567     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins,
5568                               DAG.getConstant(I, SL, MVT::i32));
5569     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt,
5570                       DAG.getConstant(IdxVal + I, SL, MVT::i32));
5571   }
5572   return Vec;
5573 }
5574 
5575 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
5576                                                  SelectionDAG &DAG) const {
5577   SDValue Vec = Op.getOperand(0);
5578   SDValue InsVal = Op.getOperand(1);
5579   SDValue Idx = Op.getOperand(2);
5580   EVT VecVT = Vec.getValueType();
5581   EVT EltVT = VecVT.getVectorElementType();
5582   unsigned VecSize = VecVT.getSizeInBits();
5583   unsigned EltSize = EltVT.getSizeInBits();
5584   SDLoc SL(Op);
5585 
5586   // Specially handle the case of v4i16 with static indexing.
5587   unsigned NumElts = VecVT.getVectorNumElements();
5588   auto KIdx = dyn_cast<ConstantSDNode>(Idx);
5589   if (NumElts == 4 && EltSize == 16 && KIdx) {
5590     SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
5591 
5592     SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
5593                                  DAG.getConstant(0, SL, MVT::i32));
5594     SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
5595                                  DAG.getConstant(1, SL, MVT::i32));
5596 
5597     SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
5598     SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
5599 
5600     unsigned Idx = KIdx->getZExtValue();
5601     bool InsertLo = Idx < 2;
5602     SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
5603       InsertLo ? LoVec : HiVec,
5604       DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
5605       DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
5606 
5607     InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
5608 
5609     SDValue Concat = InsertLo ?
5610       DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
5611       DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
5612 
5613     return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
5614   }
5615 
5616   // Static indexing does not lower to stack access, and hence there is no need
5617   // for special custom lowering to avoid stack access.
5618   if (isa<ConstantSDNode>(Idx))
5619     return SDValue();
5620 
5621   // Avoid stack access for dynamic indexing by custom lowering to
5622   // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
5623 
5624   assert(VecSize <= 64 && "Expected target vector size to be <= 64 bits");
5625 
5626   MVT IntVT = MVT::getIntegerVT(VecSize);
5627 
5628   // Convert vector index to bit-index and get the required bit mask.
5629   assert(isPowerOf2_32(EltSize));
5630   SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
5631   SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
5632   SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
5633                             DAG.getConstant(0xffff, SL, IntVT),
5634                             ScaledIdx);
5635 
5636   // 1. Create a congruent vector with the target value in each element.
5637   SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT,
5638                                DAG.getSplatBuildVector(VecVT, SL, InsVal));
5639 
5640   // 2. Mask off all other indicies except the required index within (1).
5641   SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);
5642 
5643   // 3. Mask off the required index within the target vector.
5644   SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
5645   SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT,
5646                             DAG.getNOT(SL, BFM, IntVT), BCVec);
5647 
5648   // 4. Get (2) and (3) ORed into the target vector.
5649   SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS);
5650 
5651   return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI);
5652 }
5653 
5654 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
5655                                                   SelectionDAG &DAG) const {
5656   SDLoc SL(Op);
5657 
5658   EVT ResultVT = Op.getValueType();
5659   SDValue Vec = Op.getOperand(0);
5660   SDValue Idx = Op.getOperand(1);
5661   EVT VecVT = Vec.getValueType();
5662   unsigned VecSize = VecVT.getSizeInBits();
5663   EVT EltVT = VecVT.getVectorElementType();
5664 
5665   DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
5666 
5667   // Make sure we do any optimizations that will make it easier to fold
5668   // source modifiers before obscuring it with bit operations.
5669 
5670   // XXX - Why doesn't this get called when vector_shuffle is expanded?
5671   if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
5672     return Combined;
5673 
5674   if (VecSize == 128) {
5675     SDValue Lo, Hi;
5676     EVT LoVT, HiVT;
5677     SDValue V2 = DAG.getBitcast(MVT::v2i64, Vec);
5678     std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5679     Lo =
5680         DAG.getBitcast(LoVT, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i64,
5681                                          V2, DAG.getConstant(0, SL, MVT::i32)));
5682     Hi =
5683         DAG.getBitcast(HiVT, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i64,
5684                                          V2, DAG.getConstant(1, SL, MVT::i32)));
5685     EVT IdxVT = Idx.getValueType();
5686     unsigned NElem = VecVT.getVectorNumElements();
5687     assert(isPowerOf2_32(NElem));
5688     SDValue IdxMask = DAG.getConstant(NElem / 2 - 1, SL, IdxVT);
5689     SDValue NewIdx = DAG.getNode(ISD::AND, SL, IdxVT, Idx, IdxMask);
5690     SDValue Half = DAG.getSelectCC(SL, Idx, IdxMask, Hi, Lo, ISD::SETUGT);
5691     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Half, NewIdx);
5692   }
5693 
5694   assert(VecSize <= 64);
5695 
5696   MVT IntVT = MVT::getIntegerVT(VecSize);
5697 
5698   // If Vec is just a SCALAR_TO_VECTOR, then use the scalar integer directly.
5699   SDValue VecBC = peekThroughBitcasts(Vec);
5700   if (VecBC.getOpcode() == ISD::SCALAR_TO_VECTOR) {
5701     SDValue Src = VecBC.getOperand(0);
5702     Src = DAG.getBitcast(Src.getValueType().changeTypeToInteger(), Src);
5703     Vec = DAG.getAnyExtOrTrunc(Src, SL, IntVT);
5704   }
5705 
5706   unsigned EltSize = EltVT.getSizeInBits();
5707   assert(isPowerOf2_32(EltSize));
5708 
5709   SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
5710 
5711   // Convert vector index to bit-index (* EltSize)
5712   SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
5713 
5714   SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
5715   SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx);
5716 
5717   if (ResultVT == MVT::f16) {
5718     SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt);
5719     return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
5720   }
5721 
5722   return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT);
5723 }
5724 
5725 static bool elementPairIsContiguous(ArrayRef<int> Mask, int Elt) {
5726   assert(Elt % 2 == 0);
5727   return Mask[Elt + 1] == Mask[Elt] + 1 && (Mask[Elt] % 2 == 0);
5728 }
5729 
5730 SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
5731                                               SelectionDAG &DAG) const {
5732   SDLoc SL(Op);
5733   EVT ResultVT = Op.getValueType();
5734   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
5735 
5736   EVT PackVT = ResultVT.isInteger() ? MVT::v2i16 : MVT::v2f16;
5737   EVT EltVT = PackVT.getVectorElementType();
5738   int SrcNumElts = Op.getOperand(0).getValueType().getVectorNumElements();
5739 
5740   // vector_shuffle <0,1,6,7> lhs, rhs
5741   // -> concat_vectors (extract_subvector lhs, 0), (extract_subvector rhs, 2)
5742   //
5743   // vector_shuffle <6,7,2,3> lhs, rhs
5744   // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 2)
5745   //
5746   // vector_shuffle <6,7,0,1> lhs, rhs
5747   // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 0)
5748 
5749   // Avoid scalarizing when both halves are reading from consecutive elements.
5750   SmallVector<SDValue, 4> Pieces;
5751   for (int I = 0, N = ResultVT.getVectorNumElements(); I != N; I += 2) {
5752     if (elementPairIsContiguous(SVN->getMask(), I)) {
5753       const int Idx = SVN->getMaskElt(I);
5754       int VecIdx = Idx < SrcNumElts ? 0 : 1;
5755       int EltIdx = Idx < SrcNumElts ? Idx : Idx - SrcNumElts;
5756       SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL,
5757                                     PackVT, SVN->getOperand(VecIdx),
5758                                     DAG.getConstant(EltIdx, SL, MVT::i32));
5759       Pieces.push_back(SubVec);
5760     } else {
5761       const int Idx0 = SVN->getMaskElt(I);
5762       const int Idx1 = SVN->getMaskElt(I + 1);
5763       int VecIdx0 = Idx0 < SrcNumElts ? 0 : 1;
5764       int VecIdx1 = Idx1 < SrcNumElts ? 0 : 1;
5765       int EltIdx0 = Idx0 < SrcNumElts ? Idx0 : Idx0 - SrcNumElts;
5766       int EltIdx1 = Idx1 < SrcNumElts ? Idx1 : Idx1 - SrcNumElts;
5767 
5768       SDValue Vec0 = SVN->getOperand(VecIdx0);
5769       SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
5770                                  Vec0, DAG.getConstant(EltIdx0, SL, MVT::i32));
5771 
5772       SDValue Vec1 = SVN->getOperand(VecIdx1);
5773       SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
5774                                  Vec1, DAG.getConstant(EltIdx1, SL, MVT::i32));
5775       Pieces.push_back(DAG.getBuildVector(PackVT, SL, { Elt0, Elt1 }));
5776     }
5777   }
5778 
5779   return DAG.getNode(ISD::CONCAT_VECTORS, SL, ResultVT, Pieces);
5780 }
5781 
5782 SDValue SITargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
5783                                                 SelectionDAG &DAG) const {
5784   SDValue SVal = Op.getOperand(0);
5785   EVT ResultVT = Op.getValueType();
5786   EVT SValVT = SVal.getValueType();
5787   SDValue UndefVal = DAG.getUNDEF(SValVT);
5788   SDLoc SL(Op);
5789 
5790   SmallVector<SDValue, 8> VElts;
5791   VElts.push_back(SVal);
5792   for (int I = 1, E = ResultVT.getVectorNumElements(); I < E; ++I)
5793     VElts.push_back(UndefVal);
5794 
5795   return DAG.getBuildVector(ResultVT, SL, VElts);
5796 }
5797 
5798 SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
5799                                             SelectionDAG &DAG) const {
5800   SDLoc SL(Op);
5801   EVT VT = Op.getValueType();
5802 
5803   if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
5804       VT == MVT::v8i16 || VT == MVT::v8f16) {
5805     EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(),
5806                                   VT.getVectorNumElements() / 2);
5807     MVT HalfIntVT = MVT::getIntegerVT(HalfVT.getSizeInBits());
5808 
5809     // Turn into pair of packed build_vectors.
5810     // TODO: Special case for constants that can be materialized with s_mov_b64.
5811     SmallVector<SDValue, 4> LoOps, HiOps;
5812     for (unsigned I = 0, E = VT.getVectorNumElements() / 2; I != E; ++I) {
5813       LoOps.push_back(Op.getOperand(I));
5814       HiOps.push_back(Op.getOperand(I + E));
5815     }
5816     SDValue Lo = DAG.getBuildVector(HalfVT, SL, LoOps);
5817     SDValue Hi = DAG.getBuildVector(HalfVT, SL, HiOps);
5818 
5819     SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, HalfIntVT, Lo);
5820     SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, HalfIntVT, Hi);
5821 
5822     SDValue Blend = DAG.getBuildVector(MVT::getVectorVT(HalfIntVT, 2), SL,
5823                                        { CastLo, CastHi });
5824     return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
5825   }
5826 
5827   assert(VT == MVT::v2f16 || VT == MVT::v2i16);
5828   assert(!Subtarget->hasVOP3PInsts() && "this should be legal");
5829 
5830   SDValue Lo = Op.getOperand(0);
5831   SDValue Hi = Op.getOperand(1);
5832 
5833   // Avoid adding defined bits with the zero_extend.
5834   if (Hi.isUndef()) {
5835     Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
5836     SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo);
5837     return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo);
5838   }
5839 
5840   Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi);
5841   Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi);
5842 
5843   SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi,
5844                               DAG.getConstant(16, SL, MVT::i32));
5845   if (Lo.isUndef())
5846     return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi);
5847 
5848   Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
5849   Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo);
5850 
5851   SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi);
5852   return DAG.getNode(ISD::BITCAST, SL, VT, Or);
5853 }
5854 
5855 bool
5856 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
5857   // We can fold offsets for anything that doesn't require a GOT relocation.
5858   return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
5859           GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
5860           GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
5861          !shouldEmitGOTReloc(GA->getGlobal());
5862 }
5863 
5864 static SDValue
5865 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
5866                         const SDLoc &DL, int64_t Offset, EVT PtrVT,
5867                         unsigned GAFlags = SIInstrInfo::MO_NONE) {
5868   assert(isInt<32>(Offset + 4) && "32-bit offset is expected!");
5869   // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
5870   // lowered to the following code sequence:
5871   //
5872   // For constant address space:
5873   //   s_getpc_b64 s[0:1]
5874   //   s_add_u32 s0, s0, $symbol
5875   //   s_addc_u32 s1, s1, 0
5876   //
5877   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
5878   //   a fixup or relocation is emitted to replace $symbol with a literal
5879   //   constant, which is a pc-relative offset from the encoding of the $symbol
5880   //   operand to the global variable.
5881   //
5882   // For global address space:
5883   //   s_getpc_b64 s[0:1]
5884   //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
5885   //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
5886   //
5887   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
5888   //   fixups or relocations are emitted to replace $symbol@*@lo and
5889   //   $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
5890   //   which is a 64-bit pc-relative offset from the encoding of the $symbol
5891   //   operand to the global variable.
5892   //
5893   // What we want here is an offset from the value returned by s_getpc
5894   // (which is the address of the s_add_u32 instruction) to the global
5895   // variable, but since the encoding of $symbol starts 4 bytes after the start
5896   // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
5897   // small. This requires us to add 4 to the global variable offset in order to
5898   // compute the correct address. Similarly for the s_addc_u32 instruction, the
5899   // encoding of $symbol starts 12 bytes after the start of the s_add_u32
5900   // instruction.
5901   SDValue PtrLo =
5902       DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags);
5903   SDValue PtrHi;
5904   if (GAFlags == SIInstrInfo::MO_NONE) {
5905     PtrHi = DAG.getTargetConstant(0, DL, MVT::i32);
5906   } else {
5907     PtrHi =
5908         DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 12, GAFlags + 1);
5909   }
5910   return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
5911 }
5912 
5913 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
5914                                              SDValue Op,
5915                                              SelectionDAG &DAG) const {
5916   GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
5917   SDLoc DL(GSD);
5918   EVT PtrVT = Op.getValueType();
5919 
5920   const GlobalValue *GV = GSD->getGlobal();
5921   if ((GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
5922        shouldUseLDSConstAddress(GV)) ||
5923       GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS ||
5924       GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) {
5925     if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
5926         GV->hasExternalLinkage()) {
5927       Type *Ty = GV->getValueType();
5928       // HIP uses an unsized array `extern __shared__ T s[]` or similar
5929       // zero-sized type in other languages to declare the dynamic shared
5930       // memory which size is not known at the compile time. They will be
5931       // allocated by the runtime and placed directly after the static
5932       // allocated ones. They all share the same offset.
5933       if (DAG.getDataLayout().getTypeAllocSize(Ty).isZero()) {
5934         assert(PtrVT == MVT::i32 && "32-bit pointer is expected.");
5935         // Adjust alignment for that dynamic shared memory array.
5936         MFI->setDynLDSAlign(DAG.getDataLayout(), *cast<GlobalVariable>(GV));
5937         return SDValue(
5938             DAG.getMachineNode(AMDGPU::GET_GROUPSTATICSIZE, DL, PtrVT), 0);
5939       }
5940     }
5941     return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
5942   }
5943 
5944   if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
5945     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, GSD->getOffset(),
5946                                             SIInstrInfo::MO_ABS32_LO);
5947     return DAG.getNode(AMDGPUISD::LDS, DL, MVT::i32, GA);
5948   }
5949 
5950   if (shouldEmitFixup(GV))
5951     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
5952   else if (shouldEmitPCReloc(GV))
5953     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
5954                                    SIInstrInfo::MO_REL32);
5955 
5956   SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
5957                                             SIInstrInfo::MO_GOTPCREL32);
5958 
5959   Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
5960   PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
5961   const DataLayout &DataLayout = DAG.getDataLayout();
5962   Align Alignment = DataLayout.getABITypeAlign(PtrTy);
5963   MachinePointerInfo PtrInfo
5964     = MachinePointerInfo::getGOT(DAG.getMachineFunction());
5965 
5966   return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Alignment,
5967                      MachineMemOperand::MODereferenceable |
5968                          MachineMemOperand::MOInvariant);
5969 }
5970 
5971 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
5972                                    const SDLoc &DL, SDValue V) const {
5973   // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
5974   // the destination register.
5975   //
5976   // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
5977   // so we will end up with redundant moves to m0.
5978   //
5979   // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
5980 
5981   // A Null SDValue creates a glue result.
5982   SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
5983                                   V, Chain);
5984   return SDValue(M0, 0);
5985 }
5986 
5987 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
5988                                                  SDValue Op,
5989                                                  MVT VT,
5990                                                  unsigned Offset) const {
5991   SDLoc SL(Op);
5992   SDValue Param = lowerKernargMemParameter(
5993       DAG, MVT::i32, MVT::i32, SL, DAG.getEntryNode(), Offset, Align(4), false);
5994   // The local size values will have the hi 16-bits as zero.
5995   return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
5996                      DAG.getValueType(VT));
5997 }
5998 
5999 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
6000                                         EVT VT) {
6001   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
6002                                       "non-hsa intrinsic with hsa target",
6003                                       DL.getDebugLoc());
6004   DAG.getContext()->diagnose(BadIntrin);
6005   return DAG.getUNDEF(VT);
6006 }
6007 
6008 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
6009                                          EVT VT) {
6010   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
6011                                       "intrinsic not supported on subtarget",
6012                                       DL.getDebugLoc());
6013   DAG.getContext()->diagnose(BadIntrin);
6014   return DAG.getUNDEF(VT);
6015 }
6016 
6017 static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
6018                                     ArrayRef<SDValue> Elts) {
6019   assert(!Elts.empty());
6020   MVT Type;
6021   unsigned NumElts = Elts.size();
6022 
6023   if (NumElts <= 8) {
6024     Type = MVT::getVectorVT(MVT::f32, NumElts);
6025   } else {
6026     assert(Elts.size() <= 16);
6027     Type = MVT::v16f32;
6028     NumElts = 16;
6029   }
6030 
6031   SmallVector<SDValue, 16> VecElts(NumElts);
6032   for (unsigned i = 0; i < Elts.size(); ++i) {
6033     SDValue Elt = Elts[i];
6034     if (Elt.getValueType() != MVT::f32)
6035       Elt = DAG.getBitcast(MVT::f32, Elt);
6036     VecElts[i] = Elt;
6037   }
6038   for (unsigned i = Elts.size(); i < NumElts; ++i)
6039     VecElts[i] = DAG.getUNDEF(MVT::f32);
6040 
6041   if (NumElts == 1)
6042     return VecElts[0];
6043   return DAG.getBuildVector(Type, DL, VecElts);
6044 }
6045 
6046 static SDValue padEltsToUndef(SelectionDAG &DAG, const SDLoc &DL, EVT CastVT,
6047                               SDValue Src, int ExtraElts) {
6048   EVT SrcVT = Src.getValueType();
6049 
6050   SmallVector<SDValue, 8> Elts;
6051 
6052   if (SrcVT.isVector())
6053     DAG.ExtractVectorElements(Src, Elts);
6054   else
6055     Elts.push_back(Src);
6056 
6057   SDValue Undef = DAG.getUNDEF(SrcVT.getScalarType());
6058   while (ExtraElts--)
6059     Elts.push_back(Undef);
6060 
6061   return DAG.getBuildVector(CastVT, DL, Elts);
6062 }
6063 
6064 // Re-construct the required return value for a image load intrinsic.
6065 // This is more complicated due to the optional use TexFailCtrl which means the required
6066 // return type is an aggregate
6067 static SDValue constructRetValue(SelectionDAG &DAG,
6068                                  MachineSDNode *Result,
6069                                  ArrayRef<EVT> ResultTypes,
6070                                  bool IsTexFail, bool Unpacked, bool IsD16,
6071                                  int DMaskPop, int NumVDataDwords,
6072                                  const SDLoc &DL) {
6073   // Determine the required return type. This is the same regardless of IsTexFail flag
6074   EVT ReqRetVT = ResultTypes[0];
6075   int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1;
6076   int NumDataDwords = (!IsD16 || (IsD16 && Unpacked)) ?
6077     ReqRetNumElts : (ReqRetNumElts + 1) / 2;
6078 
6079   int MaskPopDwords = (!IsD16 || (IsD16 && Unpacked)) ?
6080     DMaskPop : (DMaskPop + 1) / 2;
6081 
6082   MVT DataDwordVT = NumDataDwords == 1 ?
6083     MVT::i32 : MVT::getVectorVT(MVT::i32, NumDataDwords);
6084 
6085   MVT MaskPopVT = MaskPopDwords == 1 ?
6086     MVT::i32 : MVT::getVectorVT(MVT::i32, MaskPopDwords);
6087 
6088   SDValue Data(Result, 0);
6089   SDValue TexFail;
6090 
6091   if (DMaskPop > 0 && Data.getValueType() != MaskPopVT) {
6092     SDValue ZeroIdx = DAG.getConstant(0, DL, MVT::i32);
6093     if (MaskPopVT.isVector()) {
6094       Data = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MaskPopVT,
6095                          SDValue(Result, 0), ZeroIdx);
6096     } else {
6097       Data = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MaskPopVT,
6098                          SDValue(Result, 0), ZeroIdx);
6099     }
6100   }
6101 
6102   if (DataDwordVT.isVector())
6103     Data = padEltsToUndef(DAG, DL, DataDwordVT, Data,
6104                           NumDataDwords - MaskPopDwords);
6105 
6106   if (IsD16)
6107     Data = adjustLoadValueTypeImpl(Data, ReqRetVT, DL, DAG, Unpacked);
6108 
6109   EVT LegalReqRetVT = ReqRetVT;
6110   if (!ReqRetVT.isVector()) {
6111     if (!Data.getValueType().isInteger())
6112       Data = DAG.getNode(ISD::BITCAST, DL,
6113                          Data.getValueType().changeTypeToInteger(), Data);
6114     Data = DAG.getNode(ISD::TRUNCATE, DL, ReqRetVT.changeTypeToInteger(), Data);
6115   } else {
6116     // We need to widen the return vector to a legal type
6117     if ((ReqRetVT.getVectorNumElements() % 2) == 1 &&
6118         ReqRetVT.getVectorElementType().getSizeInBits() == 16) {
6119       LegalReqRetVT =
6120           EVT::getVectorVT(*DAG.getContext(), ReqRetVT.getVectorElementType(),
6121                            ReqRetVT.getVectorNumElements() + 1);
6122     }
6123   }
6124   Data = DAG.getNode(ISD::BITCAST, DL, LegalReqRetVT, Data);
6125 
6126   if (IsTexFail) {
6127     TexFail =
6128         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, SDValue(Result, 0),
6129                     DAG.getConstant(MaskPopDwords, DL, MVT::i32));
6130 
6131     return DAG.getMergeValues({Data, TexFail, SDValue(Result, 1)}, DL);
6132   }
6133 
6134   if (Result->getNumValues() == 1)
6135     return Data;
6136 
6137   return DAG.getMergeValues({Data, SDValue(Result, 1)}, DL);
6138 }
6139 
6140 static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE,
6141                          SDValue *LWE, bool &IsTexFail) {
6142   auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode());
6143 
6144   uint64_t Value = TexFailCtrlConst->getZExtValue();
6145   if (Value) {
6146     IsTexFail = true;
6147   }
6148 
6149   SDLoc DL(TexFailCtrlConst);
6150   *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
6151   Value &= ~(uint64_t)0x1;
6152   *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
6153   Value &= ~(uint64_t)0x2;
6154 
6155   return Value == 0;
6156 }
6157 
6158 static void packImage16bitOpsToDwords(SelectionDAG &DAG, SDValue Op,
6159                                       MVT PackVectorVT,
6160                                       SmallVectorImpl<SDValue> &PackedAddrs,
6161                                       unsigned DimIdx, unsigned EndIdx,
6162                                       unsigned NumGradients) {
6163   SDLoc DL(Op);
6164   for (unsigned I = DimIdx; I < EndIdx; I++) {
6165     SDValue Addr = Op.getOperand(I);
6166 
6167     // Gradients are packed with undef for each coordinate.
6168     // In <hi 16 bit>,<lo 16 bit> notation, the registers look like this:
6169     // 1D: undef,dx/dh; undef,dx/dv
6170     // 2D: dy/dh,dx/dh; dy/dv,dx/dv
6171     // 3D: dy/dh,dx/dh; undef,dz/dh; dy/dv,dx/dv; undef,dz/dv
6172     if (((I + 1) >= EndIdx) ||
6173         ((NumGradients / 2) % 2 == 1 && (I == DimIdx + (NumGradients / 2) - 1 ||
6174                                          I == DimIdx + NumGradients - 1))) {
6175       if (Addr.getValueType() != MVT::i16)
6176         Addr = DAG.getBitcast(MVT::i16, Addr);
6177       Addr = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Addr);
6178     } else {
6179       Addr = DAG.getBuildVector(PackVectorVT, DL, {Addr, Op.getOperand(I + 1)});
6180       I++;
6181     }
6182     Addr = DAG.getBitcast(MVT::f32, Addr);
6183     PackedAddrs.push_back(Addr);
6184   }
6185 }
6186 
6187 SDValue SITargetLowering::lowerImage(SDValue Op,
6188                                      const AMDGPU::ImageDimIntrinsicInfo *Intr,
6189                                      SelectionDAG &DAG, bool WithChain) const {
6190   SDLoc DL(Op);
6191   MachineFunction &MF = DAG.getMachineFunction();
6192   const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>();
6193   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
6194       AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
6195   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
6196   unsigned IntrOpcode = Intr->BaseOpcode;
6197   bool IsGFX10Plus = AMDGPU::isGFX10Plus(*Subtarget);
6198 
6199   SmallVector<EVT, 3> ResultTypes(Op->values());
6200   SmallVector<EVT, 3> OrigResultTypes(Op->values());
6201   bool IsD16 = false;
6202   bool IsG16 = false;
6203   bool IsA16 = false;
6204   SDValue VData;
6205   int NumVDataDwords;
6206   bool AdjustRetType = false;
6207 
6208   // Offset of intrinsic arguments
6209   const unsigned ArgOffset = WithChain ? 2 : 1;
6210 
6211   unsigned DMask;
6212   unsigned DMaskLanes = 0;
6213 
6214   if (BaseOpcode->Atomic) {
6215     VData = Op.getOperand(2);
6216 
6217     bool Is64Bit = VData.getValueType() == MVT::i64;
6218     if (BaseOpcode->AtomicX2) {
6219       SDValue VData2 = Op.getOperand(3);
6220       VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL,
6221                                  {VData, VData2});
6222       if (Is64Bit)
6223         VData = DAG.getBitcast(MVT::v4i32, VData);
6224 
6225       ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32;
6226       DMask = Is64Bit ? 0xf : 0x3;
6227       NumVDataDwords = Is64Bit ? 4 : 2;
6228     } else {
6229       DMask = Is64Bit ? 0x3 : 0x1;
6230       NumVDataDwords = Is64Bit ? 2 : 1;
6231     }
6232   } else {
6233     auto *DMaskConst =
6234         cast<ConstantSDNode>(Op.getOperand(ArgOffset + Intr->DMaskIndex));
6235     DMask = DMaskConst->getZExtValue();
6236     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
6237 
6238     if (BaseOpcode->Store) {
6239       VData = Op.getOperand(2);
6240 
6241       MVT StoreVT = VData.getSimpleValueType();
6242       if (StoreVT.getScalarType() == MVT::f16) {
6243         if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
6244           return Op; // D16 is unsupported for this instruction
6245 
6246         IsD16 = true;
6247         VData = handleD16VData(VData, DAG, true);
6248       }
6249 
6250       NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32;
6251     } else {
6252       // Work out the num dwords based on the dmask popcount and underlying type
6253       // and whether packing is supported.
6254       MVT LoadVT = ResultTypes[0].getSimpleVT();
6255       if (LoadVT.getScalarType() == MVT::f16) {
6256         if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
6257           return Op; // D16 is unsupported for this instruction
6258 
6259         IsD16 = true;
6260       }
6261 
6262       // Confirm that the return type is large enough for the dmask specified
6263       if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) ||
6264           (!LoadVT.isVector() && DMaskLanes > 1))
6265           return Op;
6266 
6267       // The sq block of gfx8 and gfx9 do not estimate register use correctly
6268       // for d16 image_gather4, image_gather4_l, and image_gather4_lz
6269       // instructions.
6270       if (IsD16 && !Subtarget->hasUnpackedD16VMem() &&
6271           !(BaseOpcode->Gather4 && Subtarget->hasImageGather4D16Bug()))
6272         NumVDataDwords = (DMaskLanes + 1) / 2;
6273       else
6274         NumVDataDwords = DMaskLanes;
6275 
6276       AdjustRetType = true;
6277     }
6278   }
6279 
6280   unsigned VAddrEnd = ArgOffset + Intr->VAddrEnd;
6281   SmallVector<SDValue, 4> VAddrs;
6282 
6283   // Check for 16 bit addresses or derivatives and pack if true.
6284   MVT VAddrVT =
6285       Op.getOperand(ArgOffset + Intr->GradientStart).getSimpleValueType();
6286   MVT VAddrScalarVT = VAddrVT.getScalarType();
6287   MVT GradPackVectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16;
6288   IsG16 = VAddrScalarVT == MVT::f16 || VAddrScalarVT == MVT::i16;
6289 
6290   VAddrVT = Op.getOperand(ArgOffset + Intr->CoordStart).getSimpleValueType();
6291   VAddrScalarVT = VAddrVT.getScalarType();
6292   MVT AddrPackVectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16;
6293   IsA16 = VAddrScalarVT == MVT::f16 || VAddrScalarVT == MVT::i16;
6294 
6295   // Push back extra arguments.
6296   for (unsigned I = Intr->VAddrStart; I < Intr->GradientStart; I++) {
6297     if (IsA16 && (Op.getOperand(ArgOffset + I).getValueType() == MVT::f16)) {
6298       assert(I == Intr->BiasIndex && "Got unexpected 16-bit extra argument");
6299       // Special handling of bias when A16 is on. Bias is of type half but
6300       // occupies full 32-bit.
6301       SDValue Bias = DAG.getBuildVector(
6302           MVT::v2f16, DL,
6303           {Op.getOperand(ArgOffset + I), DAG.getUNDEF(MVT::f16)});
6304       VAddrs.push_back(Bias);
6305     } else {
6306       assert((!IsA16 || Intr->NumBiasArgs == 0 || I != Intr->BiasIndex) &&
6307              "Bias needs to be converted to 16 bit in A16 mode");
6308       VAddrs.push_back(Op.getOperand(ArgOffset + I));
6309     }
6310   }
6311 
6312   if (BaseOpcode->Gradients && !ST->hasG16() && (IsA16 != IsG16)) {
6313     // 16 bit gradients are supported, but are tied to the A16 control
6314     // so both gradients and addresses must be 16 bit
6315     LLVM_DEBUG(
6316         dbgs() << "Failed to lower image intrinsic: 16 bit addresses "
6317                   "require 16 bit args for both gradients and addresses");
6318     return Op;
6319   }
6320 
6321   if (IsA16) {
6322     if (!ST->hasA16()) {
6323       LLVM_DEBUG(dbgs() << "Failed to lower image intrinsic: Target does not "
6324                            "support 16 bit addresses\n");
6325       return Op;
6326     }
6327   }
6328 
6329   // We've dealt with incorrect input so we know that if IsA16, IsG16
6330   // are set then we have to compress/pack operands (either address,
6331   // gradient or both)
6332   // In the case where a16 and gradients are tied (no G16 support) then we
6333   // have already verified that both IsA16 and IsG16 are true
6334   if (BaseOpcode->Gradients && IsG16 && ST->hasG16()) {
6335     // Activate g16
6336     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
6337         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
6338     IntrOpcode = G16MappingInfo->G16; // set new opcode to variant with _g16
6339   }
6340 
6341   // Add gradients (packed or unpacked)
6342   if (IsG16) {
6343     // Pack the gradients
6344     // const int PackEndIdx = IsA16 ? VAddrEnd : (ArgOffset + Intr->CoordStart);
6345     packImage16bitOpsToDwords(DAG, Op, GradPackVectorVT, VAddrs,
6346                               ArgOffset + Intr->GradientStart,
6347                               ArgOffset + Intr->CoordStart, Intr->NumGradients);
6348   } else {
6349     for (unsigned I = ArgOffset + Intr->GradientStart;
6350          I < ArgOffset + Intr->CoordStart; I++)
6351       VAddrs.push_back(Op.getOperand(I));
6352   }
6353 
6354   // Add addresses (packed or unpacked)
6355   if (IsA16) {
6356     packImage16bitOpsToDwords(DAG, Op, AddrPackVectorVT, VAddrs,
6357                               ArgOffset + Intr->CoordStart, VAddrEnd,
6358                               0 /* No gradients */);
6359   } else {
6360     // Add uncompressed address
6361     for (unsigned I = ArgOffset + Intr->CoordStart; I < VAddrEnd; I++)
6362       VAddrs.push_back(Op.getOperand(I));
6363   }
6364 
6365   // If the register allocator cannot place the address registers contiguously
6366   // without introducing moves, then using the non-sequential address encoding
6367   // is always preferable, since it saves VALU instructions and is usually a
6368   // wash in terms of code size or even better.
6369   //
6370   // However, we currently have no way of hinting to the register allocator that
6371   // MIMG addresses should be placed contiguously when it is possible to do so,
6372   // so force non-NSA for the common 2-address case as a heuristic.
6373   //
6374   // SIShrinkInstructions will convert NSA encodings to non-NSA after register
6375   // allocation when possible.
6376   bool UseNSA = ST->hasFeature(AMDGPU::FeatureNSAEncoding) &&
6377                 VAddrs.size() >= 3 &&
6378                 VAddrs.size() <= (unsigned)ST->getNSAMaxSize();
6379   SDValue VAddr;
6380   if (!UseNSA)
6381     VAddr = getBuildDwordsVector(DAG, DL, VAddrs);
6382 
6383   SDValue True = DAG.getTargetConstant(1, DL, MVT::i1);
6384   SDValue False = DAG.getTargetConstant(0, DL, MVT::i1);
6385   SDValue Unorm;
6386   if (!BaseOpcode->Sampler) {
6387     Unorm = True;
6388   } else {
6389     auto UnormConst =
6390         cast<ConstantSDNode>(Op.getOperand(ArgOffset + Intr->UnormIndex));
6391 
6392     Unorm = UnormConst->getZExtValue() ? True : False;
6393   }
6394 
6395   SDValue TFE;
6396   SDValue LWE;
6397   SDValue TexFail = Op.getOperand(ArgOffset + Intr->TexFailCtrlIndex);
6398   bool IsTexFail = false;
6399   if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail))
6400     return Op;
6401 
6402   if (IsTexFail) {
6403     if (!DMaskLanes) {
6404       // Expecting to get an error flag since TFC is on - and dmask is 0
6405       // Force dmask to be at least 1 otherwise the instruction will fail
6406       DMask = 0x1;
6407       DMaskLanes = 1;
6408       NumVDataDwords = 1;
6409     }
6410     NumVDataDwords += 1;
6411     AdjustRetType = true;
6412   }
6413 
6414   // Has something earlier tagged that the return type needs adjusting
6415   // This happens if the instruction is a load or has set TexFailCtrl flags
6416   if (AdjustRetType) {
6417     // NumVDataDwords reflects the true number of dwords required in the return type
6418     if (DMaskLanes == 0 && !BaseOpcode->Store) {
6419       // This is a no-op load. This can be eliminated
6420       SDValue Undef = DAG.getUNDEF(Op.getValueType());
6421       if (isa<MemSDNode>(Op))
6422         return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
6423       return Undef;
6424     }
6425 
6426     EVT NewVT = NumVDataDwords > 1 ?
6427                   EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumVDataDwords)
6428                 : MVT::i32;
6429 
6430     ResultTypes[0] = NewVT;
6431     if (ResultTypes.size() == 3) {
6432       // Original result was aggregate type used for TexFailCtrl results
6433       // The actual instruction returns as a vector type which has now been
6434       // created. Remove the aggregate result.
6435       ResultTypes.erase(&ResultTypes[1]);
6436     }
6437   }
6438 
6439   unsigned CPol = cast<ConstantSDNode>(
6440       Op.getOperand(ArgOffset + Intr->CachePolicyIndex))->getZExtValue();
6441   if (BaseOpcode->Atomic)
6442     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
6443   if (CPol & ~AMDGPU::CPol::ALL)
6444     return Op;
6445 
6446   SmallVector<SDValue, 26> Ops;
6447   if (BaseOpcode->Store || BaseOpcode->Atomic)
6448     Ops.push_back(VData); // vdata
6449   if (UseNSA)
6450     append_range(Ops, VAddrs);
6451   else
6452     Ops.push_back(VAddr);
6453   Ops.push_back(Op.getOperand(ArgOffset + Intr->RsrcIndex));
6454   if (BaseOpcode->Sampler)
6455     Ops.push_back(Op.getOperand(ArgOffset + Intr->SampIndex));
6456   Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32));
6457   if (IsGFX10Plus)
6458     Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32));
6459   Ops.push_back(Unorm);
6460   Ops.push_back(DAG.getTargetConstant(CPol, DL, MVT::i32));
6461   Ops.push_back(IsA16 &&  // r128, a16 for gfx9
6462                 ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False);
6463   if (IsGFX10Plus)
6464     Ops.push_back(IsA16 ? True : False);
6465   if (!Subtarget->hasGFX90AInsts()) {
6466     Ops.push_back(TFE); //tfe
6467   } else if (cast<ConstantSDNode>(TFE)->getZExtValue()) {
6468     report_fatal_error("TFE is not supported on this GPU");
6469   }
6470   Ops.push_back(LWE); // lwe
6471   if (!IsGFX10Plus)
6472     Ops.push_back(DimInfo->DA ? True : False);
6473   if (BaseOpcode->HasD16)
6474     Ops.push_back(IsD16 ? True : False);
6475   if (isa<MemSDNode>(Op))
6476     Ops.push_back(Op.getOperand(0)); // chain
6477 
6478   int NumVAddrDwords =
6479       UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32;
6480   int Opcode = -1;
6481 
6482   if (IsGFX10Plus) {
6483     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
6484                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
6485                                           : AMDGPU::MIMGEncGfx10Default,
6486                                    NumVDataDwords, NumVAddrDwords);
6487   } else {
6488     if (Subtarget->hasGFX90AInsts()) {
6489       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a,
6490                                      NumVDataDwords, NumVAddrDwords);
6491       if (Opcode == -1)
6492         report_fatal_error(
6493             "requested image instruction is not supported on this GPU");
6494     }
6495     if (Opcode == -1 &&
6496         Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
6497       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
6498                                      NumVDataDwords, NumVAddrDwords);
6499     if (Opcode == -1)
6500       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
6501                                      NumVDataDwords, NumVAddrDwords);
6502   }
6503   assert(Opcode != -1);
6504 
6505   MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops);
6506   if (auto MemOp = dyn_cast<MemSDNode>(Op)) {
6507     MachineMemOperand *MemRef = MemOp->getMemOperand();
6508     DAG.setNodeMemRefs(NewNode, {MemRef});
6509   }
6510 
6511   if (BaseOpcode->AtomicX2) {
6512     SmallVector<SDValue, 1> Elt;
6513     DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1);
6514     return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL);
6515   }
6516   if (BaseOpcode->Store)
6517     return SDValue(NewNode, 0);
6518   return constructRetValue(DAG, NewNode,
6519                            OrigResultTypes, IsTexFail,
6520                            Subtarget->hasUnpackedD16VMem(), IsD16,
6521                            DMaskLanes, NumVDataDwords, DL);
6522 }
6523 
6524 SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
6525                                        SDValue Offset, SDValue CachePolicy,
6526                                        SelectionDAG &DAG) const {
6527   MachineFunction &MF = DAG.getMachineFunction();
6528 
6529   const DataLayout &DataLayout = DAG.getDataLayout();
6530   Align Alignment =
6531       DataLayout.getABITypeAlign(VT.getTypeForEVT(*DAG.getContext()));
6532 
6533   MachineMemOperand *MMO = MF.getMachineMemOperand(
6534       MachinePointerInfo(),
6535       MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
6536           MachineMemOperand::MOInvariant,
6537       VT.getStoreSize(), Alignment);
6538 
6539   if (!Offset->isDivergent()) {
6540     SDValue Ops[] = {
6541         Rsrc,
6542         Offset, // Offset
6543         CachePolicy
6544     };
6545 
6546     // Widen vec3 load to vec4.
6547     if (VT.isVector() && VT.getVectorNumElements() == 3) {
6548       EVT WidenedVT =
6549           EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
6550       auto WidenedOp = DAG.getMemIntrinsicNode(
6551           AMDGPUISD::SBUFFER_LOAD, DL, DAG.getVTList(WidenedVT), Ops, WidenedVT,
6552           MF.getMachineMemOperand(MMO, 0, WidenedVT.getStoreSize()));
6553       auto Subvector = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, WidenedOp,
6554                                    DAG.getVectorIdxConstant(0, DL));
6555       return Subvector;
6556     }
6557 
6558     return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL,
6559                                    DAG.getVTList(VT), Ops, VT, MMO);
6560   }
6561 
6562   // We have a divergent offset. Emit a MUBUF buffer load instead. We can
6563   // assume that the buffer is unswizzled.
6564   SmallVector<SDValue, 4> Loads;
6565   unsigned NumLoads = 1;
6566   MVT LoadVT = VT.getSimpleVT();
6567   unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1;
6568   assert((LoadVT.getScalarType() == MVT::i32 ||
6569           LoadVT.getScalarType() == MVT::f32));
6570 
6571   if (NumElts == 8 || NumElts == 16) {
6572     NumLoads = NumElts / 4;
6573     LoadVT = MVT::getVectorVT(LoadVT.getScalarType(), 4);
6574   }
6575 
6576   SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue});
6577   SDValue Ops[] = {
6578       DAG.getEntryNode(),                               // Chain
6579       Rsrc,                                             // rsrc
6580       DAG.getConstant(0, DL, MVT::i32),                 // vindex
6581       {},                                               // voffset
6582       {},                                               // soffset
6583       {},                                               // offset
6584       CachePolicy,                                      // cachepolicy
6585       DAG.getTargetConstant(0, DL, MVT::i1),            // idxen
6586   };
6587 
6588   // Use the alignment to ensure that the required offsets will fit into the
6589   // immediate offsets.
6590   setBufferOffsets(Offset, DAG, &Ops[3],
6591                    NumLoads > 1 ? Align(16 * NumLoads) : Align(4));
6592 
6593   uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
6594   for (unsigned i = 0; i < NumLoads; ++i) {
6595     Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32);
6596     Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, Ops,
6597                                         LoadVT, MMO, DAG));
6598   }
6599 
6600   if (NumElts == 8 || NumElts == 16)
6601     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads);
6602 
6603   return Loads[0];
6604 }
6605 
6606 SDValue SITargetLowering::lowerWorkitemID(SelectionDAG &DAG, SDValue Op,
6607                                           unsigned Dim,
6608                                           const ArgDescriptor &Arg) const {
6609   SDLoc SL(Op);
6610   MachineFunction &MF = DAG.getMachineFunction();
6611   unsigned MaxID = Subtarget->getMaxWorkitemID(MF.getFunction(), Dim);
6612   if (MaxID == 0)
6613     return DAG.getConstant(0, SL, MVT::i32);
6614 
6615   SDValue Val = loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
6616                                SDLoc(DAG.getEntryNode()), Arg);
6617 
6618   // Don't bother inserting AssertZext for packed IDs since we're emitting the
6619   // masking operations anyway.
6620   //
6621   // TODO: We could assert the top bit is 0 for the source copy.
6622   if (Arg.isMasked())
6623     return Val;
6624 
6625   // Preserve the known bits after expansion to a copy.
6626   EVT SmallVT =
6627       EVT::getIntegerVT(*DAG.getContext(), 32 - countLeadingZeros(MaxID));
6628   return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Val,
6629                      DAG.getValueType(SmallVT));
6630 }
6631 
6632 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
6633                                                   SelectionDAG &DAG) const {
6634   MachineFunction &MF = DAG.getMachineFunction();
6635   auto MFI = MF.getInfo<SIMachineFunctionInfo>();
6636 
6637   EVT VT = Op.getValueType();
6638   SDLoc DL(Op);
6639   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
6640 
6641   // TODO: Should this propagate fast-math-flags?
6642 
6643   switch (IntrinsicID) {
6644   case Intrinsic::amdgcn_implicit_buffer_ptr: {
6645     if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction()))
6646       return emitNonHSAIntrinsicError(DAG, DL, VT);
6647     return getPreloadedValue(DAG, *MFI, VT,
6648                              AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
6649   }
6650   case Intrinsic::amdgcn_dispatch_ptr:
6651   case Intrinsic::amdgcn_queue_ptr: {
6652     if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) {
6653       DiagnosticInfoUnsupported BadIntrin(
6654           MF.getFunction(), "unsupported hsa intrinsic without hsa target",
6655           DL.getDebugLoc());
6656       DAG.getContext()->diagnose(BadIntrin);
6657       return DAG.getUNDEF(VT);
6658     }
6659 
6660     auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
6661       AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
6662     return getPreloadedValue(DAG, *MFI, VT, RegID);
6663   }
6664   case Intrinsic::amdgcn_implicitarg_ptr: {
6665     if (MFI->isEntryFunction())
6666       return getImplicitArgPtr(DAG, DL);
6667     return getPreloadedValue(DAG, *MFI, VT,
6668                              AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
6669   }
6670   case Intrinsic::amdgcn_kernarg_segment_ptr: {
6671     if (!AMDGPU::isKernel(MF.getFunction().getCallingConv())) {
6672       // This only makes sense to call in a kernel, so just lower to null.
6673       return DAG.getConstant(0, DL, VT);
6674     }
6675 
6676     return getPreloadedValue(DAG, *MFI, VT,
6677                              AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
6678   }
6679   case Intrinsic::amdgcn_dispatch_id: {
6680     return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
6681   }
6682   case Intrinsic::amdgcn_rcp:
6683     return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
6684   case Intrinsic::amdgcn_rsq:
6685     return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
6686   case Intrinsic::amdgcn_rsq_legacy:
6687     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
6688       return emitRemovedIntrinsicError(DAG, DL, VT);
6689     return SDValue();
6690   case Intrinsic::amdgcn_rcp_legacy:
6691     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
6692       return emitRemovedIntrinsicError(DAG, DL, VT);
6693     return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
6694   case Intrinsic::amdgcn_rsq_clamp: {
6695     if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
6696       return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
6697 
6698     Type *Type = VT.getTypeForEVT(*DAG.getContext());
6699     APFloat Max = APFloat::getLargest(Type->getFltSemantics());
6700     APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
6701 
6702     SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
6703     SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
6704                               DAG.getConstantFP(Max, DL, VT));
6705     return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
6706                        DAG.getConstantFP(Min, DL, VT));
6707   }
6708   case Intrinsic::r600_read_ngroups_x:
6709     if (Subtarget->isAmdHsaOS())
6710       return emitNonHSAIntrinsicError(DAG, DL, VT);
6711 
6712     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
6713                                     SI::KernelInputOffsets::NGROUPS_X, Align(4),
6714                                     false);
6715   case Intrinsic::r600_read_ngroups_y:
6716     if (Subtarget->isAmdHsaOS())
6717       return emitNonHSAIntrinsicError(DAG, DL, VT);
6718 
6719     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
6720                                     SI::KernelInputOffsets::NGROUPS_Y, Align(4),
6721                                     false);
6722   case Intrinsic::r600_read_ngroups_z:
6723     if (Subtarget->isAmdHsaOS())
6724       return emitNonHSAIntrinsicError(DAG, DL, VT);
6725 
6726     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
6727                                     SI::KernelInputOffsets::NGROUPS_Z, Align(4),
6728                                     false);
6729   case Intrinsic::r600_read_global_size_x:
6730     if (Subtarget->isAmdHsaOS())
6731       return emitNonHSAIntrinsicError(DAG, DL, VT);
6732 
6733     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
6734                                     SI::KernelInputOffsets::GLOBAL_SIZE_X,
6735                                     Align(4), false);
6736   case Intrinsic::r600_read_global_size_y:
6737     if (Subtarget->isAmdHsaOS())
6738       return emitNonHSAIntrinsicError(DAG, DL, VT);
6739 
6740     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
6741                                     SI::KernelInputOffsets::GLOBAL_SIZE_Y,
6742                                     Align(4), false);
6743   case Intrinsic::r600_read_global_size_z:
6744     if (Subtarget->isAmdHsaOS())
6745       return emitNonHSAIntrinsicError(DAG, DL, VT);
6746 
6747     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
6748                                     SI::KernelInputOffsets::GLOBAL_SIZE_Z,
6749                                     Align(4), false);
6750   case Intrinsic::r600_read_local_size_x:
6751     if (Subtarget->isAmdHsaOS())
6752       return emitNonHSAIntrinsicError(DAG, DL, VT);
6753 
6754     return lowerImplicitZextParam(DAG, Op, MVT::i16,
6755                                   SI::KernelInputOffsets::LOCAL_SIZE_X);
6756   case Intrinsic::r600_read_local_size_y:
6757     if (Subtarget->isAmdHsaOS())
6758       return emitNonHSAIntrinsicError(DAG, DL, VT);
6759 
6760     return lowerImplicitZextParam(DAG, Op, MVT::i16,
6761                                   SI::KernelInputOffsets::LOCAL_SIZE_Y);
6762   case Intrinsic::r600_read_local_size_z:
6763     if (Subtarget->isAmdHsaOS())
6764       return emitNonHSAIntrinsicError(DAG, DL, VT);
6765 
6766     return lowerImplicitZextParam(DAG, Op, MVT::i16,
6767                                   SI::KernelInputOffsets::LOCAL_SIZE_Z);
6768   case Intrinsic::amdgcn_workgroup_id_x:
6769     return getPreloadedValue(DAG, *MFI, VT,
6770                              AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
6771   case Intrinsic::amdgcn_workgroup_id_y:
6772     return getPreloadedValue(DAG, *MFI, VT,
6773                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
6774   case Intrinsic::amdgcn_workgroup_id_z:
6775     return getPreloadedValue(DAG, *MFI, VT,
6776                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
6777   case Intrinsic::amdgcn_workitem_id_x:
6778     return lowerWorkitemID(DAG, Op, 0, MFI->getArgInfo().WorkItemIDX);
6779   case Intrinsic::amdgcn_workitem_id_y:
6780     return lowerWorkitemID(DAG, Op, 1, MFI->getArgInfo().WorkItemIDY);
6781   case Intrinsic::amdgcn_workitem_id_z:
6782     return lowerWorkitemID(DAG, Op, 2, MFI->getArgInfo().WorkItemIDZ);
6783   case Intrinsic::amdgcn_wavefrontsize:
6784     return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(),
6785                            SDLoc(Op), MVT::i32);
6786   case Intrinsic::amdgcn_s_buffer_load: {
6787     unsigned CPol = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
6788     if (CPol & ~AMDGPU::CPol::ALL)
6789       return Op;
6790     return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
6791                         DAG);
6792   }
6793   case Intrinsic::amdgcn_fdiv_fast:
6794     return lowerFDIV_FAST(Op, DAG);
6795   case Intrinsic::amdgcn_sin:
6796     return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
6797 
6798   case Intrinsic::amdgcn_cos:
6799     return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
6800 
6801   case Intrinsic::amdgcn_mul_u24:
6802     return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT, Op.getOperand(1), Op.getOperand(2));
6803   case Intrinsic::amdgcn_mul_i24:
6804     return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT, Op.getOperand(1), Op.getOperand(2));
6805 
6806   case Intrinsic::amdgcn_log_clamp: {
6807     if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
6808       return SDValue();
6809 
6810     return emitRemovedIntrinsicError(DAG, DL, VT);
6811   }
6812   case Intrinsic::amdgcn_ldexp:
6813     return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
6814                        Op.getOperand(1), Op.getOperand(2));
6815 
6816   case Intrinsic::amdgcn_fract:
6817     return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
6818 
6819   case Intrinsic::amdgcn_class:
6820     return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
6821                        Op.getOperand(1), Op.getOperand(2));
6822   case Intrinsic::amdgcn_div_fmas:
6823     return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
6824                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
6825                        Op.getOperand(4));
6826 
6827   case Intrinsic::amdgcn_div_fixup:
6828     return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
6829                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
6830 
6831   case Intrinsic::amdgcn_div_scale: {
6832     const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3));
6833 
6834     // Translate to the operands expected by the machine instruction. The
6835     // first parameter must be the same as the first instruction.
6836     SDValue Numerator = Op.getOperand(1);
6837     SDValue Denominator = Op.getOperand(2);
6838 
6839     // Note this order is opposite of the machine instruction's operations,
6840     // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
6841     // intrinsic has the numerator as the first operand to match a normal
6842     // division operation.
6843 
6844     SDValue Src0 = Param->isAllOnes() ? Numerator : Denominator;
6845 
6846     return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
6847                        Denominator, Numerator);
6848   }
6849   case Intrinsic::amdgcn_icmp: {
6850     // There is a Pat that handles this variant, so return it as-is.
6851     if (Op.getOperand(1).getValueType() == MVT::i1 &&
6852         Op.getConstantOperandVal(2) == 0 &&
6853         Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE)
6854       return Op;
6855     return lowerICMPIntrinsic(*this, Op.getNode(), DAG);
6856   }
6857   case Intrinsic::amdgcn_fcmp: {
6858     return lowerFCMPIntrinsic(*this, Op.getNode(), DAG);
6859   }
6860   case Intrinsic::amdgcn_ballot:
6861     return lowerBALLOTIntrinsic(*this, Op.getNode(), DAG);
6862   case Intrinsic::amdgcn_fmed3:
6863     return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
6864                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
6865   case Intrinsic::amdgcn_fdot2:
6866     return DAG.getNode(AMDGPUISD::FDOT2, DL, VT,
6867                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
6868                        Op.getOperand(4));
6869   case Intrinsic::amdgcn_fmul_legacy:
6870     return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
6871                        Op.getOperand(1), Op.getOperand(2));
6872   case Intrinsic::amdgcn_sffbh:
6873     return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
6874   case Intrinsic::amdgcn_sbfe:
6875     return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
6876                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
6877   case Intrinsic::amdgcn_ubfe:
6878     return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
6879                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
6880   case Intrinsic::amdgcn_cvt_pkrtz:
6881   case Intrinsic::amdgcn_cvt_pknorm_i16:
6882   case Intrinsic::amdgcn_cvt_pknorm_u16:
6883   case Intrinsic::amdgcn_cvt_pk_i16:
6884   case Intrinsic::amdgcn_cvt_pk_u16: {
6885     // FIXME: Stop adding cast if v2f16/v2i16 are legal.
6886     EVT VT = Op.getValueType();
6887     unsigned Opcode;
6888 
6889     if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
6890       Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
6891     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
6892       Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
6893     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
6894       Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
6895     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
6896       Opcode = AMDGPUISD::CVT_PK_I16_I32;
6897     else
6898       Opcode = AMDGPUISD::CVT_PK_U16_U32;
6899 
6900     if (isTypeLegal(VT))
6901       return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2));
6902 
6903     SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
6904                                Op.getOperand(1), Op.getOperand(2));
6905     return DAG.getNode(ISD::BITCAST, DL, VT, Node);
6906   }
6907   case Intrinsic::amdgcn_fmad_ftz:
6908     return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1),
6909                        Op.getOperand(2), Op.getOperand(3));
6910 
6911   case Intrinsic::amdgcn_if_break:
6912     return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT,
6913                                       Op->getOperand(1), Op->getOperand(2)), 0);
6914 
6915   case Intrinsic::amdgcn_groupstaticsize: {
6916     Triple::OSType OS = getTargetMachine().getTargetTriple().getOS();
6917     if (OS == Triple::AMDHSA || OS == Triple::AMDPAL)
6918       return Op;
6919 
6920     const Module *M = MF.getFunction().getParent();
6921     const GlobalValue *GV =
6922         M->getNamedValue(Intrinsic::getName(Intrinsic::amdgcn_groupstaticsize));
6923     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0,
6924                                             SIInstrInfo::MO_ABS32_LO);
6925     return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0};
6926   }
6927   case Intrinsic::amdgcn_is_shared:
6928   case Intrinsic::amdgcn_is_private: {
6929     SDLoc SL(Op);
6930     unsigned AS = (IntrinsicID == Intrinsic::amdgcn_is_shared) ?
6931       AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
6932     SDValue Aperture = getSegmentAperture(AS, SL, DAG);
6933     SDValue SrcVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32,
6934                                  Op.getOperand(1));
6935 
6936     SDValue SrcHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, SrcVec,
6937                                 DAG.getConstant(1, SL, MVT::i32));
6938     return DAG.getSetCC(SL, MVT::i1, SrcHi, Aperture, ISD::SETEQ);
6939   }
6940   case Intrinsic::amdgcn_perm:
6941     return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, Op.getOperand(1),
6942                        Op.getOperand(2), Op.getOperand(3));
6943   case Intrinsic::amdgcn_reloc_constant: {
6944     Module *M = const_cast<Module *>(MF.getFunction().getParent());
6945     const MDNode *Metadata = cast<MDNodeSDNode>(Op.getOperand(1))->getMD();
6946     auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
6947     auto RelocSymbol = cast<GlobalVariable>(
6948         M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
6949     SDValue GA = DAG.getTargetGlobalAddress(RelocSymbol, DL, MVT::i32, 0,
6950                                             SIInstrInfo::MO_ABS32_LO);
6951     return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0};
6952   }
6953   default:
6954     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6955             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
6956       return lowerImage(Op, ImageDimIntr, DAG, false);
6957 
6958     return Op;
6959   }
6960 }
6961 
6962 /// Update \p MMO based on the offset inputs to an intrinsic.
6963 static void updateBufferMMO(MachineMemOperand *MMO, SDValue VOffset,
6964                             SDValue SOffset, SDValue Offset,
6965                             SDValue VIndex = SDValue()) {
6966   if (!isa<ConstantSDNode>(VOffset) || !isa<ConstantSDNode>(SOffset) ||
6967       !isa<ConstantSDNode>(Offset)) {
6968     // The combined offset is not known to be constant, so we cannot represent
6969     // it in the MMO. Give up.
6970     MMO->setValue((Value *)nullptr);
6971     return;
6972   }
6973 
6974   if (VIndex && (!isa<ConstantSDNode>(VIndex) ||
6975                  !cast<ConstantSDNode>(VIndex)->isZero())) {
6976     // The strided index component of the address is not known to be zero, so we
6977     // cannot represent it in the MMO. Give up.
6978     MMO->setValue((Value *)nullptr);
6979     return;
6980   }
6981 
6982   MMO->setOffset(cast<ConstantSDNode>(VOffset)->getSExtValue() +
6983                  cast<ConstantSDNode>(SOffset)->getSExtValue() +
6984                  cast<ConstantSDNode>(Offset)->getSExtValue());
6985 }
6986 
6987 SDValue SITargetLowering::lowerRawBufferAtomicIntrin(SDValue Op,
6988                                                      SelectionDAG &DAG,
6989                                                      unsigned NewOpcode) const {
6990   SDLoc DL(Op);
6991 
6992   SDValue VData = Op.getOperand(2);
6993   auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6994   SDValue Ops[] = {
6995     Op.getOperand(0), // Chain
6996     VData,            // vdata
6997     Op.getOperand(3), // rsrc
6998     DAG.getConstant(0, DL, MVT::i32), // vindex
6999     Offsets.first,    // voffset
7000     Op.getOperand(5), // soffset
7001     Offsets.second,   // offset
7002     Op.getOperand(6), // cachepolicy
7003     DAG.getTargetConstant(0, DL, MVT::i1), // idxen
7004   };
7005 
7006   auto *M = cast<MemSDNode>(Op);
7007   updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6]);
7008 
7009   EVT MemVT = VData.getValueType();
7010   return DAG.getMemIntrinsicNode(NewOpcode, DL, Op->getVTList(), Ops, MemVT,
7011                                  M->getMemOperand());
7012 }
7013 
7014 // Return a value to use for the idxen operand by examining the vindex operand.
7015 static unsigned getIdxEn(SDValue VIndex) {
7016   if (auto VIndexC = dyn_cast<ConstantSDNode>(VIndex))
7017     // No need to set idxen if vindex is known to be zero.
7018     return VIndexC->getZExtValue() != 0;
7019   return 1;
7020 }
7021 
7022 SDValue
7023 SITargetLowering::lowerStructBufferAtomicIntrin(SDValue Op, SelectionDAG &DAG,
7024                                                 unsigned NewOpcode) const {
7025   SDLoc DL(Op);
7026 
7027   SDValue VData = Op.getOperand(2);
7028   auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
7029   SDValue Ops[] = {
7030     Op.getOperand(0), // Chain
7031     VData,            // vdata
7032     Op.getOperand(3), // rsrc
7033     Op.getOperand(4), // vindex
7034     Offsets.first,    // voffset
7035     Op.getOperand(6), // soffset
7036     Offsets.second,   // offset
7037     Op.getOperand(7), // cachepolicy
7038     DAG.getTargetConstant(1, DL, MVT::i1), // idxen
7039   };
7040 
7041   auto *M = cast<MemSDNode>(Op);
7042   updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);
7043 
7044   EVT MemVT = VData.getValueType();
7045   return DAG.getMemIntrinsicNode(NewOpcode, DL, Op->getVTList(), Ops, MemVT,
7046                                  M->getMemOperand());
7047 }
7048 
7049 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
7050                                                  SelectionDAG &DAG) const {
7051   unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
7052   SDLoc DL(Op);
7053 
7054   switch (IntrID) {
7055   case Intrinsic::amdgcn_ds_ordered_add:
7056   case Intrinsic::amdgcn_ds_ordered_swap: {
7057     MemSDNode *M = cast<MemSDNode>(Op);
7058     SDValue Chain = M->getOperand(0);
7059     SDValue M0 = M->getOperand(2);
7060     SDValue Value = M->getOperand(3);
7061     unsigned IndexOperand = M->getConstantOperandVal(7);
7062     unsigned WaveRelease = M->getConstantOperandVal(8);
7063     unsigned WaveDone = M->getConstantOperandVal(9);
7064 
7065     unsigned OrderedCountIndex = IndexOperand & 0x3f;
7066     IndexOperand &= ~0x3f;
7067     unsigned CountDw = 0;
7068 
7069     if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) {
7070       CountDw = (IndexOperand >> 24) & 0xf;
7071       IndexOperand &= ~(0xf << 24);
7072 
7073       if (CountDw < 1 || CountDw > 4) {
7074         report_fatal_error(
7075             "ds_ordered_count: dword count must be between 1 and 4");
7076       }
7077     }
7078 
7079     if (IndexOperand)
7080       report_fatal_error("ds_ordered_count: bad index operand");
7081 
7082     if (WaveDone && !WaveRelease)
7083       report_fatal_error("ds_ordered_count: wave_done requires wave_release");
7084 
7085     unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
7086     unsigned ShaderType =
7087         SIInstrInfo::getDSShaderTypeValue(DAG.getMachineFunction());
7088     unsigned Offset0 = OrderedCountIndex << 2;
7089     unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
7090                        (Instruction << 4);
7091 
7092     if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
7093       Offset1 |= (CountDw - 1) << 6;
7094 
7095     unsigned Offset = Offset0 | (Offset1 << 8);
7096 
7097     SDValue Ops[] = {
7098       Chain,
7099       Value,
7100       DAG.getTargetConstant(Offset, DL, MVT::i16),
7101       copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue
7102     };
7103     return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL,
7104                                    M->getVTList(), Ops, M->getMemoryVT(),
7105                                    M->getMemOperand());
7106   }
7107   case Intrinsic::amdgcn_ds_fadd: {
7108     MemSDNode *M = cast<MemSDNode>(Op);
7109     unsigned Opc;
7110     switch (IntrID) {
7111     case Intrinsic::amdgcn_ds_fadd:
7112       Opc = ISD::ATOMIC_LOAD_FADD;
7113       break;
7114     }
7115 
7116     return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(),
7117                          M->getOperand(0), M->getOperand(2), M->getOperand(3),
7118                          M->getMemOperand());
7119   }
7120   case Intrinsic::amdgcn_atomic_inc:
7121   case Intrinsic::amdgcn_atomic_dec:
7122   case Intrinsic::amdgcn_ds_fmin:
7123   case Intrinsic::amdgcn_ds_fmax: {
7124     MemSDNode *M = cast<MemSDNode>(Op);
7125     unsigned Opc;
7126     switch (IntrID) {
7127     case Intrinsic::amdgcn_atomic_inc:
7128       Opc = AMDGPUISD::ATOMIC_INC;
7129       break;
7130     case Intrinsic::amdgcn_atomic_dec:
7131       Opc = AMDGPUISD::ATOMIC_DEC;
7132       break;
7133     case Intrinsic::amdgcn_ds_fmin:
7134       Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
7135       break;
7136     case Intrinsic::amdgcn_ds_fmax:
7137       Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
7138       break;
7139     default:
7140       llvm_unreachable("Unknown intrinsic!");
7141     }
7142     SDValue Ops[] = {
7143       M->getOperand(0), // Chain
7144       M->getOperand(2), // Ptr
7145       M->getOperand(3)  // Value
7146     };
7147 
7148     return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
7149                                    M->getMemoryVT(), M->getMemOperand());
7150   }
7151   case Intrinsic::amdgcn_buffer_load:
7152   case Intrinsic::amdgcn_buffer_load_format: {
7153     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
7154     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
7155     unsigned IdxEn = getIdxEn(Op.getOperand(3));
7156     SDValue Ops[] = {
7157       Op.getOperand(0), // Chain
7158       Op.getOperand(2), // rsrc
7159       Op.getOperand(3), // vindex
7160       SDValue(),        // voffset -- will be set by setBufferOffsets
7161       SDValue(),        // soffset -- will be set by setBufferOffsets
7162       SDValue(),        // offset -- will be set by setBufferOffsets
7163       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
7164       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
7165     };
7166     setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]);
7167 
7168     unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
7169         AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
7170 
7171     EVT VT = Op.getValueType();
7172     EVT IntVT = VT.changeTypeToInteger();
7173     auto *M = cast<MemSDNode>(Op);
7174     updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5], Ops[2]);
7175     EVT LoadVT = Op.getValueType();
7176 
7177     if (LoadVT.getScalarType() == MVT::f16)
7178       return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
7179                                  M, DAG, Ops);
7180 
7181     // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
7182     if (LoadVT.getScalarType() == MVT::i8 ||
7183         LoadVT.getScalarType() == MVT::i16)
7184       return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
7185 
7186     return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
7187                                M->getMemOperand(), DAG);
7188   }
7189   case Intrinsic::amdgcn_raw_buffer_load:
7190   case Intrinsic::amdgcn_raw_buffer_load_format: {
7191     const bool IsFormat = IntrID == Intrinsic::amdgcn_raw_buffer_load_format;
7192 
7193     auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
7194     SDValue Ops[] = {
7195       Op.getOperand(0), // Chain
7196       Op.getOperand(2), // rsrc
7197       DAG.getConstant(0, DL, MVT::i32), // vindex
7198       Offsets.first,    // voffset
7199       Op.getOperand(4), // soffset
7200       Offsets.second,   // offset
7201       Op.getOperand(5), // cachepolicy, swizzled buffer
7202       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
7203     };
7204 
7205     auto *M = cast<MemSDNode>(Op);
7206     updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5]);
7207     return lowerIntrinsicLoad(M, IsFormat, DAG, Ops);
7208   }
7209   case Intrinsic::amdgcn_struct_buffer_load:
7210   case Intrinsic::amdgcn_struct_buffer_load_format: {
7211     const bool IsFormat = IntrID == Intrinsic::amdgcn_struct_buffer_load_format;
7212 
7213     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
7214     SDValue Ops[] = {
7215       Op.getOperand(0), // Chain
7216       Op.getOperand(2), // rsrc
7217       Op.getOperand(3), // vindex
7218       Offsets.first,    // voffset
7219       Op.getOperand(5), // soffset
7220       Offsets.second,   // offset
7221       Op.getOperand(6), // cachepolicy, swizzled buffer
7222       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
7223     };
7224 
7225     auto *M = cast<MemSDNode>(Op);
7226     updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5], Ops[2]);
7227     return lowerIntrinsicLoad(cast<MemSDNode>(Op), IsFormat, DAG, Ops);
7228   }
7229   case Intrinsic::amdgcn_tbuffer_load: {
7230     MemSDNode *M = cast<MemSDNode>(Op);
7231     EVT LoadVT = Op.getValueType();
7232 
7233     unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
7234     unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
7235     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
7236     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
7237     unsigned IdxEn = getIdxEn(Op.getOperand(3));
7238     SDValue Ops[] = {
7239       Op.getOperand(0),  // Chain
7240       Op.getOperand(2),  // rsrc
7241       Op.getOperand(3),  // vindex
7242       Op.getOperand(4),  // voffset
7243       Op.getOperand(5),  // soffset
7244       Op.getOperand(6),  // offset
7245       DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
7246       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
7247       DAG.getTargetConstant(IdxEn, DL, MVT::i1) // idxen
7248     };
7249 
7250     if (LoadVT.getScalarType() == MVT::f16)
7251       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
7252                                  M, DAG, Ops);
7253     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
7254                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
7255                                DAG);
7256   }
7257   case Intrinsic::amdgcn_raw_tbuffer_load: {
7258     MemSDNode *M = cast<MemSDNode>(Op);
7259     EVT LoadVT = Op.getValueType();
7260     auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
7261 
7262     SDValue Ops[] = {
7263       Op.getOperand(0),  // Chain
7264       Op.getOperand(2),  // rsrc
7265       DAG.getConstant(0, DL, MVT::i32), // vindex
7266       Offsets.first,     // voffset
7267       Op.getOperand(4),  // soffset
7268       Offsets.second,    // offset
7269       Op.getOperand(5),  // format
7270       Op.getOperand(6),  // cachepolicy, swizzled buffer
7271       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
7272     };
7273 
7274     if (LoadVT.getScalarType() == MVT::f16)
7275       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
7276                                  M, DAG, Ops);
7277     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
7278                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
7279                                DAG);
7280   }
7281   case Intrinsic::amdgcn_struct_tbuffer_load: {
7282     MemSDNode *M = cast<MemSDNode>(Op);
7283     EVT LoadVT = Op.getValueType();
7284     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
7285 
7286     SDValue Ops[] = {
7287       Op.getOperand(0),  // Chain
7288       Op.getOperand(2),  // rsrc
7289       Op.getOperand(3),  // vindex
7290       Offsets.first,     // voffset
7291       Op.getOperand(5),  // soffset
7292       Offsets.second,    // offset
7293       Op.getOperand(6),  // format
7294       Op.getOperand(7),  // cachepolicy, swizzled buffer
7295       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
7296     };
7297 
7298     if (LoadVT.getScalarType() == MVT::f16)
7299       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
7300                                  M, DAG, Ops);
7301     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
7302                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
7303                                DAG);
7304   }
7305   case Intrinsic::amdgcn_buffer_atomic_swap:
7306   case Intrinsic::amdgcn_buffer_atomic_add:
7307   case Intrinsic::amdgcn_buffer_atomic_sub:
7308   case Intrinsic::amdgcn_buffer_atomic_csub:
7309   case Intrinsic::amdgcn_buffer_atomic_smin:
7310   case Intrinsic::amdgcn_buffer_atomic_umin:
7311   case Intrinsic::amdgcn_buffer_atomic_smax:
7312   case Intrinsic::amdgcn_buffer_atomic_umax:
7313   case Intrinsic::amdgcn_buffer_atomic_and:
7314   case Intrinsic::amdgcn_buffer_atomic_or:
7315   case Intrinsic::amdgcn_buffer_atomic_xor:
7316   case Intrinsic::amdgcn_buffer_atomic_fadd: {
7317     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
7318     unsigned IdxEn = getIdxEn(Op.getOperand(4));
7319     SDValue Ops[] = {
7320       Op.getOperand(0), // Chain
7321       Op.getOperand(2), // vdata
7322       Op.getOperand(3), // rsrc
7323       Op.getOperand(4), // vindex
7324       SDValue(),        // voffset -- will be set by setBufferOffsets
7325       SDValue(),        // soffset -- will be set by setBufferOffsets
7326       SDValue(),        // offset -- will be set by setBufferOffsets
7327       DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
7328       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
7329     };
7330     setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
7331 
7332     EVT VT = Op.getValueType();
7333 
7334     auto *M = cast<MemSDNode>(Op);
7335     updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);
7336     unsigned Opcode = 0;
7337 
7338     switch (IntrID) {
7339     case Intrinsic::amdgcn_buffer_atomic_swap:
7340       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
7341       break;
7342     case Intrinsic::amdgcn_buffer_atomic_add:
7343       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
7344       break;
7345     case Intrinsic::amdgcn_buffer_atomic_sub:
7346       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
7347       break;
7348     case Intrinsic::amdgcn_buffer_atomic_csub:
7349       Opcode = AMDGPUISD::BUFFER_ATOMIC_CSUB;
7350       break;
7351     case Intrinsic::amdgcn_buffer_atomic_smin:
7352       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
7353       break;
7354     case Intrinsic::amdgcn_buffer_atomic_umin:
7355       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
7356       break;
7357     case Intrinsic::amdgcn_buffer_atomic_smax:
7358       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
7359       break;
7360     case Intrinsic::amdgcn_buffer_atomic_umax:
7361       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
7362       break;
7363     case Intrinsic::amdgcn_buffer_atomic_and:
7364       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
7365       break;
7366     case Intrinsic::amdgcn_buffer_atomic_or:
7367       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
7368       break;
7369     case Intrinsic::amdgcn_buffer_atomic_xor:
7370       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
7371       break;
7372     case Intrinsic::amdgcn_buffer_atomic_fadd:
7373       if (!Op.getValue(0).use_empty() && !Subtarget->hasGFX90AInsts()) {
7374         DiagnosticInfoUnsupported
7375           NoFpRet(DAG.getMachineFunction().getFunction(),
7376                   "return versions of fp atomics not supported",
7377                   DL.getDebugLoc(), DS_Error);
7378         DAG.getContext()->diagnose(NoFpRet);
7379         return SDValue();
7380       }
7381       Opcode = AMDGPUISD::BUFFER_ATOMIC_FADD;
7382       break;
7383     default:
7384       llvm_unreachable("unhandled atomic opcode");
7385     }
7386 
7387     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
7388                                    M->getMemOperand());
7389   }
7390   case Intrinsic::amdgcn_raw_buffer_atomic_fadd:
7391     return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FADD);
7392   case Intrinsic::amdgcn_struct_buffer_atomic_fadd:
7393     return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FADD);
7394   case Intrinsic::amdgcn_raw_buffer_atomic_fmin:
7395     return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMIN);
7396   case Intrinsic::amdgcn_struct_buffer_atomic_fmin:
7397     return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMIN);
7398   case Intrinsic::amdgcn_raw_buffer_atomic_fmax:
7399     return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMAX);
7400   case Intrinsic::amdgcn_struct_buffer_atomic_fmax:
7401     return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMAX);
7402   case Intrinsic::amdgcn_raw_buffer_atomic_swap:
7403     return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SWAP);
7404   case Intrinsic::amdgcn_raw_buffer_atomic_add:
7405     return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_ADD);
7406   case Intrinsic::amdgcn_raw_buffer_atomic_sub:
7407     return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SUB);
7408   case Intrinsic::amdgcn_raw_buffer_atomic_smin:
7409     return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SMIN);
7410   case Intrinsic::amdgcn_raw_buffer_atomic_umin:
7411     return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_UMIN);
7412   case Intrinsic::amdgcn_raw_buffer_atomic_smax:
7413     return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SMAX);
7414   case Intrinsic::amdgcn_raw_buffer_atomic_umax:
7415     return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_UMAX);
7416   case Intrinsic::amdgcn_raw_buffer_atomic_and:
7417     return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_AND);
7418   case Intrinsic::amdgcn_raw_buffer_atomic_or:
7419     return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_OR);
7420   case Intrinsic::amdgcn_raw_buffer_atomic_xor:
7421     return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_XOR);
7422   case Intrinsic::amdgcn_raw_buffer_atomic_inc:
7423     return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_INC);
7424   case Intrinsic::amdgcn_raw_buffer_atomic_dec:
7425     return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC);
7426   case Intrinsic::amdgcn_struct_buffer_atomic_swap:
7427     return lowerStructBufferAtomicIntrin(Op, DAG,
7428                                          AMDGPUISD::BUFFER_ATOMIC_SWAP);
7429   case Intrinsic::amdgcn_struct_buffer_atomic_add:
7430     return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_ADD);
7431   case Intrinsic::amdgcn_struct_buffer_atomic_sub:
7432     return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SUB);
7433   case Intrinsic::amdgcn_struct_buffer_atomic_smin:
7434     return lowerStructBufferAtomicIntrin(Op, DAG,
7435                                          AMDGPUISD::BUFFER_ATOMIC_SMIN);
7436   case Intrinsic::amdgcn_struct_buffer_atomic_umin:
7437     return lowerStructBufferAtomicIntrin(Op, DAG,
7438                                          AMDGPUISD::BUFFER_ATOMIC_UMIN);
7439   case Intrinsic::amdgcn_struct_buffer_atomic_smax:
7440     return lowerStructBufferAtomicIntrin(Op, DAG,
7441                                          AMDGPUISD::BUFFER_ATOMIC_SMAX);
7442   case Intrinsic::amdgcn_struct_buffer_atomic_umax:
7443     return lowerStructBufferAtomicIntrin(Op, DAG,
7444                                          AMDGPUISD::BUFFER_ATOMIC_UMAX);
7445   case Intrinsic::amdgcn_struct_buffer_atomic_and:
7446     return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_AND);
7447   case Intrinsic::amdgcn_struct_buffer_atomic_or:
7448     return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_OR);
7449   case Intrinsic::amdgcn_struct_buffer_atomic_xor:
7450     return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_XOR);
7451   case Intrinsic::amdgcn_struct_buffer_atomic_inc:
7452     return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_INC);
7453   case Intrinsic::amdgcn_struct_buffer_atomic_dec:
7454     return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC);
7455 
7456   case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
7457     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
7458     unsigned IdxEn = getIdxEn(Op.getOperand(5));
7459     SDValue Ops[] = {
7460       Op.getOperand(0), // Chain
7461       Op.getOperand(2), // src
7462       Op.getOperand(3), // cmp
7463       Op.getOperand(4), // rsrc
7464       Op.getOperand(5), // vindex
7465       SDValue(),        // voffset -- will be set by setBufferOffsets
7466       SDValue(),        // soffset -- will be set by setBufferOffsets
7467       SDValue(),        // offset -- will be set by setBufferOffsets
7468       DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
7469       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
7470     };
7471     setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]);
7472 
7473     EVT VT = Op.getValueType();
7474     auto *M = cast<MemSDNode>(Op);
7475     updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7], Ops[4]);
7476 
7477     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
7478                                    Op->getVTList(), Ops, VT, M->getMemOperand());
7479   }
7480   case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: {
7481     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
7482     SDValue Ops[] = {
7483       Op.getOperand(0), // Chain
7484       Op.getOperand(2), // src
7485       Op.getOperand(3), // cmp
7486       Op.getOperand(4), // rsrc
7487       DAG.getConstant(0, DL, MVT::i32), // vindex
7488       Offsets.first,    // voffset
7489       Op.getOperand(6), // soffset
7490       Offsets.second,   // offset
7491       Op.getOperand(7), // cachepolicy
7492       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
7493     };
7494     EVT VT = Op.getValueType();
7495     auto *M = cast<MemSDNode>(Op);
7496     updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7]);
7497 
7498     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
7499                                    Op->getVTList(), Ops, VT, M->getMemOperand());
7500   }
7501   case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: {
7502     auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG);
7503     SDValue Ops[] = {
7504       Op.getOperand(0), // Chain
7505       Op.getOperand(2), // src
7506       Op.getOperand(3), // cmp
7507       Op.getOperand(4), // rsrc
7508       Op.getOperand(5), // vindex
7509       Offsets.first,    // voffset
7510       Op.getOperand(7), // soffset
7511       Offsets.second,   // offset
7512       Op.getOperand(8), // cachepolicy
7513       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
7514     };
7515     EVT VT = Op.getValueType();
7516     auto *M = cast<MemSDNode>(Op);
7517     updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7], Ops[4]);
7518 
7519     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
7520                                    Op->getVTList(), Ops, VT, M->getMemOperand());
7521   }
7522   case Intrinsic::amdgcn_image_bvh_intersect_ray: {
7523     MemSDNode *M = cast<MemSDNode>(Op);
7524     SDValue NodePtr = M->getOperand(2);
7525     SDValue RayExtent = M->getOperand(3);
7526     SDValue RayOrigin = M->getOperand(4);
7527     SDValue RayDir = M->getOperand(5);
7528     SDValue RayInvDir = M->getOperand(6);
7529     SDValue TDescr = M->getOperand(7);
7530 
7531     assert(NodePtr.getValueType() == MVT::i32 ||
7532            NodePtr.getValueType() == MVT::i64);
7533     assert(RayDir.getValueType() == MVT::v3f16 ||
7534            RayDir.getValueType() == MVT::v3f32);
7535 
7536     if (!Subtarget->hasGFX10_AEncoding()) {
7537       emitRemovedIntrinsicError(DAG, DL, Op.getValueType());
7538       return SDValue();
7539     }
7540 
7541     const bool IsA16 = RayDir.getValueType().getVectorElementType() == MVT::f16;
7542     const bool Is64 = NodePtr.getValueType() == MVT::i64;
7543     const unsigned NumVDataDwords = 4;
7544     const unsigned NumVAddrDwords = IsA16 ? (Is64 ? 9 : 8) : (Is64 ? 12 : 11);
7545     const bool UseNSA = Subtarget->hasNSAEncoding() &&
7546                         NumVAddrDwords <= Subtarget->getNSAMaxSize();
7547     const unsigned BaseOpcodes[2][2] = {
7548         {AMDGPU::IMAGE_BVH_INTERSECT_RAY, AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16},
7549         {AMDGPU::IMAGE_BVH64_INTERSECT_RAY,
7550          AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16}};
7551     int Opcode;
7552     if (UseNSA) {
7553       Opcode = AMDGPU::getMIMGOpcode(BaseOpcodes[Is64][IsA16],
7554                                      AMDGPU::MIMGEncGfx10NSA, NumVDataDwords,
7555                                      NumVAddrDwords);
7556     } else {
7557       Opcode = AMDGPU::getMIMGOpcode(
7558           BaseOpcodes[Is64][IsA16], AMDGPU::MIMGEncGfx10Default, NumVDataDwords,
7559           PowerOf2Ceil(NumVAddrDwords));
7560     }
7561     assert(Opcode != -1);
7562 
7563     SmallVector<SDValue, 16> Ops;
7564 
7565     auto packLanes = [&DAG, &Ops, &DL] (SDValue Op, bool IsAligned) {
7566       SmallVector<SDValue, 3> Lanes;
7567       DAG.ExtractVectorElements(Op, Lanes, 0, 3);
7568       if (Lanes[0].getValueSizeInBits() == 32) {
7569         for (unsigned I = 0; I < 3; ++I)
7570           Ops.push_back(DAG.getBitcast(MVT::i32, Lanes[I]));
7571       } else {
7572         if (IsAligned) {
7573           Ops.push_back(
7574             DAG.getBitcast(MVT::i32,
7575                            DAG.getBuildVector(MVT::v2f16, DL,
7576                                               { Lanes[0], Lanes[1] })));
7577           Ops.push_back(Lanes[2]);
7578         } else {
7579           SDValue Elt0 = Ops.pop_back_val();
7580           Ops.push_back(
7581             DAG.getBitcast(MVT::i32,
7582                            DAG.getBuildVector(MVT::v2f16, DL,
7583                                               { Elt0, Lanes[0] })));
7584           Ops.push_back(
7585             DAG.getBitcast(MVT::i32,
7586                            DAG.getBuildVector(MVT::v2f16, DL,
7587                                               { Lanes[1], Lanes[2] })));
7588         }
7589       }
7590     };
7591 
7592     if (Is64)
7593       DAG.ExtractVectorElements(DAG.getBitcast(MVT::v2i32, NodePtr), Ops, 0, 2);
7594     else
7595       Ops.push_back(NodePtr);
7596 
7597     Ops.push_back(DAG.getBitcast(MVT::i32, RayExtent));
7598     packLanes(RayOrigin, true);
7599     packLanes(RayDir, true);
7600     packLanes(RayInvDir, false);
7601 
7602     if (!UseNSA) {
7603       // Build a single vector containing all the operands so far prepared.
7604       if (NumVAddrDwords > 8) {
7605         SDValue Undef = DAG.getUNDEF(MVT::i32);
7606         Ops.append(16 - Ops.size(), Undef);
7607       }
7608       assert(Ops.size() == 8 || Ops.size() == 16);
7609       SDValue MergedOps = DAG.getBuildVector(
7610           Ops.size() == 16 ? MVT::v16i32 : MVT::v8i32, DL, Ops);
7611       Ops.clear();
7612       Ops.push_back(MergedOps);
7613     }
7614 
7615     Ops.push_back(TDescr);
7616     if (IsA16)
7617       Ops.push_back(DAG.getTargetConstant(1, DL, MVT::i1));
7618     Ops.push_back(M->getChain());
7619 
7620     auto *NewNode = DAG.getMachineNode(Opcode, DL, M->getVTList(), Ops);
7621     MachineMemOperand *MemRef = M->getMemOperand();
7622     DAG.setNodeMemRefs(NewNode, {MemRef});
7623     return SDValue(NewNode, 0);
7624   }
7625   case Intrinsic::amdgcn_global_atomic_fadd:
7626     if (!Op.getValue(0).use_empty() && !Subtarget->hasGFX90AInsts()) {
7627       DiagnosticInfoUnsupported
7628         NoFpRet(DAG.getMachineFunction().getFunction(),
7629                 "return versions of fp atomics not supported",
7630                 DL.getDebugLoc(), DS_Error);
7631       DAG.getContext()->diagnose(NoFpRet);
7632       return SDValue();
7633     }
7634     LLVM_FALLTHROUGH;
7635   case Intrinsic::amdgcn_global_atomic_fmin:
7636   case Intrinsic::amdgcn_global_atomic_fmax:
7637   case Intrinsic::amdgcn_flat_atomic_fadd:
7638   case Intrinsic::amdgcn_flat_atomic_fmin:
7639   case Intrinsic::amdgcn_flat_atomic_fmax: {
7640     MemSDNode *M = cast<MemSDNode>(Op);
7641     SDValue Ops[] = {
7642       M->getOperand(0), // Chain
7643       M->getOperand(2), // Ptr
7644       M->getOperand(3)  // Value
7645     };
7646     unsigned Opcode = 0;
7647     switch (IntrID) {
7648     case Intrinsic::amdgcn_global_atomic_fadd:
7649     case Intrinsic::amdgcn_flat_atomic_fadd: {
7650       EVT VT = Op.getOperand(3).getValueType();
7651       return DAG.getAtomic(ISD::ATOMIC_LOAD_FADD, DL, VT,
7652                            DAG.getVTList(VT, MVT::Other), Ops,
7653                            M->getMemOperand());
7654     }
7655     case Intrinsic::amdgcn_global_atomic_fmin:
7656     case Intrinsic::amdgcn_flat_atomic_fmin: {
7657       Opcode = AMDGPUISD::ATOMIC_LOAD_FMIN;
7658       break;
7659     }
7660     case Intrinsic::amdgcn_global_atomic_fmax:
7661     case Intrinsic::amdgcn_flat_atomic_fmax: {
7662       Opcode = AMDGPUISD::ATOMIC_LOAD_FMAX;
7663       break;
7664     }
7665     default:
7666       llvm_unreachable("unhandled atomic opcode");
7667     }
7668     return DAG.getMemIntrinsicNode(Opcode, SDLoc(Op),
7669                                    M->getVTList(), Ops, M->getMemoryVT(),
7670                                    M->getMemOperand());
7671   }
7672   default:
7673 
7674     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
7675             AMDGPU::getImageDimIntrinsicInfo(IntrID))
7676       return lowerImage(Op, ImageDimIntr, DAG, true);
7677 
7678     return SDValue();
7679   }
7680 }
7681 
7682 // Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to
7683 // dwordx4 if on SI.
7684 SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL,
7685                                               SDVTList VTList,
7686                                               ArrayRef<SDValue> Ops, EVT MemVT,
7687                                               MachineMemOperand *MMO,
7688                                               SelectionDAG &DAG) const {
7689   EVT VT = VTList.VTs[0];
7690   EVT WidenedVT = VT;
7691   EVT WidenedMemVT = MemVT;
7692   if (!Subtarget->hasDwordx3LoadStores() &&
7693       (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) {
7694     WidenedVT = EVT::getVectorVT(*DAG.getContext(),
7695                                  WidenedVT.getVectorElementType(), 4);
7696     WidenedMemVT = EVT::getVectorVT(*DAG.getContext(),
7697                                     WidenedMemVT.getVectorElementType(), 4);
7698     MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16);
7699   }
7700 
7701   assert(VTList.NumVTs == 2);
7702   SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]);
7703 
7704   auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops,
7705                                        WidenedMemVT, MMO);
7706   if (WidenedVT != VT) {
7707     auto Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp,
7708                                DAG.getVectorIdxConstant(0, DL));
7709     NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL);
7710   }
7711   return NewOp;
7712 }
7713 
7714 SDValue SITargetLowering::handleD16VData(SDValue VData, SelectionDAG &DAG,
7715                                          bool ImageStore) const {
7716   EVT StoreVT = VData.getValueType();
7717 
7718   // No change for f16 and legal vector D16 types.
7719   if (!StoreVT.isVector())
7720     return VData;
7721 
7722   SDLoc DL(VData);
7723   unsigned NumElements = StoreVT.getVectorNumElements();
7724 
7725   if (Subtarget->hasUnpackedD16VMem()) {
7726     // We need to unpack the packed data to store.
7727     EVT IntStoreVT = StoreVT.changeTypeToInteger();
7728     SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
7729 
7730     EVT EquivStoreVT =
7731         EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElements);
7732     SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
7733     return DAG.UnrollVectorOp(ZExt.getNode());
7734   }
7735 
7736   // The sq block of gfx8.1 does not estimate register use correctly for d16
7737   // image store instructions. The data operand is computed as if it were not a
7738   // d16 image instruction.
7739   if (ImageStore && Subtarget->hasImageStoreD16Bug()) {
7740     // Bitcast to i16
7741     EVT IntStoreVT = StoreVT.changeTypeToInteger();
7742     SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
7743 
7744     // Decompose into scalars
7745     SmallVector<SDValue, 4> Elts;
7746     DAG.ExtractVectorElements(IntVData, Elts);
7747 
7748     // Group pairs of i16 into v2i16 and bitcast to i32
7749     SmallVector<SDValue, 4> PackedElts;
7750     for (unsigned I = 0; I < Elts.size() / 2; I += 1) {
7751       SDValue Pair =
7752           DAG.getBuildVector(MVT::v2i16, DL, {Elts[I * 2], Elts[I * 2 + 1]});
7753       SDValue IntPair = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Pair);
7754       PackedElts.push_back(IntPair);
7755     }
7756     if ((NumElements % 2) == 1) {
7757       // Handle v3i16
7758       unsigned I = Elts.size() / 2;
7759       SDValue Pair = DAG.getBuildVector(MVT::v2i16, DL,
7760                                         {Elts[I * 2], DAG.getUNDEF(MVT::i16)});
7761       SDValue IntPair = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Pair);
7762       PackedElts.push_back(IntPair);
7763     }
7764 
7765     // Pad using UNDEF
7766     PackedElts.resize(Elts.size(), DAG.getUNDEF(MVT::i32));
7767 
7768     // Build final vector
7769     EVT VecVT =
7770         EVT::getVectorVT(*DAG.getContext(), MVT::i32, PackedElts.size());
7771     return DAG.getBuildVector(VecVT, DL, PackedElts);
7772   }
7773 
7774   if (NumElements == 3) {
7775     EVT IntStoreVT =
7776         EVT::getIntegerVT(*DAG.getContext(), StoreVT.getStoreSizeInBits());
7777     SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
7778 
7779     EVT WidenedStoreVT = EVT::getVectorVT(
7780         *DAG.getContext(), StoreVT.getVectorElementType(), NumElements + 1);
7781     EVT WidenedIntVT = EVT::getIntegerVT(*DAG.getContext(),
7782                                          WidenedStoreVT.getStoreSizeInBits());
7783     SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenedIntVT, IntVData);
7784     return DAG.getNode(ISD::BITCAST, DL, WidenedStoreVT, ZExt);
7785   }
7786 
7787   assert(isTypeLegal(StoreVT));
7788   return VData;
7789 }
7790 
7791 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
7792                                               SelectionDAG &DAG) const {
7793   SDLoc DL(Op);
7794   SDValue Chain = Op.getOperand(0);
7795   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
7796   MachineFunction &MF = DAG.getMachineFunction();
7797 
7798   switch (IntrinsicID) {
7799   case Intrinsic::amdgcn_exp_compr: {
7800     SDValue Src0 = Op.getOperand(4);
7801     SDValue Src1 = Op.getOperand(5);
7802     // Hack around illegal type on SI by directly selecting it.
7803     if (isTypeLegal(Src0.getValueType()))
7804       return SDValue();
7805 
7806     const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
7807     SDValue Undef = DAG.getUNDEF(MVT::f32);
7808     const SDValue Ops[] = {
7809       Op.getOperand(2), // tgt
7810       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), // src0
7811       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), // src1
7812       Undef, // src2
7813       Undef, // src3
7814       Op.getOperand(7), // vm
7815       DAG.getTargetConstant(1, DL, MVT::i1), // compr
7816       Op.getOperand(3), // en
7817       Op.getOperand(0) // Chain
7818     };
7819 
7820     unsigned Opc = Done->isZero() ? AMDGPU::EXP : AMDGPU::EXP_DONE;
7821     return SDValue(DAG.getMachineNode(Opc, DL, Op->getVTList(), Ops), 0);
7822   }
7823   case Intrinsic::amdgcn_s_barrier: {
7824     if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
7825       const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
7826       unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
7827       if (WGSize <= ST.getWavefrontSize())
7828         return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
7829                                           Op.getOperand(0)), 0);
7830     }
7831     return SDValue();
7832   };
7833   case Intrinsic::amdgcn_tbuffer_store: {
7834     SDValue VData = Op.getOperand(2);
7835     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
7836     if (IsD16)
7837       VData = handleD16VData(VData, DAG);
7838     unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
7839     unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
7840     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
7841     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue();
7842     unsigned IdxEn = getIdxEn(Op.getOperand(4));
7843     SDValue Ops[] = {
7844       Chain,
7845       VData,             // vdata
7846       Op.getOperand(3),  // rsrc
7847       Op.getOperand(4),  // vindex
7848       Op.getOperand(5),  // voffset
7849       Op.getOperand(6),  // soffset
7850       Op.getOperand(7),  // offset
7851       DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
7852       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
7853       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
7854     };
7855     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
7856                            AMDGPUISD::TBUFFER_STORE_FORMAT;
7857     MemSDNode *M = cast<MemSDNode>(Op);
7858     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
7859                                    M->getMemoryVT(), M->getMemOperand());
7860   }
7861 
7862   case Intrinsic::amdgcn_struct_tbuffer_store: {
7863     SDValue VData = Op.getOperand(2);
7864     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
7865     if (IsD16)
7866       VData = handleD16VData(VData, DAG);
7867     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
7868     SDValue Ops[] = {
7869       Chain,
7870       VData,             // vdata
7871       Op.getOperand(3),  // rsrc
7872       Op.getOperand(4),  // vindex
7873       Offsets.first,     // voffset
7874       Op.getOperand(6),  // soffset
7875       Offsets.second,    // offset
7876       Op.getOperand(7),  // format
7877       Op.getOperand(8),  // cachepolicy, swizzled buffer
7878       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
7879     };
7880     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
7881                            AMDGPUISD::TBUFFER_STORE_FORMAT;
7882     MemSDNode *M = cast<MemSDNode>(Op);
7883     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
7884                                    M->getMemoryVT(), M->getMemOperand());
7885   }
7886 
7887   case Intrinsic::amdgcn_raw_tbuffer_store: {
7888     SDValue VData = Op.getOperand(2);
7889     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
7890     if (IsD16)
7891       VData = handleD16VData(VData, DAG);
7892     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
7893     SDValue Ops[] = {
7894       Chain,
7895       VData,             // vdata
7896       Op.getOperand(3),  // rsrc
7897       DAG.getConstant(0, DL, MVT::i32), // vindex
7898       Offsets.first,     // voffset
7899       Op.getOperand(5),  // soffset
7900       Offsets.second,    // offset
7901       Op.getOperand(6),  // format
7902       Op.getOperand(7),  // cachepolicy, swizzled buffer
7903       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
7904     };
7905     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
7906                            AMDGPUISD::TBUFFER_STORE_FORMAT;
7907     MemSDNode *M = cast<MemSDNode>(Op);
7908     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
7909                                    M->getMemoryVT(), M->getMemOperand());
7910   }
7911 
7912   case Intrinsic::amdgcn_buffer_store:
7913   case Intrinsic::amdgcn_buffer_store_format: {
7914     SDValue VData = Op.getOperand(2);
7915     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
7916     if (IsD16)
7917       VData = handleD16VData(VData, DAG);
7918     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
7919     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
7920     unsigned IdxEn = getIdxEn(Op.getOperand(4));
7921     SDValue Ops[] = {
7922       Chain,
7923       VData,
7924       Op.getOperand(3), // rsrc
7925       Op.getOperand(4), // vindex
7926       SDValue(), // voffset -- will be set by setBufferOffsets
7927       SDValue(), // soffset -- will be set by setBufferOffsets
7928       SDValue(), // offset -- will be set by setBufferOffsets
7929       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
7930       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
7931     };
7932     setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
7933 
7934     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
7935                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
7936     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
7937     MemSDNode *M = cast<MemSDNode>(Op);
7938     updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);
7939 
7940     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
7941     EVT VDataType = VData.getValueType().getScalarType();
7942     if (VDataType == MVT::i8 || VDataType == MVT::i16)
7943       return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
7944 
7945     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
7946                                    M->getMemoryVT(), M->getMemOperand());
7947   }
7948 
7949   case Intrinsic::amdgcn_raw_buffer_store:
7950   case Intrinsic::amdgcn_raw_buffer_store_format: {
7951     const bool IsFormat =
7952         IntrinsicID == Intrinsic::amdgcn_raw_buffer_store_format;
7953 
7954     SDValue VData = Op.getOperand(2);
7955     EVT VDataVT = VData.getValueType();
7956     EVT EltType = VDataVT.getScalarType();
7957     bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
7958     if (IsD16) {
7959       VData = handleD16VData(VData, DAG);
7960       VDataVT = VData.getValueType();
7961     }
7962 
7963     if (!isTypeLegal(VDataVT)) {
7964       VData =
7965           DAG.getNode(ISD::BITCAST, DL,
7966                       getEquivalentMemType(*DAG.getContext(), VDataVT), VData);
7967     }
7968 
7969     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
7970     SDValue Ops[] = {
7971       Chain,
7972       VData,
7973       Op.getOperand(3), // rsrc
7974       DAG.getConstant(0, DL, MVT::i32), // vindex
7975       Offsets.first,    // voffset
7976       Op.getOperand(5), // soffset
7977       Offsets.second,   // offset
7978       Op.getOperand(6), // cachepolicy, swizzled buffer
7979       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
7980     };
7981     unsigned Opc =
7982         IsFormat ? AMDGPUISD::BUFFER_STORE_FORMAT : AMDGPUISD::BUFFER_STORE;
7983     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
7984     MemSDNode *M = cast<MemSDNode>(Op);
7985     updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6]);
7986 
7987     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
7988     if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32)
7989       return handleByteShortBufferStores(DAG, VDataVT, DL, Ops, M);
7990 
7991     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
7992                                    M->getMemoryVT(), M->getMemOperand());
7993   }
7994 
7995   case Intrinsic::amdgcn_struct_buffer_store:
7996   case Intrinsic::amdgcn_struct_buffer_store_format: {
7997     const bool IsFormat =
7998         IntrinsicID == Intrinsic::amdgcn_struct_buffer_store_format;
7999 
8000     SDValue VData = Op.getOperand(2);
8001     EVT VDataVT = VData.getValueType();
8002     EVT EltType = VDataVT.getScalarType();
8003     bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
8004 
8005     if (IsD16) {
8006       VData = handleD16VData(VData, DAG);
8007       VDataVT = VData.getValueType();
8008     }
8009 
8010     if (!isTypeLegal(VDataVT)) {
8011       VData =
8012           DAG.getNode(ISD::BITCAST, DL,
8013                       getEquivalentMemType(*DAG.getContext(), VDataVT), VData);
8014     }
8015 
8016     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
8017     SDValue Ops[] = {
8018       Chain,
8019       VData,
8020       Op.getOperand(3), // rsrc
8021       Op.getOperand(4), // vindex
8022       Offsets.first,    // voffset
8023       Op.getOperand(6), // soffset
8024       Offsets.second,   // offset
8025       Op.getOperand(7), // cachepolicy, swizzled buffer
8026       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
8027     };
8028     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ?
8029                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
8030     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
8031     MemSDNode *M = cast<MemSDNode>(Op);
8032     updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);
8033 
8034     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
8035     EVT VDataType = VData.getValueType().getScalarType();
8036     if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32)
8037       return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
8038 
8039     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
8040                                    M->getMemoryVT(), M->getMemOperand());
8041   }
8042   case Intrinsic::amdgcn_raw_buffer_load_lds:
8043   case Intrinsic::amdgcn_struct_buffer_load_lds: {
8044     unsigned Opc;
8045     bool HasVIndex = IntrinsicID == Intrinsic::amdgcn_struct_buffer_load_lds;
8046     unsigned OpOffset = HasVIndex ? 1 : 0;
8047     SDValue VOffset = Op.getOperand(5 + OpOffset);
8048     auto CVOffset = dyn_cast<ConstantSDNode>(VOffset);
8049     bool HasVOffset = !CVOffset || !CVOffset->isZero();
8050     unsigned Size = Op->getConstantOperandVal(4);
8051 
8052     switch (Size) {
8053     default:
8054       return SDValue();
8055     case 1:
8056       Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN
8057                                    : AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN
8058                       : HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN
8059                                    : AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET;
8060       break;
8061     case 2:
8062       Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN
8063                                    : AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN
8064                       : HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN
8065                                    : AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET;
8066       break;
8067     case 4:
8068       Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN
8069                                    : AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN
8070                       : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN
8071                                    : AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET;
8072       break;
8073     }
8074 
8075     SDValue M0Val = copyToM0(DAG, Chain, DL, Op.getOperand(3));
8076 
8077     SmallVector<SDValue, 8> Ops;
8078 
8079     if (HasVIndex && HasVOffset)
8080       Ops.push_back(DAG.getBuildVector(MVT::v2i32, DL,
8081                                        { Op.getOperand(5), // VIndex
8082                                          VOffset }));
8083     else if (HasVIndex)
8084       Ops.push_back(Op.getOperand(5));
8085     else if (HasVOffset)
8086       Ops.push_back(VOffset);
8087 
8088     Ops.push_back(Op.getOperand(2));           // rsrc
8089     Ops.push_back(Op.getOperand(6 + OpOffset)); // soffset
8090     Ops.push_back(Op.getOperand(7 + OpOffset)); // imm offset
8091     unsigned Aux = Op.getConstantOperandVal(8 + OpOffset);
8092     Ops.push_back(
8093       DAG.getTargetConstant(Aux & AMDGPU::CPol::ALL, DL, MVT::i8)); // cpol
8094     Ops.push_back(
8095       DAG.getTargetConstant((Aux >> 3) & 1, DL, MVT::i8));          // swz
8096     Ops.push_back(M0Val.getValue(0)); // Chain
8097     Ops.push_back(M0Val.getValue(1)); // Glue
8098 
8099     auto *M = cast<MemSDNode>(Op);
8100     MachineMemOperand *LoadMMO = M->getMemOperand();
8101     MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
8102     LoadPtrI.Offset = Op->getConstantOperandVal(7 + OpOffset);
8103     MachinePointerInfo StorePtrI = LoadPtrI;
8104     StorePtrI.V = nullptr;
8105     StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
8106 
8107     auto F = LoadMMO->getFlags() &
8108              ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
8109     LoadMMO = MF.getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
8110                                       Size, LoadMMO->getBaseAlign());
8111 
8112     MachineMemOperand *StoreMMO =
8113         MF.getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
8114                                 sizeof(int32_t), LoadMMO->getBaseAlign());
8115 
8116     auto Load = DAG.getMachineNode(Opc, DL, M->getVTList(), Ops);
8117     DAG.setNodeMemRefs(Load, {LoadMMO, StoreMMO});
8118 
8119     return SDValue(Load, 0);
8120   }
8121   case Intrinsic::amdgcn_global_load_lds: {
8122     unsigned Opc;
8123     unsigned Size = Op->getConstantOperandVal(4);
8124     switch (Size) {
8125     default:
8126       return SDValue();
8127     case 1:
8128       Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE;
8129       break;
8130     case 2:
8131       Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT;
8132       break;
8133     case 4:
8134       Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
8135       break;
8136     }
8137 
8138     auto *M = cast<MemSDNode>(Op);
8139     SDValue M0Val = copyToM0(DAG, Chain, DL, Op.getOperand(3));
8140 
8141     SmallVector<SDValue, 6> Ops;
8142 
8143     SDValue Addr = Op.getOperand(2); // Global ptr
8144     SDValue VOffset;
8145     // Try to split SAddr and VOffset. Global and LDS pointers share the same
8146     // immediate offset, so we cannot use a regular SelectGlobalSAddr().
8147     if (Addr->isDivergent() && Addr.getOpcode() == ISD::ADD) {
8148       SDValue LHS = Addr.getOperand(0);
8149       SDValue RHS = Addr.getOperand(1);
8150 
8151       if (LHS->isDivergent())
8152         std::swap(LHS, RHS);
8153 
8154       if (!LHS->isDivergent() && RHS.getOpcode() == ISD::ZERO_EXTEND &&
8155           RHS.getOperand(0).getValueType() == MVT::i32) {
8156         // add (i64 sgpr), (zero_extend (i32 vgpr))
8157         Addr = LHS;
8158         VOffset = RHS.getOperand(0);
8159       }
8160     }
8161 
8162     Ops.push_back(Addr);
8163     if (!Addr->isDivergent()) {
8164       Opc = AMDGPU::getGlobalSaddrOp(Opc);
8165       if (!VOffset)
8166         VOffset = SDValue(
8167             DAG.getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32,
8168                                DAG.getTargetConstant(0, DL, MVT::i32)), 0);
8169       Ops.push_back(VOffset);
8170     }
8171 
8172     Ops.push_back(Op.getOperand(5));  // Offset
8173     Ops.push_back(Op.getOperand(6));  // CPol
8174     Ops.push_back(M0Val.getValue(0)); // Chain
8175     Ops.push_back(M0Val.getValue(1)); // Glue
8176 
8177     MachineMemOperand *LoadMMO = M->getMemOperand();
8178     MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
8179     LoadPtrI.Offset = Op->getConstantOperandVal(5);
8180     MachinePointerInfo StorePtrI = LoadPtrI;
8181     LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS;
8182     StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
8183     auto F = LoadMMO->getFlags() &
8184              ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
8185     LoadMMO = MF.getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
8186                                       Size, LoadMMO->getBaseAlign());
8187     MachineMemOperand *StoreMMO =
8188         MF.getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
8189                                 sizeof(int32_t), Align(4));
8190 
8191     auto Load = DAG.getMachineNode(Opc, DL, Op->getVTList(), Ops);
8192     DAG.setNodeMemRefs(Load, {LoadMMO, StoreMMO});
8193 
8194     return SDValue(Load, 0);
8195   }
8196   case Intrinsic::amdgcn_end_cf:
8197     return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other,
8198                                       Op->getOperand(2), Chain), 0);
8199 
8200   default: {
8201     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
8202             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
8203       return lowerImage(Op, ImageDimIntr, DAG, true);
8204 
8205     return Op;
8206   }
8207   }
8208 }
8209 
8210 // The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args:
8211 // offset (the offset that is included in bounds checking and swizzling, to be
8212 // split between the instruction's voffset and immoffset fields) and soffset
8213 // (the offset that is excluded from bounds checking and swizzling, to go in
8214 // the instruction's soffset field).  This function takes the first kind of
8215 // offset and figures out how to split it between voffset and immoffset.
8216 std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets(
8217     SDValue Offset, SelectionDAG &DAG) const {
8218   SDLoc DL(Offset);
8219   const unsigned MaxImm = 4095;
8220   SDValue N0 = Offset;
8221   ConstantSDNode *C1 = nullptr;
8222 
8223   if ((C1 = dyn_cast<ConstantSDNode>(N0)))
8224     N0 = SDValue();
8225   else if (DAG.isBaseWithConstantOffset(N0)) {
8226     C1 = cast<ConstantSDNode>(N0.getOperand(1));
8227     N0 = N0.getOperand(0);
8228   }
8229 
8230   if (C1) {
8231     unsigned ImmOffset = C1->getZExtValue();
8232     // If the immediate value is too big for the immoffset field, put the value
8233     // and -4096 into the immoffset field so that the value that is copied/added
8234     // for the voffset field is a multiple of 4096, and it stands more chance
8235     // of being CSEd with the copy/add for another similar load/store.
8236     // However, do not do that rounding down to a multiple of 4096 if that is a
8237     // negative number, as it appears to be illegal to have a negative offset
8238     // in the vgpr, even if adding the immediate offset makes it positive.
8239     unsigned Overflow = ImmOffset & ~MaxImm;
8240     ImmOffset -= Overflow;
8241     if ((int32_t)Overflow < 0) {
8242       Overflow += ImmOffset;
8243       ImmOffset = 0;
8244     }
8245     C1 = cast<ConstantSDNode>(DAG.getTargetConstant(ImmOffset, DL, MVT::i32));
8246     if (Overflow) {
8247       auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32);
8248       if (!N0)
8249         N0 = OverflowVal;
8250       else {
8251         SDValue Ops[] = { N0, OverflowVal };
8252         N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops);
8253       }
8254     }
8255   }
8256   if (!N0)
8257     N0 = DAG.getConstant(0, DL, MVT::i32);
8258   if (!C1)
8259     C1 = cast<ConstantSDNode>(DAG.getTargetConstant(0, DL, MVT::i32));
8260   return {N0, SDValue(C1, 0)};
8261 }
8262 
8263 // Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the
8264 // three offsets (voffset, soffset and instoffset) into the SDValue[3] array
8265 // pointed to by Offsets.
8266 void SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
8267                                         SelectionDAG &DAG, SDValue *Offsets,
8268                                         Align Alignment) const {
8269   SDLoc DL(CombinedOffset);
8270   if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) {
8271     uint32_t Imm = C->getZExtValue();
8272     uint32_t SOffset, ImmOffset;
8273     if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget,
8274                                  Alignment)) {
8275       Offsets[0] = DAG.getConstant(0, DL, MVT::i32);
8276       Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
8277       Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32);
8278       return;
8279     }
8280   }
8281   if (DAG.isBaseWithConstantOffset(CombinedOffset)) {
8282     SDValue N0 = CombinedOffset.getOperand(0);
8283     SDValue N1 = CombinedOffset.getOperand(1);
8284     uint32_t SOffset, ImmOffset;
8285     int Offset = cast<ConstantSDNode>(N1)->getSExtValue();
8286     if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset,
8287                                                 Subtarget, Alignment)) {
8288       Offsets[0] = N0;
8289       Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
8290       Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32);
8291       return;
8292     }
8293   }
8294   Offsets[0] = CombinedOffset;
8295   Offsets[1] = DAG.getConstant(0, DL, MVT::i32);
8296   Offsets[2] = DAG.getTargetConstant(0, DL, MVT::i32);
8297 }
8298 
8299 // Handle 8 bit and 16 bit buffer loads
8300 SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG,
8301                                                      EVT LoadVT, SDLoc DL,
8302                                                      ArrayRef<SDValue> Ops,
8303                                                      MemSDNode *M) const {
8304   EVT IntVT = LoadVT.changeTypeToInteger();
8305   unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ?
8306          AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT;
8307 
8308   SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other);
8309   SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList,
8310                                                Ops, IntVT,
8311                                                M->getMemOperand());
8312   SDValue LoadVal = DAG.getNode(ISD::TRUNCATE, DL, IntVT, BufferLoad);
8313   LoadVal = DAG.getNode(ISD::BITCAST, DL, LoadVT, LoadVal);
8314 
8315   return DAG.getMergeValues({LoadVal, BufferLoad.getValue(1)}, DL);
8316 }
8317 
8318 // Handle 8 bit and 16 bit buffer stores
8319 SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG,
8320                                                       EVT VDataType, SDLoc DL,
8321                                                       SDValue Ops[],
8322                                                       MemSDNode *M) const {
8323   if (VDataType == MVT::f16)
8324     Ops[1] = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Ops[1]);
8325 
8326   SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]);
8327   Ops[1] = BufferStoreExt;
8328   unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE :
8329                                  AMDGPUISD::BUFFER_STORE_SHORT;
8330   ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9);
8331   return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType,
8332                                      M->getMemOperand());
8333 }
8334 
8335 static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
8336                                  ISD::LoadExtType ExtType, SDValue Op,
8337                                  const SDLoc &SL, EVT VT) {
8338   if (VT.bitsLT(Op.getValueType()))
8339     return DAG.getNode(ISD::TRUNCATE, SL, VT, Op);
8340 
8341   switch (ExtType) {
8342   case ISD::SEXTLOAD:
8343     return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op);
8344   case ISD::ZEXTLOAD:
8345     return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op);
8346   case ISD::EXTLOAD:
8347     return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op);
8348   case ISD::NON_EXTLOAD:
8349     return Op;
8350   }
8351 
8352   llvm_unreachable("invalid ext type");
8353 }
8354 
8355 SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
8356   SelectionDAG &DAG = DCI.DAG;
8357   if (Ld->getAlignment() < 4 || Ld->isDivergent())
8358     return SDValue();
8359 
8360   // FIXME: Constant loads should all be marked invariant.
8361   unsigned AS = Ld->getAddressSpace();
8362   if (AS != AMDGPUAS::CONSTANT_ADDRESS &&
8363       AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
8364       (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant()))
8365     return SDValue();
8366 
8367   // Don't do this early, since it may interfere with adjacent load merging for
8368   // illegal types. We can avoid losing alignment information for exotic types
8369   // pre-legalize.
8370   EVT MemVT = Ld->getMemoryVT();
8371   if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) ||
8372       MemVT.getSizeInBits() >= 32)
8373     return SDValue();
8374 
8375   SDLoc SL(Ld);
8376 
8377   assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
8378          "unexpected vector extload");
8379 
8380   // TODO: Drop only high part of range.
8381   SDValue Ptr = Ld->getBasePtr();
8382   SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
8383                                 MVT::i32, SL, Ld->getChain(), Ptr,
8384                                 Ld->getOffset(),
8385                                 Ld->getPointerInfo(), MVT::i32,
8386                                 Ld->getAlignment(),
8387                                 Ld->getMemOperand()->getFlags(),
8388                                 Ld->getAAInfo(),
8389                                 nullptr); // Drop ranges
8390 
8391   EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
8392   if (MemVT.isFloatingPoint()) {
8393     assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
8394            "unexpected fp extload");
8395     TruncVT = MemVT.changeTypeToInteger();
8396   }
8397 
8398   SDValue Cvt = NewLoad;
8399   if (Ld->getExtensionType() == ISD::SEXTLOAD) {
8400     Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad,
8401                       DAG.getValueType(TruncVT));
8402   } else if (Ld->getExtensionType() == ISD::ZEXTLOAD ||
8403              Ld->getExtensionType() == ISD::NON_EXTLOAD) {
8404     Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT);
8405   } else {
8406     assert(Ld->getExtensionType() == ISD::EXTLOAD);
8407   }
8408 
8409   EVT VT = Ld->getValueType(0);
8410   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
8411 
8412   DCI.AddToWorklist(Cvt.getNode());
8413 
8414   // We may need to handle exotic cases, such as i16->i64 extloads, so insert
8415   // the appropriate extension from the 32-bit load.
8416   Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT);
8417   DCI.AddToWorklist(Cvt.getNode());
8418 
8419   // Handle conversion back to floating point if necessary.
8420   Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt);
8421 
8422   return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL);
8423 }
8424 
8425 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
8426   SDLoc DL(Op);
8427   LoadSDNode *Load = cast<LoadSDNode>(Op);
8428   ISD::LoadExtType ExtType = Load->getExtensionType();
8429   EVT MemVT = Load->getMemoryVT();
8430 
8431   if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
8432     if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
8433       return SDValue();
8434 
8435     // FIXME: Copied from PPC
8436     // First, load into 32 bits, then truncate to 1 bit.
8437 
8438     SDValue Chain = Load->getChain();
8439     SDValue BasePtr = Load->getBasePtr();
8440     MachineMemOperand *MMO = Load->getMemOperand();
8441 
8442     EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
8443 
8444     SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
8445                                    BasePtr, RealMemVT, MMO);
8446 
8447     if (!MemVT.isVector()) {
8448       SDValue Ops[] = {
8449         DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
8450         NewLD.getValue(1)
8451       };
8452 
8453       return DAG.getMergeValues(Ops, DL);
8454     }
8455 
8456     SmallVector<SDValue, 3> Elts;
8457     for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) {
8458       SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD,
8459                                 DAG.getConstant(I, DL, MVT::i32));
8460 
8461       Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt));
8462     }
8463 
8464     SDValue Ops[] = {
8465       DAG.getBuildVector(MemVT, DL, Elts),
8466       NewLD.getValue(1)
8467     };
8468 
8469     return DAG.getMergeValues(Ops, DL);
8470   }
8471 
8472   if (!MemVT.isVector())
8473     return SDValue();
8474 
8475   assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
8476          "Custom lowering for non-i32 vectors hasn't been implemented.");
8477 
8478   unsigned Alignment = Load->getAlignment();
8479   unsigned AS = Load->getAddressSpace();
8480   if (Subtarget->hasLDSMisalignedBug() &&
8481       AS == AMDGPUAS::FLAT_ADDRESS &&
8482       Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) {
8483     return SplitVectorLoad(Op, DAG);
8484   }
8485 
8486   MachineFunction &MF = DAG.getMachineFunction();
8487   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
8488   // If there is a possibility that flat instruction access scratch memory
8489   // then we need to use the same legalization rules we use for private.
8490   if (AS == AMDGPUAS::FLAT_ADDRESS &&
8491       !Subtarget->hasMultiDwordFlatScratchAddressing())
8492     AS = MFI->hasFlatScratchInit() ?
8493          AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
8494 
8495   unsigned NumElements = MemVT.getVectorNumElements();
8496 
8497   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
8498       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
8499     if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) {
8500       if (MemVT.isPow2VectorType())
8501         return SDValue();
8502       return WidenOrSplitVectorLoad(Op, DAG);
8503     }
8504     // Non-uniform loads will be selected to MUBUF instructions, so they
8505     // have the same legalization requirements as global and private
8506     // loads.
8507     //
8508   }
8509 
8510   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
8511       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
8512       AS == AMDGPUAS::GLOBAL_ADDRESS) {
8513     if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
8514         Load->isSimple() && isMemOpHasNoClobberedMemOperand(Load) &&
8515         Alignment >= 4 && NumElements < 32) {
8516       if (MemVT.isPow2VectorType())
8517         return SDValue();
8518       return WidenOrSplitVectorLoad(Op, DAG);
8519     }
8520     // Non-uniform loads will be selected to MUBUF instructions, so they
8521     // have the same legalization requirements as global and private
8522     // loads.
8523     //
8524   }
8525   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
8526       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
8527       AS == AMDGPUAS::GLOBAL_ADDRESS ||
8528       AS == AMDGPUAS::FLAT_ADDRESS) {
8529     if (NumElements > 4)
8530       return SplitVectorLoad(Op, DAG);
8531     // v3 loads not supported on SI.
8532     if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
8533       return WidenOrSplitVectorLoad(Op, DAG);
8534 
8535     // v3 and v4 loads are supported for private and global memory.
8536     return SDValue();
8537   }
8538   if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
8539     // Depending on the setting of the private_element_size field in the
8540     // resource descriptor, we can only make private accesses up to a certain
8541     // size.
8542     switch (Subtarget->getMaxPrivateElementSize()) {
8543     case 4: {
8544       SDValue Ops[2];
8545       std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
8546       return DAG.getMergeValues(Ops, DL);
8547     }
8548     case 8:
8549       if (NumElements > 2)
8550         return SplitVectorLoad(Op, DAG);
8551       return SDValue();
8552     case 16:
8553       // Same as global/flat
8554       if (NumElements > 4)
8555         return SplitVectorLoad(Op, DAG);
8556       // v3 loads not supported on SI.
8557       if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
8558         return WidenOrSplitVectorLoad(Op, DAG);
8559 
8560       return SDValue();
8561     default:
8562       llvm_unreachable("unsupported private_element_size");
8563     }
8564   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
8565     bool Fast = false;
8566     auto Flags = Load->getMemOperand()->getFlags();
8567     if (allowsMisalignedMemoryAccessesImpl(MemVT.getSizeInBits(), AS,
8568                                            Load->getAlign(), Flags, &Fast) &&
8569         Fast)
8570       return SDValue();
8571 
8572     if (MemVT.isVector())
8573       return SplitVectorLoad(Op, DAG);
8574   }
8575 
8576   if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
8577                                       MemVT, *Load->getMemOperand())) {
8578     SDValue Ops[2];
8579     std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
8580     return DAG.getMergeValues(Ops, DL);
8581   }
8582 
8583   return SDValue();
8584 }
8585 
8586 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
8587   EVT VT = Op.getValueType();
8588   if (VT.getSizeInBits() == 128)
8589     return splitTernaryVectorOp(Op, DAG);
8590 
8591   assert(VT.getSizeInBits() == 64);
8592 
8593   SDLoc DL(Op);
8594   SDValue Cond = Op.getOperand(0);
8595 
8596   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
8597   SDValue One = DAG.getConstant(1, DL, MVT::i32);
8598 
8599   SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
8600   SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
8601 
8602   SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
8603   SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
8604 
8605   SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
8606 
8607   SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
8608   SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
8609 
8610   SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
8611 
8612   SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
8613   return DAG.getNode(ISD::BITCAST, DL, VT, Res);
8614 }
8615 
8616 // Catch division cases where we can use shortcuts with rcp and rsq
8617 // instructions.
8618 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
8619                                               SelectionDAG &DAG) const {
8620   SDLoc SL(Op);
8621   SDValue LHS = Op.getOperand(0);
8622   SDValue RHS = Op.getOperand(1);
8623   EVT VT = Op.getValueType();
8624   const SDNodeFlags Flags = Op->getFlags();
8625 
8626   bool AllowInaccurateRcp = Flags.hasApproximateFuncs();
8627 
8628   // Without !fpmath accuracy information, we can't do more because we don't
8629   // know exactly whether rcp is accurate enough to meet !fpmath requirement.
8630   if (!AllowInaccurateRcp)
8631     return SDValue();
8632 
8633   if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
8634     if (CLHS->isExactlyValue(1.0)) {
8635       // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
8636       // the CI documentation has a worst case error of 1 ulp.
8637       // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
8638       // use it as long as we aren't trying to use denormals.
8639       //
8640       // v_rcp_f16 and v_rsq_f16 DO support denormals.
8641 
8642       // 1.0 / sqrt(x) -> rsq(x)
8643 
8644       // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
8645       // error seems really high at 2^29 ULP.
8646       if (RHS.getOpcode() == ISD::FSQRT)
8647         return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
8648 
8649       // 1.0 / x -> rcp(x)
8650       return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
8651     }
8652 
8653     // Same as for 1.0, but expand the sign out of the constant.
8654     if (CLHS->isExactlyValue(-1.0)) {
8655       // -1.0 / x -> rcp (fneg x)
8656       SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
8657       return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
8658     }
8659   }
8660 
8661   // Turn into multiply by the reciprocal.
8662   // x / y -> x * (1.0 / y)
8663   SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
8664   return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
8665 }
8666 
8667 SDValue SITargetLowering::lowerFastUnsafeFDIV64(SDValue Op,
8668                                                 SelectionDAG &DAG) const {
8669   SDLoc SL(Op);
8670   SDValue X = Op.getOperand(0);
8671   SDValue Y = Op.getOperand(1);
8672   EVT VT = Op.getValueType();
8673   const SDNodeFlags Flags = Op->getFlags();
8674 
8675   bool AllowInaccurateDiv = Flags.hasApproximateFuncs() ||
8676                             DAG.getTarget().Options.UnsafeFPMath;
8677   if (!AllowInaccurateDiv)
8678     return SDValue();
8679 
8680   SDValue NegY = DAG.getNode(ISD::FNEG, SL, VT, Y);
8681   SDValue One = DAG.getConstantFP(1.0, SL, VT);
8682 
8683   SDValue R = DAG.getNode(AMDGPUISD::RCP, SL, VT, Y);
8684   SDValue Tmp0 = DAG.getNode(ISD::FMA, SL, VT, NegY, R, One);
8685 
8686   R = DAG.getNode(ISD::FMA, SL, VT, Tmp0, R, R);
8687   SDValue Tmp1 = DAG.getNode(ISD::FMA, SL, VT, NegY, R, One);
8688   R = DAG.getNode(ISD::FMA, SL, VT, Tmp1, R, R);
8689   SDValue Ret = DAG.getNode(ISD::FMUL, SL, VT, X, R);
8690   SDValue Tmp2 = DAG.getNode(ISD::FMA, SL, VT, NegY, Ret, X);
8691   return DAG.getNode(ISD::FMA, SL, VT, Tmp2, R, Ret);
8692 }
8693 
8694 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
8695                           EVT VT, SDValue A, SDValue B, SDValue GlueChain,
8696                           SDNodeFlags Flags) {
8697   if (GlueChain->getNumValues() <= 1) {
8698     return DAG.getNode(Opcode, SL, VT, A, B, Flags);
8699   }
8700 
8701   assert(GlueChain->getNumValues() == 3);
8702 
8703   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
8704   switch (Opcode) {
8705   default: llvm_unreachable("no chain equivalent for opcode");
8706   case ISD::FMUL:
8707     Opcode = AMDGPUISD::FMUL_W_CHAIN;
8708     break;
8709   }
8710 
8711   return DAG.getNode(Opcode, SL, VTList,
8712                      {GlueChain.getValue(1), A, B, GlueChain.getValue(2)},
8713                      Flags);
8714 }
8715 
8716 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
8717                            EVT VT, SDValue A, SDValue B, SDValue C,
8718                            SDValue GlueChain, SDNodeFlags Flags) {
8719   if (GlueChain->getNumValues() <= 1) {
8720     return DAG.getNode(Opcode, SL, VT, {A, B, C}, Flags);
8721   }
8722 
8723   assert(GlueChain->getNumValues() == 3);
8724 
8725   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
8726   switch (Opcode) {
8727   default: llvm_unreachable("no chain equivalent for opcode");
8728   case ISD::FMA:
8729     Opcode = AMDGPUISD::FMA_W_CHAIN;
8730     break;
8731   }
8732 
8733   return DAG.getNode(Opcode, SL, VTList,
8734                      {GlueChain.getValue(1), A, B, C, GlueChain.getValue(2)},
8735                      Flags);
8736 }
8737 
8738 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
8739   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
8740     return FastLowered;
8741 
8742   SDLoc SL(Op);
8743   SDValue Src0 = Op.getOperand(0);
8744   SDValue Src1 = Op.getOperand(1);
8745 
8746   SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
8747   SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
8748 
8749   SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
8750   SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
8751 
8752   SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
8753   SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
8754 
8755   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
8756 }
8757 
8758 // Faster 2.5 ULP division that does not support denormals.
8759 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
8760   SDLoc SL(Op);
8761   SDValue LHS = Op.getOperand(1);
8762   SDValue RHS = Op.getOperand(2);
8763 
8764   SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
8765 
8766   const APFloat K0Val(BitsToFloat(0x6f800000));
8767   const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
8768 
8769   const APFloat K1Val(BitsToFloat(0x2f800000));
8770   const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
8771 
8772   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
8773 
8774   EVT SetCCVT =
8775     getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
8776 
8777   SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
8778 
8779   SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
8780 
8781   // TODO: Should this propagate fast-math-flags?
8782   r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
8783 
8784   // rcp does not support denormals.
8785   SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
8786 
8787   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
8788 
8789   return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
8790 }
8791 
8792 // Returns immediate value for setting the F32 denorm mode when using the
8793 // S_DENORM_MODE instruction.
8794 static SDValue getSPDenormModeValue(int SPDenormMode, SelectionDAG &DAG,
8795                                     const SDLoc &SL, const GCNSubtarget *ST) {
8796   assert(ST->hasDenormModeInst() && "Requires S_DENORM_MODE");
8797   int DPDenormModeDefault = hasFP64FP16Denormals(DAG.getMachineFunction())
8798                                 ? FP_DENORM_FLUSH_NONE
8799                                 : FP_DENORM_FLUSH_IN_FLUSH_OUT;
8800 
8801   int Mode = SPDenormMode | (DPDenormModeDefault << 2);
8802   return DAG.getTargetConstant(Mode, SL, MVT::i32);
8803 }
8804 
8805 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
8806   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
8807     return FastLowered;
8808 
8809   // The selection matcher assumes anything with a chain selecting to a
8810   // mayRaiseFPException machine instruction. Since we're introducing a chain
8811   // here, we need to explicitly report nofpexcept for the regular fdiv
8812   // lowering.
8813   SDNodeFlags Flags = Op->getFlags();
8814   Flags.setNoFPExcept(true);
8815 
8816   SDLoc SL(Op);
8817   SDValue LHS = Op.getOperand(0);
8818   SDValue RHS = Op.getOperand(1);
8819 
8820   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
8821 
8822   SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
8823 
8824   SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
8825                                           {RHS, RHS, LHS}, Flags);
8826   SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
8827                                         {LHS, RHS, LHS}, Flags);
8828 
8829   // Denominator is scaled to not be denormal, so using rcp is ok.
8830   SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
8831                                   DenominatorScaled, Flags);
8832   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
8833                                      DenominatorScaled, Flags);
8834 
8835   const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
8836                                (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
8837                                (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
8838   const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i32);
8839 
8840   const bool HasFP32Denormals = hasFP32Denormals(DAG.getMachineFunction());
8841 
8842   if (!HasFP32Denormals) {
8843     // Note we can't use the STRICT_FMA/STRICT_FMUL for the non-strict FDIV
8844     // lowering. The chain dependence is insufficient, and we need glue. We do
8845     // not need the glue variants in a strictfp function.
8846 
8847     SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
8848 
8849     SDNode *EnableDenorm;
8850     if (Subtarget->hasDenormModeInst()) {
8851       const SDValue EnableDenormValue =
8852           getSPDenormModeValue(FP_DENORM_FLUSH_NONE, DAG, SL, Subtarget);
8853 
8854       EnableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, BindParamVTs,
8855                                  DAG.getEntryNode(), EnableDenormValue).getNode();
8856     } else {
8857       const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
8858                                                         SL, MVT::i32);
8859       EnableDenorm =
8860           DAG.getMachineNode(AMDGPU::S_SETREG_B32, SL, BindParamVTs,
8861                              {EnableDenormValue, BitField, DAG.getEntryNode()});
8862     }
8863 
8864     SDValue Ops[3] = {
8865       NegDivScale0,
8866       SDValue(EnableDenorm, 0),
8867       SDValue(EnableDenorm, 1)
8868     };
8869 
8870     NegDivScale0 = DAG.getMergeValues(Ops, SL);
8871   }
8872 
8873   SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
8874                              ApproxRcp, One, NegDivScale0, Flags);
8875 
8876   SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
8877                              ApproxRcp, Fma0, Flags);
8878 
8879   SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
8880                            Fma1, Fma1, Flags);
8881 
8882   SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
8883                              NumeratorScaled, Mul, Flags);
8884 
8885   SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32,
8886                              Fma2, Fma1, Mul, Fma2, Flags);
8887 
8888   SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
8889                              NumeratorScaled, Fma3, Flags);
8890 
8891   if (!HasFP32Denormals) {
8892     SDNode *DisableDenorm;
8893     if (Subtarget->hasDenormModeInst()) {
8894       const SDValue DisableDenormValue =
8895           getSPDenormModeValue(FP_DENORM_FLUSH_IN_FLUSH_OUT, DAG, SL, Subtarget);
8896 
8897       DisableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, MVT::Other,
8898                                   Fma4.getValue(1), DisableDenormValue,
8899                                   Fma4.getValue(2)).getNode();
8900     } else {
8901       const SDValue DisableDenormValue =
8902           DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
8903 
8904       DisableDenorm = DAG.getMachineNode(
8905           AMDGPU::S_SETREG_B32, SL, MVT::Other,
8906           {DisableDenormValue, BitField, Fma4.getValue(1), Fma4.getValue(2)});
8907     }
8908 
8909     SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
8910                                       SDValue(DisableDenorm, 0), DAG.getRoot());
8911     DAG.setRoot(OutputChain);
8912   }
8913 
8914   SDValue Scale = NumeratorScaled.getValue(1);
8915   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
8916                              {Fma4, Fma1, Fma3, Scale}, Flags);
8917 
8918   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS, Flags);
8919 }
8920 
8921 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
8922   if (SDValue FastLowered = lowerFastUnsafeFDIV64(Op, DAG))
8923     return FastLowered;
8924 
8925   SDLoc SL(Op);
8926   SDValue X = Op.getOperand(0);
8927   SDValue Y = Op.getOperand(1);
8928 
8929   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
8930 
8931   SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
8932 
8933   SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
8934 
8935   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
8936 
8937   SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
8938 
8939   SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
8940 
8941   SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
8942 
8943   SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
8944 
8945   SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
8946 
8947   SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
8948   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
8949 
8950   SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
8951                              NegDivScale0, Mul, DivScale1);
8952 
8953   SDValue Scale;
8954 
8955   if (!Subtarget->hasUsableDivScaleConditionOutput()) {
8956     // Workaround a hardware bug on SI where the condition output from div_scale
8957     // is not usable.
8958 
8959     const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
8960 
8961     // Figure out if the scale to use for div_fmas.
8962     SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
8963     SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
8964     SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
8965     SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
8966 
8967     SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
8968     SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
8969 
8970     SDValue Scale0Hi
8971       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
8972     SDValue Scale1Hi
8973       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
8974 
8975     SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
8976     SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
8977     Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
8978   } else {
8979     Scale = DivScale1.getValue(1);
8980   }
8981 
8982   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
8983                              Fma4, Fma3, Mul, Scale);
8984 
8985   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
8986 }
8987 
8988 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
8989   EVT VT = Op.getValueType();
8990 
8991   if (VT == MVT::f32)
8992     return LowerFDIV32(Op, DAG);
8993 
8994   if (VT == MVT::f64)
8995     return LowerFDIV64(Op, DAG);
8996 
8997   if (VT == MVT::f16)
8998     return LowerFDIV16(Op, DAG);
8999 
9000   llvm_unreachable("Unexpected type for fdiv");
9001 }
9002 
9003 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
9004   SDLoc DL(Op);
9005   StoreSDNode *Store = cast<StoreSDNode>(Op);
9006   EVT VT = Store->getMemoryVT();
9007 
9008   if (VT == MVT::i1) {
9009     return DAG.getTruncStore(Store->getChain(), DL,
9010        DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
9011        Store->getBasePtr(), MVT::i1, Store->getMemOperand());
9012   }
9013 
9014   assert(VT.isVector() &&
9015          Store->getValue().getValueType().getScalarType() == MVT::i32);
9016 
9017   unsigned AS = Store->getAddressSpace();
9018   if (Subtarget->hasLDSMisalignedBug() &&
9019       AS == AMDGPUAS::FLAT_ADDRESS &&
9020       Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) {
9021     return SplitVectorStore(Op, DAG);
9022   }
9023 
9024   MachineFunction &MF = DAG.getMachineFunction();
9025   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
9026   // If there is a possibility that flat instruction access scratch memory
9027   // then we need to use the same legalization rules we use for private.
9028   if (AS == AMDGPUAS::FLAT_ADDRESS &&
9029       !Subtarget->hasMultiDwordFlatScratchAddressing())
9030     AS = MFI->hasFlatScratchInit() ?
9031          AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
9032 
9033   unsigned NumElements = VT.getVectorNumElements();
9034   if (AS == AMDGPUAS::GLOBAL_ADDRESS ||
9035       AS == AMDGPUAS::FLAT_ADDRESS) {
9036     if (NumElements > 4)
9037       return SplitVectorStore(Op, DAG);
9038     // v3 stores not supported on SI.
9039     if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
9040       return SplitVectorStore(Op, DAG);
9041 
9042     if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
9043                                         VT, *Store->getMemOperand()))
9044       return expandUnalignedStore(Store, DAG);
9045 
9046     return SDValue();
9047   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
9048     switch (Subtarget->getMaxPrivateElementSize()) {
9049     case 4:
9050       return scalarizeVectorStore(Store, DAG);
9051     case 8:
9052       if (NumElements > 2)
9053         return SplitVectorStore(Op, DAG);
9054       return SDValue();
9055     case 16:
9056       if (NumElements > 4 ||
9057           (NumElements == 3 && !Subtarget->enableFlatScratch()))
9058         return SplitVectorStore(Op, DAG);
9059       return SDValue();
9060     default:
9061       llvm_unreachable("unsupported private_element_size");
9062     }
9063   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
9064     bool Fast = false;
9065     auto Flags = Store->getMemOperand()->getFlags();
9066     if (allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AS,
9067                                            Store->getAlign(), Flags, &Fast) &&
9068         Fast)
9069       return SDValue();
9070 
9071     if (VT.isVector())
9072       return SplitVectorStore(Op, DAG);
9073 
9074     return expandUnalignedStore(Store, DAG);
9075   }
9076 
9077   // Probably an invalid store. If so we'll end up emitting a selection error.
9078   return SDValue();
9079 }
9080 
9081 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
9082   SDLoc DL(Op);
9083   EVT VT = Op.getValueType();
9084   SDValue Arg = Op.getOperand(0);
9085   SDValue TrigVal;
9086 
9087   // Propagate fast-math flags so that the multiply we introduce can be folded
9088   // if Arg is already the result of a multiply by constant.
9089   auto Flags = Op->getFlags();
9090 
9091   SDValue OneOver2Pi = DAG.getConstantFP(0.5 * numbers::inv_pi, DL, VT);
9092 
9093   if (Subtarget->hasTrigReducedRange()) {
9094     SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi, Flags);
9095     TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal, Flags);
9096   } else {
9097     TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi, Flags);
9098   }
9099 
9100   switch (Op.getOpcode()) {
9101   case ISD::FCOS:
9102     return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal, Flags);
9103   case ISD::FSIN:
9104     return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal, Flags);
9105   default:
9106     llvm_unreachable("Wrong trig opcode");
9107   }
9108 }
9109 
9110 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
9111   AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
9112   assert(AtomicNode->isCompareAndSwap());
9113   unsigned AS = AtomicNode->getAddressSpace();
9114 
9115   // No custom lowering required for local address space
9116   if (!AMDGPU::isFlatGlobalAddrSpace(AS))
9117     return Op;
9118 
9119   // Non-local address space requires custom lowering for atomic compare
9120   // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2
9121   SDLoc DL(Op);
9122   SDValue ChainIn = Op.getOperand(0);
9123   SDValue Addr = Op.getOperand(1);
9124   SDValue Old = Op.getOperand(2);
9125   SDValue New = Op.getOperand(3);
9126   EVT VT = Op.getValueType();
9127   MVT SimpleVT = VT.getSimpleVT();
9128   MVT VecType = MVT::getVectorVT(SimpleVT, 2);
9129 
9130   SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
9131   SDValue Ops[] = { ChainIn, Addr, NewOld };
9132 
9133   return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
9134                                  Ops, VT, AtomicNode->getMemOperand());
9135 }
9136 
9137 //===----------------------------------------------------------------------===//
9138 // Custom DAG optimizations
9139 //===----------------------------------------------------------------------===//
9140 
9141 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
9142                                                      DAGCombinerInfo &DCI) const {
9143   EVT VT = N->getValueType(0);
9144   EVT ScalarVT = VT.getScalarType();
9145   if (ScalarVT != MVT::f32 && ScalarVT != MVT::f16)
9146     return SDValue();
9147 
9148   SelectionDAG &DAG = DCI.DAG;
9149   SDLoc DL(N);
9150 
9151   SDValue Src = N->getOperand(0);
9152   EVT SrcVT = Src.getValueType();
9153 
9154   // TODO: We could try to match extracting the higher bytes, which would be
9155   // easier if i8 vectors weren't promoted to i32 vectors, particularly after
9156   // types are legalized. v4i8 -> v4f32 is probably the only case to worry
9157   // about in practice.
9158   if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
9159     if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
9160       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, MVT::f32, Src);
9161       DCI.AddToWorklist(Cvt.getNode());
9162 
9163       // For the f16 case, fold to a cast to f32 and then cast back to f16.
9164       if (ScalarVT != MVT::f32) {
9165         Cvt = DAG.getNode(ISD::FP_ROUND, DL, VT, Cvt,
9166                           DAG.getTargetConstant(0, DL, MVT::i32));
9167       }
9168       return Cvt;
9169     }
9170   }
9171 
9172   return SDValue();
9173 }
9174 
9175 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
9176 
9177 // This is a variant of
9178 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
9179 //
9180 // The normal DAG combiner will do this, but only if the add has one use since
9181 // that would increase the number of instructions.
9182 //
9183 // This prevents us from seeing a constant offset that can be folded into a
9184 // memory instruction's addressing mode. If we know the resulting add offset of
9185 // a pointer can be folded into an addressing offset, we can replace the pointer
9186 // operand with the add of new constant offset. This eliminates one of the uses,
9187 // and may allow the remaining use to also be simplified.
9188 //
9189 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
9190                                                unsigned AddrSpace,
9191                                                EVT MemVT,
9192                                                DAGCombinerInfo &DCI) const {
9193   SDValue N0 = N->getOperand(0);
9194   SDValue N1 = N->getOperand(1);
9195 
9196   // We only do this to handle cases where it's profitable when there are
9197   // multiple uses of the add, so defer to the standard combine.
9198   if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
9199       N0->hasOneUse())
9200     return SDValue();
9201 
9202   const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
9203   if (!CN1)
9204     return SDValue();
9205 
9206   const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
9207   if (!CAdd)
9208     return SDValue();
9209 
9210   // If the resulting offset is too large, we can't fold it into the addressing
9211   // mode offset.
9212   APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
9213   Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());
9214 
9215   AddrMode AM;
9216   AM.HasBaseReg = true;
9217   AM.BaseOffs = Offset.getSExtValue();
9218   if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
9219     return SDValue();
9220 
9221   SelectionDAG &DAG = DCI.DAG;
9222   SDLoc SL(N);
9223   EVT VT = N->getValueType(0);
9224 
9225   SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
9226   SDValue COffset = DAG.getConstant(Offset, SL, VT);
9227 
9228   SDNodeFlags Flags;
9229   Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
9230                           (N0.getOpcode() == ISD::OR ||
9231                            N0->getFlags().hasNoUnsignedWrap()));
9232 
9233   return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
9234 }
9235 
9236 /// MemSDNode::getBasePtr() does not work for intrinsics, which needs to offset
9237 /// by the chain and intrinsic ID. Theoretically we would also need to check the
9238 /// specific intrinsic, but they all place the pointer operand first.
9239 static unsigned getBasePtrIndex(const MemSDNode *N) {
9240   switch (N->getOpcode()) {
9241   case ISD::STORE:
9242   case ISD::INTRINSIC_W_CHAIN:
9243   case ISD::INTRINSIC_VOID:
9244     return 2;
9245   default:
9246     return 1;
9247   }
9248 }
9249 
9250 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
9251                                                   DAGCombinerInfo &DCI) const {
9252   SelectionDAG &DAG = DCI.DAG;
9253   SDLoc SL(N);
9254 
9255   unsigned PtrIdx = getBasePtrIndex(N);
9256   SDValue Ptr = N->getOperand(PtrIdx);
9257 
9258   // TODO: We could also do this for multiplies.
9259   if (Ptr.getOpcode() == ISD::SHL) {
9260     SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(),  N->getAddressSpace(),
9261                                           N->getMemoryVT(), DCI);
9262     if (NewPtr) {
9263       SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
9264 
9265       NewOps[PtrIdx] = NewPtr;
9266       return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
9267     }
9268   }
9269 
9270   return SDValue();
9271 }
9272 
9273 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
9274   return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
9275          (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
9276          (Opc == ISD::XOR && Val == 0);
9277 }
9278 
9279 // Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This
9280 // will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit
9281 // integer combine opportunities since most 64-bit operations are decomposed
9282 // this way.  TODO: We won't want this for SALU especially if it is an inline
9283 // immediate.
9284 SDValue SITargetLowering::splitBinaryBitConstantOp(
9285   DAGCombinerInfo &DCI,
9286   const SDLoc &SL,
9287   unsigned Opc, SDValue LHS,
9288   const ConstantSDNode *CRHS) const {
9289   uint64_t Val = CRHS->getZExtValue();
9290   uint32_t ValLo = Lo_32(Val);
9291   uint32_t ValHi = Hi_32(Val);
9292   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
9293 
9294     if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
9295          bitOpWithConstantIsReducible(Opc, ValHi)) ||
9296         (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
9297     // If we need to materialize a 64-bit immediate, it will be split up later
9298     // anyway. Avoid creating the harder to understand 64-bit immediate
9299     // materialization.
9300     return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
9301   }
9302 
9303   return SDValue();
9304 }
9305 
9306 // Returns true if argument is a boolean value which is not serialized into
9307 // memory or argument and does not require v_cndmask_b32 to be deserialized.
9308 static bool isBoolSGPR(SDValue V) {
9309   if (V.getValueType() != MVT::i1)
9310     return false;
9311   switch (V.getOpcode()) {
9312   default:
9313     break;
9314   case ISD::SETCC:
9315   case AMDGPUISD::FP_CLASS:
9316     return true;
9317   case ISD::AND:
9318   case ISD::OR:
9319   case ISD::XOR:
9320     return isBoolSGPR(V.getOperand(0)) && isBoolSGPR(V.getOperand(1));
9321   }
9322   return false;
9323 }
9324 
9325 // If a constant has all zeroes or all ones within each byte return it.
9326 // Otherwise return 0.
9327 static uint32_t getConstantPermuteMask(uint32_t C) {
9328   // 0xff for any zero byte in the mask
9329   uint32_t ZeroByteMask = 0;
9330   if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
9331   if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
9332   if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
9333   if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
9334   uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte
9335   if ((NonZeroByteMask & C) != NonZeroByteMask)
9336     return 0; // Partial bytes selected.
9337   return C;
9338 }
9339 
9340 // Check if a node selects whole bytes from its operand 0 starting at a byte
9341 // boundary while masking the rest. Returns select mask as in the v_perm_b32
9342 // or -1 if not succeeded.
9343 // Note byte select encoding:
9344 // value 0-3 selects corresponding source byte;
9345 // value 0xc selects zero;
9346 // value 0xff selects 0xff.
9347 static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
9348   assert(V.getValueSizeInBits() == 32);
9349 
9350   if (V.getNumOperands() != 2)
9351     return ~0;
9352 
9353   ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
9354   if (!N1)
9355     return ~0;
9356 
9357   uint32_t C = N1->getZExtValue();
9358 
9359   switch (V.getOpcode()) {
9360   default:
9361     break;
9362   case ISD::AND:
9363     if (uint32_t ConstMask = getConstantPermuteMask(C)) {
9364       return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
9365     }
9366     break;
9367 
9368   case ISD::OR:
9369     if (uint32_t ConstMask = getConstantPermuteMask(C)) {
9370       return (0x03020100 & ~ConstMask) | ConstMask;
9371     }
9372     break;
9373 
9374   case ISD::SHL:
9375     if (C % 8)
9376       return ~0;
9377 
9378     return uint32_t((0x030201000c0c0c0cull << C) >> 32);
9379 
9380   case ISD::SRL:
9381     if (C % 8)
9382       return ~0;
9383 
9384     return uint32_t(0x0c0c0c0c03020100ull >> C);
9385   }
9386 
9387   return ~0;
9388 }
9389 
9390 SDValue SITargetLowering::performAndCombine(SDNode *N,
9391                                             DAGCombinerInfo &DCI) const {
9392   if (DCI.isBeforeLegalize())
9393     return SDValue();
9394 
9395   SelectionDAG &DAG = DCI.DAG;
9396   EVT VT = N->getValueType(0);
9397   SDValue LHS = N->getOperand(0);
9398   SDValue RHS = N->getOperand(1);
9399 
9400 
9401   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
9402   if (VT == MVT::i64 && CRHS) {
9403     if (SDValue Split
9404         = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
9405       return Split;
9406   }
9407 
9408   if (CRHS && VT == MVT::i32) {
9409     // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
9410     // nb = number of trailing zeroes in mask
9411     // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
9412     // given that we are selecting 8 or 16 bit fields starting at byte boundary.
9413     uint64_t Mask = CRHS->getZExtValue();
9414     unsigned Bits = countPopulation(Mask);
9415     if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
9416         (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
9417       if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
9418         unsigned Shift = CShift->getZExtValue();
9419         unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
9420         unsigned Offset = NB + Shift;
9421         if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
9422           SDLoc SL(N);
9423           SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
9424                                     LHS->getOperand(0),
9425                                     DAG.getConstant(Offset, SL, MVT::i32),
9426                                     DAG.getConstant(Bits, SL, MVT::i32));
9427           EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
9428           SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
9429                                     DAG.getValueType(NarrowVT));
9430           SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
9431                                     DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
9432           return Shl;
9433         }
9434       }
9435     }
9436 
9437     // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
9438     if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM &&
9439         isa<ConstantSDNode>(LHS.getOperand(2))) {
9440       uint32_t Sel = getConstantPermuteMask(Mask);
9441       if (!Sel)
9442         return SDValue();
9443 
9444       // Select 0xc for all zero bytes
9445       Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c);
9446       SDLoc DL(N);
9447       return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
9448                          LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
9449     }
9450   }
9451 
9452   // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
9453   // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
9454   if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
9455     ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
9456     ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
9457 
9458     SDValue X = LHS.getOperand(0);
9459     SDValue Y = RHS.getOperand(0);
9460     if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
9461       return SDValue();
9462 
9463     if (LCC == ISD::SETO) {
9464       if (X != LHS.getOperand(1))
9465         return SDValue();
9466 
9467       if (RCC == ISD::SETUNE) {
9468         const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
9469         if (!C1 || !C1->isInfinity() || C1->isNegative())
9470           return SDValue();
9471 
9472         const uint32_t Mask = SIInstrFlags::N_NORMAL |
9473                               SIInstrFlags::N_SUBNORMAL |
9474                               SIInstrFlags::N_ZERO |
9475                               SIInstrFlags::P_ZERO |
9476                               SIInstrFlags::P_SUBNORMAL |
9477                               SIInstrFlags::P_NORMAL;
9478 
9479         static_assert(((~(SIInstrFlags::S_NAN |
9480                           SIInstrFlags::Q_NAN |
9481                           SIInstrFlags::N_INFINITY |
9482                           SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
9483                       "mask not equal");
9484 
9485         SDLoc DL(N);
9486         return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
9487                            X, DAG.getConstant(Mask, DL, MVT::i32));
9488       }
9489     }
9490   }
9491 
9492   if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS)
9493     std::swap(LHS, RHS);
9494 
9495   if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS &&
9496       RHS.hasOneUse()) {
9497     ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
9498     // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan)
9499     // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan)
9500     const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
9501     if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask &&
9502         (RHS.getOperand(0) == LHS.getOperand(0) &&
9503          LHS.getOperand(0) == LHS.getOperand(1))) {
9504       const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN;
9505       unsigned NewMask = LCC == ISD::SETO ?
9506         Mask->getZExtValue() & ~OrdMask :
9507         Mask->getZExtValue() & OrdMask;
9508 
9509       SDLoc DL(N);
9510       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0),
9511                          DAG.getConstant(NewMask, DL, MVT::i32));
9512     }
9513   }
9514 
9515   if (VT == MVT::i32 &&
9516       (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
9517     // and x, (sext cc from i1) => select cc, x, 0
9518     if (RHS.getOpcode() != ISD::SIGN_EXTEND)
9519       std::swap(LHS, RHS);
9520     if (isBoolSGPR(RHS.getOperand(0)))
9521       return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
9522                            LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
9523   }
9524 
9525   // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
9526   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
9527   if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
9528       N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32_e64) != -1) {
9529     uint32_t LHSMask = getPermuteMask(DAG, LHS);
9530     uint32_t RHSMask = getPermuteMask(DAG, RHS);
9531     if (LHSMask != ~0u && RHSMask != ~0u) {
9532       // Canonicalize the expression in an attempt to have fewer unique masks
9533       // and therefore fewer registers used to hold the masks.
9534       if (LHSMask > RHSMask) {
9535         std::swap(LHSMask, RHSMask);
9536         std::swap(LHS, RHS);
9537       }
9538 
9539       // Select 0xc for each lane used from source operand. Zero has 0xc mask
9540       // set, 0xff have 0xff in the mask, actual lanes are in the 0-3 range.
9541       uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
9542       uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
9543 
9544       // Check of we need to combine values from two sources within a byte.
9545       if (!(LHSUsedLanes & RHSUsedLanes) &&
9546           // If we select high and lower word keep it for SDWA.
9547           // TODO: teach SDWA to work with v_perm_b32 and remove the check.
9548           !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
9549         // Each byte in each mask is either selector mask 0-3, or has higher
9550         // bits set in either of masks, which can be 0xff for 0xff or 0x0c for
9551         // zero. If 0x0c is in either mask it shall always be 0x0c. Otherwise
9552         // mask which is not 0xff wins. By anding both masks we have a correct
9553         // result except that 0x0c shall be corrected to give 0x0c only.
9554         uint32_t Mask = LHSMask & RHSMask;
9555         for (unsigned I = 0; I < 32; I += 8) {
9556           uint32_t ByteSel = 0xff << I;
9557           if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c)
9558             Mask &= (0x0c << I) & 0xffffffff;
9559         }
9560 
9561         // Add 4 to each active LHS lane. It will not affect any existing 0xff
9562         // or 0x0c.
9563         uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404);
9564         SDLoc DL(N);
9565 
9566         return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
9567                            LHS.getOperand(0), RHS.getOperand(0),
9568                            DAG.getConstant(Sel, DL, MVT::i32));
9569       }
9570     }
9571   }
9572 
9573   return SDValue();
9574 }
9575 
9576 SDValue SITargetLowering::performOrCombine(SDNode *N,
9577                                            DAGCombinerInfo &DCI) const {
9578   SelectionDAG &DAG = DCI.DAG;
9579   SDValue LHS = N->getOperand(0);
9580   SDValue RHS = N->getOperand(1);
9581 
9582   EVT VT = N->getValueType(0);
9583   if (VT == MVT::i1) {
9584     // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
9585     if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
9586         RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
9587       SDValue Src = LHS.getOperand(0);
9588       if (Src != RHS.getOperand(0))
9589         return SDValue();
9590 
9591       const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
9592       const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
9593       if (!CLHS || !CRHS)
9594         return SDValue();
9595 
9596       // Only 10 bits are used.
9597       static const uint32_t MaxMask = 0x3ff;
9598 
9599       uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
9600       SDLoc DL(N);
9601       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
9602                          Src, DAG.getConstant(NewMask, DL, MVT::i32));
9603     }
9604 
9605     return SDValue();
9606   }
9607 
9608   // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
9609   if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() &&
9610       LHS.getOpcode() == AMDGPUISD::PERM &&
9611       isa<ConstantSDNode>(LHS.getOperand(2))) {
9612     uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1));
9613     if (!Sel)
9614       return SDValue();
9615 
9616     Sel |= LHS.getConstantOperandVal(2);
9617     SDLoc DL(N);
9618     return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
9619                        LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
9620   }
9621 
9622   // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
9623   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
9624   if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
9625       N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32_e64) != -1) {
9626     uint32_t LHSMask = getPermuteMask(DAG, LHS);
9627     uint32_t RHSMask = getPermuteMask(DAG, RHS);
9628     if (LHSMask != ~0u && RHSMask != ~0u) {
9629       // Canonicalize the expression in an attempt to have fewer unique masks
9630       // and therefore fewer registers used to hold the masks.
9631       if (LHSMask > RHSMask) {
9632         std::swap(LHSMask, RHSMask);
9633         std::swap(LHS, RHS);
9634       }
9635 
9636       // Select 0xc for each lane used from source operand. Zero has 0xc mask
9637       // set, 0xff have 0xff in the mask, actual lanes are in the 0-3 range.
9638       uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
9639       uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
9640 
9641       // Check of we need to combine values from two sources within a byte.
9642       if (!(LHSUsedLanes & RHSUsedLanes) &&
9643           // If we select high and lower word keep it for SDWA.
9644           // TODO: teach SDWA to work with v_perm_b32 and remove the check.
9645           !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
9646         // Kill zero bytes selected by other mask. Zero value is 0xc.
9647         LHSMask &= ~RHSUsedLanes;
9648         RHSMask &= ~LHSUsedLanes;
9649         // Add 4 to each active LHS lane
9650         LHSMask |= LHSUsedLanes & 0x04040404;
9651         // Combine masks
9652         uint32_t Sel = LHSMask | RHSMask;
9653         SDLoc DL(N);
9654 
9655         return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
9656                            LHS.getOperand(0), RHS.getOperand(0),
9657                            DAG.getConstant(Sel, DL, MVT::i32));
9658       }
9659     }
9660   }
9661 
9662   if (VT != MVT::i64 || DCI.isBeforeLegalizeOps())
9663     return SDValue();
9664 
9665   // TODO: This could be a generic combine with a predicate for extracting the
9666   // high half of an integer being free.
9667 
9668   // (or i64:x, (zero_extend i32:y)) ->
9669   //   i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
9670   if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
9671       RHS.getOpcode() != ISD::ZERO_EXTEND)
9672     std::swap(LHS, RHS);
9673 
9674   if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
9675     SDValue ExtSrc = RHS.getOperand(0);
9676     EVT SrcVT = ExtSrc.getValueType();
9677     if (SrcVT == MVT::i32) {
9678       SDLoc SL(N);
9679       SDValue LowLHS, HiBits;
9680       std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
9681       SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
9682 
9683       DCI.AddToWorklist(LowOr.getNode());
9684       DCI.AddToWorklist(HiBits.getNode());
9685 
9686       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
9687                                 LowOr, HiBits);
9688       return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
9689     }
9690   }
9691 
9692   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
9693   if (CRHS) {
9694     if (SDValue Split
9695           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR,
9696                                      N->getOperand(0), CRHS))
9697       return Split;
9698   }
9699 
9700   return SDValue();
9701 }
9702 
9703 SDValue SITargetLowering::performXorCombine(SDNode *N,
9704                                             DAGCombinerInfo &DCI) const {
9705   if (SDValue RV = reassociateScalarOps(N, DCI.DAG))
9706     return RV;
9707 
9708   EVT VT = N->getValueType(0);
9709   if (VT != MVT::i64)
9710     return SDValue();
9711 
9712   SDValue LHS = N->getOperand(0);
9713   SDValue RHS = N->getOperand(1);
9714 
9715   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
9716   if (CRHS) {
9717     if (SDValue Split
9718           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
9719       return Split;
9720   }
9721 
9722   return SDValue();
9723 }
9724 
9725 SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
9726                                                    DAGCombinerInfo &DCI) const {
9727   if (!Subtarget->has16BitInsts() ||
9728       DCI.getDAGCombineLevel() < AfterLegalizeDAG)
9729     return SDValue();
9730 
9731   EVT VT = N->getValueType(0);
9732   if (VT != MVT::i32)
9733     return SDValue();
9734 
9735   SDValue Src = N->getOperand(0);
9736   if (Src.getValueType() != MVT::i16)
9737     return SDValue();
9738 
9739   return SDValue();
9740 }
9741 
9742 SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N,
9743                                                         DAGCombinerInfo &DCI)
9744                                                         const {
9745   SDValue Src = N->getOperand(0);
9746   auto *VTSign = cast<VTSDNode>(N->getOperand(1));
9747 
9748   if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE &&
9749       VTSign->getVT() == MVT::i8) ||
9750       (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT &&
9751       VTSign->getVT() == MVT::i16)) &&
9752       Src.hasOneUse()) {
9753     auto *M = cast<MemSDNode>(Src);
9754     SDValue Ops[] = {
9755       Src.getOperand(0), // Chain
9756       Src.getOperand(1), // rsrc
9757       Src.getOperand(2), // vindex
9758       Src.getOperand(3), // voffset
9759       Src.getOperand(4), // soffset
9760       Src.getOperand(5), // offset
9761       Src.getOperand(6),
9762       Src.getOperand(7)
9763     };
9764     // replace with BUFFER_LOAD_BYTE/SHORT
9765     SDVTList ResList = DCI.DAG.getVTList(MVT::i32,
9766                                          Src.getOperand(0).getValueType());
9767     unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ?
9768                    AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT;
9769     SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N),
9770                                                           ResList,
9771                                                           Ops, M->getMemoryVT(),
9772                                                           M->getMemOperand());
9773     return DCI.DAG.getMergeValues({BufferLoadSignExt,
9774                                   BufferLoadSignExt.getValue(1)}, SDLoc(N));
9775   }
9776   return SDValue();
9777 }
9778 
9779 SDValue SITargetLowering::performClassCombine(SDNode *N,
9780                                               DAGCombinerInfo &DCI) const {
9781   SelectionDAG &DAG = DCI.DAG;
9782   SDValue Mask = N->getOperand(1);
9783 
9784   // fp_class x, 0 -> false
9785   if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
9786     if (CMask->isZero())
9787       return DAG.getConstant(0, SDLoc(N), MVT::i1);
9788   }
9789 
9790   if (N->getOperand(0).isUndef())
9791     return DAG.getUNDEF(MVT::i1);
9792 
9793   return SDValue();
9794 }
9795 
9796 SDValue SITargetLowering::performRcpCombine(SDNode *N,
9797                                             DAGCombinerInfo &DCI) const {
9798   EVT VT = N->getValueType(0);
9799   SDValue N0 = N->getOperand(0);
9800 
9801   if (N0.isUndef())
9802     return N0;
9803 
9804   if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP ||
9805                          N0.getOpcode() == ISD::SINT_TO_FP)) {
9806     return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0,
9807                            N->getFlags());
9808   }
9809 
9810   if ((VT == MVT::f32 || VT == MVT::f16) && N0.getOpcode() == ISD::FSQRT) {
9811     return DCI.DAG.getNode(AMDGPUISD::RSQ, SDLoc(N), VT,
9812                            N0.getOperand(0), N->getFlags());
9813   }
9814 
9815   return AMDGPUTargetLowering::performRcpCombine(N, DCI);
9816 }
9817 
9818 bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
9819                                        unsigned MaxDepth) const {
9820   unsigned Opcode = Op.getOpcode();
9821   if (Opcode == ISD::FCANONICALIZE)
9822     return true;
9823 
9824   if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
9825     auto F = CFP->getValueAPF();
9826     if (F.isNaN() && F.isSignaling())
9827       return false;
9828     return !F.isDenormal() || denormalsEnabledForType(DAG, Op.getValueType());
9829   }
9830 
9831   // If source is a result of another standard FP operation it is already in
9832   // canonical form.
9833   if (MaxDepth == 0)
9834     return false;
9835 
9836   switch (Opcode) {
9837   // These will flush denorms if required.
9838   case ISD::FADD:
9839   case ISD::FSUB:
9840   case ISD::FMUL:
9841   case ISD::FCEIL:
9842   case ISD::FFLOOR:
9843   case ISD::FMA:
9844   case ISD::FMAD:
9845   case ISD::FSQRT:
9846   case ISD::FDIV:
9847   case ISD::FREM:
9848   case ISD::FP_ROUND:
9849   case ISD::FP_EXTEND:
9850   case AMDGPUISD::FMUL_LEGACY:
9851   case AMDGPUISD::FMAD_FTZ:
9852   case AMDGPUISD::RCP:
9853   case AMDGPUISD::RSQ:
9854   case AMDGPUISD::RSQ_CLAMP:
9855   case AMDGPUISD::RCP_LEGACY:
9856   case AMDGPUISD::RCP_IFLAG:
9857   case AMDGPUISD::DIV_SCALE:
9858   case AMDGPUISD::DIV_FMAS:
9859   case AMDGPUISD::DIV_FIXUP:
9860   case AMDGPUISD::FRACT:
9861   case AMDGPUISD::LDEXP:
9862   case AMDGPUISD::CVT_PKRTZ_F16_F32:
9863   case AMDGPUISD::CVT_F32_UBYTE0:
9864   case AMDGPUISD::CVT_F32_UBYTE1:
9865   case AMDGPUISD::CVT_F32_UBYTE2:
9866   case AMDGPUISD::CVT_F32_UBYTE3:
9867     return true;
9868 
9869   // It can/will be lowered or combined as a bit operation.
9870   // Need to check their input recursively to handle.
9871   case ISD::FNEG:
9872   case ISD::FABS:
9873   case ISD::FCOPYSIGN:
9874     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
9875 
9876   case ISD::FSIN:
9877   case ISD::FCOS:
9878   case ISD::FSINCOS:
9879     return Op.getValueType().getScalarType() != MVT::f16;
9880 
9881   case ISD::FMINNUM:
9882   case ISD::FMAXNUM:
9883   case ISD::FMINNUM_IEEE:
9884   case ISD::FMAXNUM_IEEE:
9885   case AMDGPUISD::CLAMP:
9886   case AMDGPUISD::FMED3:
9887   case AMDGPUISD::FMAX3:
9888   case AMDGPUISD::FMIN3: {
9889     // FIXME: Shouldn't treat the generic operations different based these.
9890     // However, we aren't really required to flush the result from
9891     // minnum/maxnum..
9892 
9893     // snans will be quieted, so we only need to worry about denormals.
9894     if (Subtarget->supportsMinMaxDenormModes() ||
9895         denormalsEnabledForType(DAG, Op.getValueType()))
9896       return true;
9897 
9898     // Flushing may be required.
9899     // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms. For such
9900     // targets need to check their input recursively.
9901 
9902     // FIXME: Does this apply with clamp? It's implemented with max.
9903     for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) {
9904       if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1))
9905         return false;
9906     }
9907 
9908     return true;
9909   }
9910   case ISD::SELECT: {
9911     return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) &&
9912            isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1);
9913   }
9914   case ISD::BUILD_VECTOR: {
9915     for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
9916       SDValue SrcOp = Op.getOperand(i);
9917       if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1))
9918         return false;
9919     }
9920 
9921     return true;
9922   }
9923   case ISD::EXTRACT_VECTOR_ELT:
9924   case ISD::EXTRACT_SUBVECTOR: {
9925     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
9926   }
9927   case ISD::INSERT_VECTOR_ELT: {
9928     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) &&
9929            isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1);
9930   }
9931   case ISD::UNDEF:
9932     // Could be anything.
9933     return false;
9934 
9935   case ISD::BITCAST:
9936     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
9937   case ISD::TRUNCATE: {
9938     // Hack round the mess we make when legalizing extract_vector_elt
9939     if (Op.getValueType() == MVT::i16) {
9940       SDValue TruncSrc = Op.getOperand(0);
9941       if (TruncSrc.getValueType() == MVT::i32 &&
9942           TruncSrc.getOpcode() == ISD::BITCAST &&
9943           TruncSrc.getOperand(0).getValueType() == MVT::v2f16) {
9944         return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1);
9945       }
9946     }
9947     return false;
9948   }
9949   case ISD::INTRINSIC_WO_CHAIN: {
9950     unsigned IntrinsicID
9951       = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
9952     // TODO: Handle more intrinsics
9953     switch (IntrinsicID) {
9954     case Intrinsic::amdgcn_cvt_pkrtz:
9955     case Intrinsic::amdgcn_cubeid:
9956     case Intrinsic::amdgcn_frexp_mant:
9957     case Intrinsic::amdgcn_fdot2:
9958     case Intrinsic::amdgcn_rcp:
9959     case Intrinsic::amdgcn_rsq:
9960     case Intrinsic::amdgcn_rsq_clamp:
9961     case Intrinsic::amdgcn_rcp_legacy:
9962     case Intrinsic::amdgcn_rsq_legacy:
9963     case Intrinsic::amdgcn_trig_preop:
9964       return true;
9965     default:
9966       break;
9967     }
9968 
9969     LLVM_FALLTHROUGH;
9970   }
9971   default:
9972     return denormalsEnabledForType(DAG, Op.getValueType()) &&
9973            DAG.isKnownNeverSNaN(Op);
9974   }
9975 
9976   llvm_unreachable("invalid operation");
9977 }
9978 
9979 bool SITargetLowering::isCanonicalized(Register Reg, MachineFunction &MF,
9980                                        unsigned MaxDepth) const {
9981   MachineRegisterInfo &MRI = MF.getRegInfo();
9982   MachineInstr *MI = MRI.getVRegDef(Reg);
9983   unsigned Opcode = MI->getOpcode();
9984 
9985   if (Opcode == AMDGPU::G_FCANONICALIZE)
9986     return true;
9987 
9988   Optional<FPValueAndVReg> FCR;
9989   // Constant splat (can be padded with undef) or scalar constant.
9990   if (mi_match(Reg, MRI, MIPatternMatch::m_GFCstOrSplat(FCR))) {
9991     if (FCR->Value.isSignaling())
9992       return false;
9993     return !FCR->Value.isDenormal() ||
9994            denormalsEnabledForType(MRI.getType(FCR->VReg), MF);
9995   }
9996 
9997   if (MaxDepth == 0)
9998     return false;
9999 
10000   switch (Opcode) {
10001   case AMDGPU::G_FMINNUM_IEEE:
10002   case AMDGPU::G_FMAXNUM_IEEE: {
10003     if (Subtarget->supportsMinMaxDenormModes() ||
10004         denormalsEnabledForType(MRI.getType(Reg), MF))
10005       return true;
10006     for (const MachineOperand &MO : llvm::drop_begin(MI->operands()))
10007       if (!isCanonicalized(MO.getReg(), MF, MaxDepth - 1))
10008         return false;
10009     return true;
10010   }
10011   default:
10012     return denormalsEnabledForType(MRI.getType(Reg), MF) &&
10013            isKnownNeverSNaN(Reg, MRI);
10014   }
10015 
10016   llvm_unreachable("invalid operation");
10017 }
10018 
10019 // Constant fold canonicalize.
10020 SDValue SITargetLowering::getCanonicalConstantFP(
10021   SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const {
10022   // Flush denormals to 0 if not enabled.
10023   if (C.isDenormal() && !denormalsEnabledForType(DAG, VT))
10024     return DAG.getConstantFP(0.0, SL, VT);
10025 
10026   if (C.isNaN()) {
10027     APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
10028     if (C.isSignaling()) {
10029       // Quiet a signaling NaN.
10030       // FIXME: Is this supposed to preserve payload bits?
10031       return DAG.getConstantFP(CanonicalQNaN, SL, VT);
10032     }
10033 
10034     // Make sure it is the canonical NaN bitpattern.
10035     //
10036     // TODO: Can we use -1 as the canonical NaN value since it's an inline
10037     // immediate?
10038     if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
10039       return DAG.getConstantFP(CanonicalQNaN, SL, VT);
10040   }
10041 
10042   // Already canonical.
10043   return DAG.getConstantFP(C, SL, VT);
10044 }
10045 
10046 static bool vectorEltWillFoldAway(SDValue Op) {
10047   return Op.isUndef() || isa<ConstantFPSDNode>(Op);
10048 }
10049 
10050 SDValue SITargetLowering::performFCanonicalizeCombine(
10051   SDNode *N,
10052   DAGCombinerInfo &DCI) const {
10053   SelectionDAG &DAG = DCI.DAG;
10054   SDValue N0 = N->getOperand(0);
10055   EVT VT = N->getValueType(0);
10056 
10057   // fcanonicalize undef -> qnan
10058   if (N0.isUndef()) {
10059     APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT));
10060     return DAG.getConstantFP(QNaN, SDLoc(N), VT);
10061   }
10062 
10063   if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) {
10064     EVT VT = N->getValueType(0);
10065     return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF());
10066   }
10067 
10068   // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x),
10069   //                                                   (fcanonicalize k)
10070   //
10071   // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0
10072 
10073   // TODO: This could be better with wider vectors that will be split to v2f16,
10074   // and to consider uses since there aren't that many packed operations.
10075   if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 &&
10076       isTypeLegal(MVT::v2f16)) {
10077     SDLoc SL(N);
10078     SDValue NewElts[2];
10079     SDValue Lo = N0.getOperand(0);
10080     SDValue Hi = N0.getOperand(1);
10081     EVT EltVT = Lo.getValueType();
10082 
10083     if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) {
10084       for (unsigned I = 0; I != 2; ++I) {
10085         SDValue Op = N0.getOperand(I);
10086         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
10087           NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT,
10088                                               CFP->getValueAPF());
10089         } else if (Op.isUndef()) {
10090           // Handled below based on what the other operand is.
10091           NewElts[I] = Op;
10092         } else {
10093           NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op);
10094         }
10095       }
10096 
10097       // If one half is undef, and one is constant, prefer a splat vector rather
10098       // than the normal qNaN. If it's a register, prefer 0.0 since that's
10099       // cheaper to use and may be free with a packed operation.
10100       if (NewElts[0].isUndef()) {
10101         if (isa<ConstantFPSDNode>(NewElts[1]))
10102           NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ?
10103             NewElts[1]: DAG.getConstantFP(0.0f, SL, EltVT);
10104       }
10105 
10106       if (NewElts[1].isUndef()) {
10107         NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ?
10108           NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT);
10109       }
10110 
10111       return DAG.getBuildVector(VT, SL, NewElts);
10112     }
10113   }
10114 
10115   unsigned SrcOpc = N0.getOpcode();
10116 
10117   // If it's free to do so, push canonicalizes further up the source, which may
10118   // find a canonical source.
10119   //
10120   // TODO: More opcodes. Note this is unsafe for the the _ieee minnum/maxnum for
10121   // sNaNs.
10122   if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) {
10123     auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
10124     if (CRHS && N0.hasOneUse()) {
10125       SDLoc SL(N);
10126       SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT,
10127                                    N0.getOperand(0));
10128       SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF());
10129       DCI.AddToWorklist(Canon0.getNode());
10130 
10131       return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1);
10132     }
10133   }
10134 
10135   return isCanonicalized(DAG, N0) ? N0 : SDValue();
10136 }
10137 
10138 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
10139   switch (Opc) {
10140   case ISD::FMAXNUM:
10141   case ISD::FMAXNUM_IEEE:
10142     return AMDGPUISD::FMAX3;
10143   case ISD::SMAX:
10144     return AMDGPUISD::SMAX3;
10145   case ISD::UMAX:
10146     return AMDGPUISD::UMAX3;
10147   case ISD::FMINNUM:
10148   case ISD::FMINNUM_IEEE:
10149     return AMDGPUISD::FMIN3;
10150   case ISD::SMIN:
10151     return AMDGPUISD::SMIN3;
10152   case ISD::UMIN:
10153     return AMDGPUISD::UMIN3;
10154   default:
10155     llvm_unreachable("Not a min/max opcode");
10156   }
10157 }
10158 
10159 SDValue SITargetLowering::performIntMed3ImmCombine(
10160   SelectionDAG &DAG, const SDLoc &SL,
10161   SDValue Op0, SDValue Op1, bool Signed) const {
10162   ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
10163   if (!K1)
10164     return SDValue();
10165 
10166   ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
10167   if (!K0)
10168     return SDValue();
10169 
10170   if (Signed) {
10171     if (K0->getAPIntValue().sge(K1->getAPIntValue()))
10172       return SDValue();
10173   } else {
10174     if (K0->getAPIntValue().uge(K1->getAPIntValue()))
10175       return SDValue();
10176   }
10177 
10178   EVT VT = K0->getValueType(0);
10179   unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
10180   if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
10181     return DAG.getNode(Med3Opc, SL, VT,
10182                        Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
10183   }
10184 
10185   // If there isn't a 16-bit med3 operation, convert to 32-bit.
10186   if (VT == MVT::i16) {
10187     MVT NVT = MVT::i32;
10188     unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
10189 
10190     SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
10191     SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
10192     SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
10193 
10194     SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
10195     return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
10196   }
10197 
10198   return SDValue();
10199 }
10200 
10201 static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
10202   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
10203     return C;
10204 
10205   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
10206     if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
10207       return C;
10208   }
10209 
10210   return nullptr;
10211 }
10212 
10213 SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
10214                                                   const SDLoc &SL,
10215                                                   SDValue Op0,
10216                                                   SDValue Op1) const {
10217   ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
10218   if (!K1)
10219     return SDValue();
10220 
10221   ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
10222   if (!K0)
10223     return SDValue();
10224 
10225   // Ordered >= (although NaN inputs should have folded away by now).
10226   if (K0->getValueAPF() > K1->getValueAPF())
10227     return SDValue();
10228 
10229   const MachineFunction &MF = DAG.getMachineFunction();
10230   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
10231 
10232   // TODO: Check IEEE bit enabled?
10233   EVT VT = Op0.getValueType();
10234   if (Info->getMode().DX10Clamp) {
10235     // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
10236     // hardware fmed3 behavior converting to a min.
10237     // FIXME: Should this be allowing -0.0?
10238     if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
10239       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
10240   }
10241 
10242   // med3 for f16 is only available on gfx9+, and not available for v2f16.
10243   if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
10244     // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
10245     // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
10246     // then give the other result, which is different from med3 with a NaN
10247     // input.
10248     SDValue Var = Op0.getOperand(0);
10249     if (!DAG.isKnownNeverSNaN(Var))
10250       return SDValue();
10251 
10252     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10253 
10254     if ((!K0->hasOneUse() ||
10255          TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) &&
10256         (!K1->hasOneUse() ||
10257          TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) {
10258       return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
10259                          Var, SDValue(K0, 0), SDValue(K1, 0));
10260     }
10261   }
10262 
10263   return SDValue();
10264 }
10265 
10266 SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
10267                                                DAGCombinerInfo &DCI) const {
10268   SelectionDAG &DAG = DCI.DAG;
10269 
10270   EVT VT = N->getValueType(0);
10271   unsigned Opc = N->getOpcode();
10272   SDValue Op0 = N->getOperand(0);
10273   SDValue Op1 = N->getOperand(1);
10274 
10275   // Only do this if the inner op has one use since this will just increases
10276   // register pressure for no benefit.
10277 
10278   if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
10279       !VT.isVector() &&
10280       (VT == MVT::i32 || VT == MVT::f32 ||
10281        ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) {
10282     // max(max(a, b), c) -> max3(a, b, c)
10283     // min(min(a, b), c) -> min3(a, b, c)
10284     if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
10285       SDLoc DL(N);
10286       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
10287                          DL,
10288                          N->getValueType(0),
10289                          Op0.getOperand(0),
10290                          Op0.getOperand(1),
10291                          Op1);
10292     }
10293 
10294     // Try commuted.
10295     // max(a, max(b, c)) -> max3(a, b, c)
10296     // min(a, min(b, c)) -> min3(a, b, c)
10297     if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
10298       SDLoc DL(N);
10299       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
10300                          DL,
10301                          N->getValueType(0),
10302                          Op0,
10303                          Op1.getOperand(0),
10304                          Op1.getOperand(1));
10305     }
10306   }
10307 
10308   // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
10309   if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
10310     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
10311       return Med3;
10312   }
10313 
10314   if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
10315     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
10316       return Med3;
10317   }
10318 
10319   // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
10320   if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
10321        (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) ||
10322        (Opc == AMDGPUISD::FMIN_LEGACY &&
10323         Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
10324       (VT == MVT::f32 || VT == MVT::f64 ||
10325        (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
10326        (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
10327       Op0.hasOneUse()) {
10328     if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
10329       return Res;
10330   }
10331 
10332   return SDValue();
10333 }
10334 
10335 static bool isClampZeroToOne(SDValue A, SDValue B) {
10336   if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
10337     if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
10338       // FIXME: Should this be allowing -0.0?
10339       return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
10340              (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
10341     }
10342   }
10343 
10344   return false;
10345 }
10346 
10347 // FIXME: Should only worry about snans for version with chain.
10348 SDValue SITargetLowering::performFMed3Combine(SDNode *N,
10349                                               DAGCombinerInfo &DCI) const {
10350   EVT VT = N->getValueType(0);
10351   // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
10352   // NaNs. With a NaN input, the order of the operands may change the result.
10353 
10354   SelectionDAG &DAG = DCI.DAG;
10355   SDLoc SL(N);
10356 
10357   SDValue Src0 = N->getOperand(0);
10358   SDValue Src1 = N->getOperand(1);
10359   SDValue Src2 = N->getOperand(2);
10360 
10361   if (isClampZeroToOne(Src0, Src1)) {
10362     // const_a, const_b, x -> clamp is safe in all cases including signaling
10363     // nans.
10364     // FIXME: Should this be allowing -0.0?
10365     return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
10366   }
10367 
10368   const MachineFunction &MF = DAG.getMachineFunction();
10369   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
10370 
10371   // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
10372   // handling no dx10-clamp?
10373   if (Info->getMode().DX10Clamp) {
10374     // If NaNs is clamped to 0, we are free to reorder the inputs.
10375 
10376     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
10377       std::swap(Src0, Src1);
10378 
10379     if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
10380       std::swap(Src1, Src2);
10381 
10382     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
10383       std::swap(Src0, Src1);
10384 
10385     if (isClampZeroToOne(Src1, Src2))
10386       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
10387   }
10388 
10389   return SDValue();
10390 }
10391 
10392 SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
10393                                                  DAGCombinerInfo &DCI) const {
10394   SDValue Src0 = N->getOperand(0);
10395   SDValue Src1 = N->getOperand(1);
10396   if (Src0.isUndef() && Src1.isUndef())
10397     return DCI.DAG.getUNDEF(N->getValueType(0));
10398   return SDValue();
10399 }
10400 
10401 // Check if EXTRACT_VECTOR_ELT/INSERT_VECTOR_ELT (<n x e>, var-idx) should be
10402 // expanded into a set of cmp/select instructions.
10403 bool SITargetLowering::shouldExpandVectorDynExt(unsigned EltSize,
10404                                                 unsigned NumElem,
10405                                                 bool IsDivergentIdx) {
10406   if (UseDivergentRegisterIndexing)
10407     return false;
10408 
10409   unsigned VecSize = EltSize * NumElem;
10410 
10411   // Sub-dword vectors of size 2 dword or less have better implementation.
10412   if (VecSize <= 64 && EltSize < 32)
10413     return false;
10414 
10415   // Always expand the rest of sub-dword instructions, otherwise it will be
10416   // lowered via memory.
10417   if (EltSize < 32)
10418     return true;
10419 
10420   // Always do this if var-idx is divergent, otherwise it will become a loop.
10421   if (IsDivergentIdx)
10422     return true;
10423 
10424   // Large vectors would yield too many compares and v_cndmask_b32 instructions.
10425   unsigned NumInsts = NumElem /* Number of compares */ +
10426                       ((EltSize + 31) / 32) * NumElem /* Number of cndmasks */;
10427   return NumInsts <= 16;
10428 }
10429 
10430 static bool shouldExpandVectorDynExt(SDNode *N) {
10431   SDValue Idx = N->getOperand(N->getNumOperands() - 1);
10432   if (isa<ConstantSDNode>(Idx))
10433     return false;
10434 
10435   SDValue Vec = N->getOperand(0);
10436   EVT VecVT = Vec.getValueType();
10437   EVT EltVT = VecVT.getVectorElementType();
10438   unsigned EltSize = EltVT.getSizeInBits();
10439   unsigned NumElem = VecVT.getVectorNumElements();
10440 
10441   return SITargetLowering::shouldExpandVectorDynExt(EltSize, NumElem,
10442                                                     Idx->isDivergent());
10443 }
10444 
10445 SDValue SITargetLowering::performExtractVectorEltCombine(
10446   SDNode *N, DAGCombinerInfo &DCI) const {
10447   SDValue Vec = N->getOperand(0);
10448   SelectionDAG &DAG = DCI.DAG;
10449 
10450   EVT VecVT = Vec.getValueType();
10451   EVT EltVT = VecVT.getVectorElementType();
10452 
10453   if ((Vec.getOpcode() == ISD::FNEG ||
10454        Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
10455     SDLoc SL(N);
10456     EVT EltVT = N->getValueType(0);
10457     SDValue Idx = N->getOperand(1);
10458     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
10459                               Vec.getOperand(0), Idx);
10460     return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
10461   }
10462 
10463   // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
10464   //    =>
10465   // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
10466   // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
10467   // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
10468   if (Vec.hasOneUse() && DCI.isBeforeLegalize()) {
10469     SDLoc SL(N);
10470     EVT EltVT = N->getValueType(0);
10471     SDValue Idx = N->getOperand(1);
10472     unsigned Opc = Vec.getOpcode();
10473 
10474     switch(Opc) {
10475     default:
10476       break;
10477       // TODO: Support other binary operations.
10478     case ISD::FADD:
10479     case ISD::FSUB:
10480     case ISD::FMUL:
10481     case ISD::ADD:
10482     case ISD::UMIN:
10483     case ISD::UMAX:
10484     case ISD::SMIN:
10485     case ISD::SMAX:
10486     case ISD::FMAXNUM:
10487     case ISD::FMINNUM:
10488     case ISD::FMAXNUM_IEEE:
10489     case ISD::FMINNUM_IEEE: {
10490       SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
10491                                  Vec.getOperand(0), Idx);
10492       SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
10493                                  Vec.getOperand(1), Idx);
10494 
10495       DCI.AddToWorklist(Elt0.getNode());
10496       DCI.AddToWorklist(Elt1.getNode());
10497       return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags());
10498     }
10499     }
10500   }
10501 
10502   unsigned VecSize = VecVT.getSizeInBits();
10503   unsigned EltSize = EltVT.getSizeInBits();
10504 
10505   // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx)
10506   if (::shouldExpandVectorDynExt(N)) {
10507     SDLoc SL(N);
10508     SDValue Idx = N->getOperand(1);
10509     SDValue V;
10510     for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
10511       SDValue IC = DAG.getVectorIdxConstant(I, SL);
10512       SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
10513       if (I == 0)
10514         V = Elt;
10515       else
10516         V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ);
10517     }
10518     return V;
10519   }
10520 
10521   if (!DCI.isBeforeLegalize())
10522     return SDValue();
10523 
10524   // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit
10525   // elements. This exposes more load reduction opportunities by replacing
10526   // multiple small extract_vector_elements with a single 32-bit extract.
10527   auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
10528   if (isa<MemSDNode>(Vec) &&
10529       EltSize <= 16 &&
10530       EltVT.isByteSized() &&
10531       VecSize > 32 &&
10532       VecSize % 32 == 0 &&
10533       Idx) {
10534     EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
10535 
10536     unsigned BitIndex = Idx->getZExtValue() * EltSize;
10537     unsigned EltIdx = BitIndex / 32;
10538     unsigned LeftoverBitIdx = BitIndex % 32;
10539     SDLoc SL(N);
10540 
10541     SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
10542     DCI.AddToWorklist(Cast.getNode());
10543 
10544     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
10545                               DAG.getConstant(EltIdx, SL, MVT::i32));
10546     DCI.AddToWorklist(Elt.getNode());
10547     SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
10548                               DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
10549     DCI.AddToWorklist(Srl.getNode());
10550 
10551     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl);
10552     DCI.AddToWorklist(Trunc.getNode());
10553     return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc);
10554   }
10555 
10556   return SDValue();
10557 }
10558 
10559 SDValue
10560 SITargetLowering::performInsertVectorEltCombine(SDNode *N,
10561                                                 DAGCombinerInfo &DCI) const {
10562   SDValue Vec = N->getOperand(0);
10563   SDValue Idx = N->getOperand(2);
10564   EVT VecVT = Vec.getValueType();
10565   EVT EltVT = VecVT.getVectorElementType();
10566 
10567   // INSERT_VECTOR_ELT (<n x e>, var-idx)
10568   // => BUILD_VECTOR n x select (e, const-idx)
10569   if (!::shouldExpandVectorDynExt(N))
10570     return SDValue();
10571 
10572   SelectionDAG &DAG = DCI.DAG;
10573   SDLoc SL(N);
10574   SDValue Ins = N->getOperand(1);
10575   EVT IdxVT = Idx.getValueType();
10576 
10577   SmallVector<SDValue, 16> Ops;
10578   for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
10579     SDValue IC = DAG.getConstant(I, SL, IdxVT);
10580     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
10581     SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ);
10582     Ops.push_back(V);
10583   }
10584 
10585   return DAG.getBuildVector(VecVT, SL, Ops);
10586 }
10587 
10588 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
10589                                           const SDNode *N0,
10590                                           const SDNode *N1) const {
10591   EVT VT = N0->getValueType(0);
10592 
10593   // Only do this if we are not trying to support denormals. v_mad_f32 does not
10594   // support denormals ever.
10595   if (((VT == MVT::f32 && !hasFP32Denormals(DAG.getMachineFunction())) ||
10596        (VT == MVT::f16 && !hasFP64FP16Denormals(DAG.getMachineFunction()) &&
10597         getSubtarget()->hasMadF16())) &&
10598        isOperationLegal(ISD::FMAD, VT))
10599     return ISD::FMAD;
10600 
10601   const TargetOptions &Options = DAG.getTarget().Options;
10602   if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
10603        (N0->getFlags().hasAllowContract() &&
10604         N1->getFlags().hasAllowContract())) &&
10605       isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
10606     return ISD::FMA;
10607   }
10608 
10609   return 0;
10610 }
10611 
10612 // For a reassociatable opcode perform:
10613 // op x, (op y, z) -> op (op x, z), y, if x and z are uniform
10614 SDValue SITargetLowering::reassociateScalarOps(SDNode *N,
10615                                                SelectionDAG &DAG) const {
10616   EVT VT = N->getValueType(0);
10617   if (VT != MVT::i32 && VT != MVT::i64)
10618     return SDValue();
10619 
10620   if (DAG.isBaseWithConstantOffset(SDValue(N, 0)))
10621     return SDValue();
10622 
10623   unsigned Opc = N->getOpcode();
10624   SDValue Op0 = N->getOperand(0);
10625   SDValue Op1 = N->getOperand(1);
10626 
10627   if (!(Op0->isDivergent() ^ Op1->isDivergent()))
10628     return SDValue();
10629 
10630   if (Op0->isDivergent())
10631     std::swap(Op0, Op1);
10632 
10633   if (Op1.getOpcode() != Opc || !Op1.hasOneUse())
10634     return SDValue();
10635 
10636   SDValue Op2 = Op1.getOperand(1);
10637   Op1 = Op1.getOperand(0);
10638   if (!(Op1->isDivergent() ^ Op2->isDivergent()))
10639     return SDValue();
10640 
10641   if (Op1->isDivergent())
10642     std::swap(Op1, Op2);
10643 
10644   SDLoc SL(N);
10645   SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1);
10646   return DAG.getNode(Opc, SL, VT, Add1, Op2);
10647 }
10648 
10649 static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
10650                            EVT VT,
10651                            SDValue N0, SDValue N1, SDValue N2,
10652                            bool Signed) {
10653   unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
10654   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
10655   SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
10656   return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
10657 }
10658 
10659 // Fold (add (mul x, y), z) --> (mad_[iu]64_[iu]32 x, y, z) plus high
10660 // multiplies, if any.
10661 //
10662 // Full 64-bit multiplies that feed into an addition are lowered here instead
10663 // of using the generic expansion. The generic expansion ends up with
10664 // a tree of ADD nodes that prevents us from using the "add" part of the
10665 // MAD instruction. The expansion produced here results in a chain of ADDs
10666 // instead of a tree.
10667 SDValue SITargetLowering::tryFoldToMad64_32(SDNode *N,
10668                                             DAGCombinerInfo &DCI) const {
10669   assert(N->getOpcode() == ISD::ADD);
10670 
10671   SelectionDAG &DAG = DCI.DAG;
10672   EVT VT = N->getValueType(0);
10673   SDLoc SL(N);
10674   SDValue LHS = N->getOperand(0);
10675   SDValue RHS = N->getOperand(1);
10676 
10677   if (VT.isVector())
10678     return SDValue();
10679 
10680   // S_MUL_HI_[IU]32 was added in gfx9, which allows us to keep the overall
10681   // result in scalar registers for uniform values.
10682   if (!N->isDivergent() && Subtarget->hasSMulHi())
10683     return SDValue();
10684 
10685   unsigned NumBits = VT.getScalarSizeInBits();
10686   if (NumBits <= 32 || NumBits > 64)
10687     return SDValue();
10688 
10689   if (LHS.getOpcode() != ISD::MUL) {
10690     assert(RHS.getOpcode() == ISD::MUL);
10691     std::swap(LHS, RHS);
10692   }
10693 
10694   // Avoid the fold if it would unduly increase the number of multiplies due to
10695   // multiple uses, except on hardware with full-rate multiply-add (which is
10696   // part of full-rate 64-bit ops).
10697   if (!Subtarget->hasFullRate64Ops()) {
10698     unsigned NumUsers = 0;
10699     for (SDNode *Use : LHS->uses()) {
10700       // There is a use that does not feed into addition, so the multiply can't
10701       // be removed. We prefer MUL + ADD + ADDC over MAD + MUL.
10702       if (Use->getOpcode() != ISD::ADD)
10703         return SDValue();
10704 
10705       // We prefer 2xMAD over MUL + 2xADD + 2xADDC (code density), and prefer
10706       // MUL + 3xADD + 3xADDC over 3xMAD.
10707       ++NumUsers;
10708       if (NumUsers >= 3)
10709         return SDValue();
10710     }
10711   }
10712 
10713   SDValue MulLHS = LHS.getOperand(0);
10714   SDValue MulRHS = LHS.getOperand(1);
10715   SDValue AddRHS = RHS;
10716 
10717   // Always check whether operands are small unsigned values, since that
10718   // knowledge is useful in more cases. Check for small signed values only if
10719   // doing so can unlock a shorter code sequence.
10720   bool MulLHSUnsigned32 = numBitsUnsigned(MulLHS, DAG) <= 32;
10721   bool MulRHSUnsigned32 = numBitsUnsigned(MulRHS, DAG) <= 32;
10722 
10723   bool MulSignedLo = false;
10724   if (!MulLHSUnsigned32 || !MulRHSUnsigned32) {
10725     MulSignedLo = numBitsSigned(MulLHS, DAG) <= 32 &&
10726                   numBitsSigned(MulRHS, DAG) <= 32;
10727   }
10728 
10729   // The operands and final result all have the same number of bits. If
10730   // operands need to be extended, they can be extended with garbage. The
10731   // resulting garbage in the high bits of the mad_[iu]64_[iu]32 result is
10732   // truncated away in the end.
10733   if (VT != MVT::i64) {
10734     MulLHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i64, MulLHS);
10735     MulRHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i64, MulRHS);
10736     AddRHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i64, AddRHS);
10737   }
10738 
10739   // The basic code generated is conceptually straightforward. Pseudo code:
10740   //
10741   //   accum = mad_64_32 lhs.lo, rhs.lo, accum
10742   //   accum.hi = add (mul lhs.hi, rhs.lo), accum.hi
10743   //   accum.hi = add (mul lhs.lo, rhs.hi), accum.hi
10744   //
10745   // The second and third lines are optional, depending on whether the factors
10746   // are {sign,zero}-extended or not.
10747   //
10748   // The actual DAG is noisier than the pseudo code, but only due to
10749   // instructions that disassemble values into low and high parts, and
10750   // assemble the final result.
10751   SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
10752   SDValue One = DAG.getConstant(1, SL, MVT::i32);
10753 
10754   auto MulLHSLo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, MulLHS);
10755   auto MulRHSLo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, MulRHS);
10756   SDValue Accum =
10757       getMad64_32(DAG, SL, MVT::i64, MulLHSLo, MulRHSLo, AddRHS, MulSignedLo);
10758 
10759   if (!MulSignedLo && (!MulLHSUnsigned32 || !MulRHSUnsigned32)) {
10760     auto AccumLo = DAG.getNode(ISD::EXTRACT_ELEMENT, SL, MVT::i32, Accum, Zero);
10761     auto AccumHi = DAG.getNode(ISD::EXTRACT_ELEMENT, SL, MVT::i32, Accum, One);
10762 
10763     if (!MulLHSUnsigned32) {
10764       auto MulLHSHi =
10765           DAG.getNode(ISD::EXTRACT_ELEMENT, SL, MVT::i32, MulLHS, One);
10766       SDValue MulHi = DAG.getNode(ISD::MUL, SL, MVT::i32, MulLHSHi, MulRHSLo);
10767       AccumHi = DAG.getNode(ISD::ADD, SL, MVT::i32, MulHi, AccumHi);
10768     }
10769 
10770     if (!MulRHSUnsigned32) {
10771       auto MulRHSHi =
10772           DAG.getNode(ISD::EXTRACT_ELEMENT, SL, MVT::i32, MulRHS, One);
10773       SDValue MulHi = DAG.getNode(ISD::MUL, SL, MVT::i32, MulLHSLo, MulRHSHi);
10774       AccumHi = DAG.getNode(ISD::ADD, SL, MVT::i32, MulHi, AccumHi);
10775     }
10776 
10777     Accum = DAG.getBuildVector(MVT::v2i32, SL, {AccumLo, AccumHi});
10778     Accum = DAG.getBitcast(MVT::i64, Accum);
10779   }
10780 
10781   if (VT != MVT::i64)
10782     Accum = DAG.getNode(ISD::TRUNCATE, SL, VT, Accum);
10783   return Accum;
10784 }
10785 
10786 SDValue SITargetLowering::performAddCombine(SDNode *N,
10787                                             DAGCombinerInfo &DCI) const {
10788   SelectionDAG &DAG = DCI.DAG;
10789   EVT VT = N->getValueType(0);
10790   SDLoc SL(N);
10791   SDValue LHS = N->getOperand(0);
10792   SDValue RHS = N->getOperand(1);
10793 
10794   if (LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL) {
10795     if (Subtarget->hasMad64_32()) {
10796       if (SDValue Folded = tryFoldToMad64_32(N, DCI))
10797         return Folded;
10798     }
10799 
10800     return SDValue();
10801   }
10802 
10803   if (SDValue V = reassociateScalarOps(N, DAG)) {
10804     return V;
10805   }
10806 
10807   if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
10808     return SDValue();
10809 
10810   // add x, zext (setcc) => addcarry x, 0, setcc
10811   // add x, sext (setcc) => subcarry x, 0, setcc
10812   unsigned Opc = LHS.getOpcode();
10813   if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
10814       Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
10815     std::swap(RHS, LHS);
10816 
10817   Opc = RHS.getOpcode();
10818   switch (Opc) {
10819   default: break;
10820   case ISD::ZERO_EXTEND:
10821   case ISD::SIGN_EXTEND:
10822   case ISD::ANY_EXTEND: {
10823     auto Cond = RHS.getOperand(0);
10824     // If this won't be a real VOPC output, we would still need to insert an
10825     // extra instruction anyway.
10826     if (!isBoolSGPR(Cond))
10827       break;
10828     SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
10829     SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
10830     Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
10831     return DAG.getNode(Opc, SL, VTList, Args);
10832   }
10833   case ISD::ADDCARRY: {
10834     // add x, (addcarry y, 0, cc) => addcarry x, y, cc
10835     auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
10836     if (!C || C->getZExtValue() != 0) break;
10837     SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
10838     return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
10839   }
10840   }
10841   return SDValue();
10842 }
10843 
10844 SDValue SITargetLowering::performSubCombine(SDNode *N,
10845                                             DAGCombinerInfo &DCI) const {
10846   SelectionDAG &DAG = DCI.DAG;
10847   EVT VT = N->getValueType(0);
10848 
10849   if (VT != MVT::i32)
10850     return SDValue();
10851 
10852   SDLoc SL(N);
10853   SDValue LHS = N->getOperand(0);
10854   SDValue RHS = N->getOperand(1);
10855 
10856   // sub x, zext (setcc) => subcarry x, 0, setcc
10857   // sub x, sext (setcc) => addcarry x, 0, setcc
10858   unsigned Opc = RHS.getOpcode();
10859   switch (Opc) {
10860   default: break;
10861   case ISD::ZERO_EXTEND:
10862   case ISD::SIGN_EXTEND:
10863   case ISD::ANY_EXTEND: {
10864     auto Cond = RHS.getOperand(0);
10865     // If this won't be a real VOPC output, we would still need to insert an
10866     // extra instruction anyway.
10867     if (!isBoolSGPR(Cond))
10868       break;
10869     SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
10870     SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
10871     Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::ADDCARRY : ISD::SUBCARRY;
10872     return DAG.getNode(Opc, SL, VTList, Args);
10873   }
10874   }
10875 
10876   if (LHS.getOpcode() == ISD::SUBCARRY) {
10877     // sub (subcarry x, 0, cc), y => subcarry x, y, cc
10878     auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
10879     if (!C || !C->isZero())
10880       return SDValue();
10881     SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
10882     return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
10883   }
10884   return SDValue();
10885 }
10886 
10887 SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
10888   DAGCombinerInfo &DCI) const {
10889 
10890   if (N->getValueType(0) != MVT::i32)
10891     return SDValue();
10892 
10893   auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
10894   if (!C || C->getZExtValue() != 0)
10895     return SDValue();
10896 
10897   SelectionDAG &DAG = DCI.DAG;
10898   SDValue LHS = N->getOperand(0);
10899 
10900   // addcarry (add x, y), 0, cc => addcarry x, y, cc
10901   // subcarry (sub x, y), 0, cc => subcarry x, y, cc
10902   unsigned LHSOpc = LHS.getOpcode();
10903   unsigned Opc = N->getOpcode();
10904   if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
10905       (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
10906     SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
10907     return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
10908   }
10909   return SDValue();
10910 }
10911 
10912 SDValue SITargetLowering::performFAddCombine(SDNode *N,
10913                                              DAGCombinerInfo &DCI) const {
10914   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
10915     return SDValue();
10916 
10917   SelectionDAG &DAG = DCI.DAG;
10918   EVT VT = N->getValueType(0);
10919 
10920   SDLoc SL(N);
10921   SDValue LHS = N->getOperand(0);
10922   SDValue RHS = N->getOperand(1);
10923 
10924   // These should really be instruction patterns, but writing patterns with
10925   // source modifiers is a pain.
10926 
10927   // fadd (fadd (a, a), b) -> mad 2.0, a, b
10928   if (LHS.getOpcode() == ISD::FADD) {
10929     SDValue A = LHS.getOperand(0);
10930     if (A == LHS.getOperand(1)) {
10931       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
10932       if (FusedOp != 0) {
10933         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
10934         return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
10935       }
10936     }
10937   }
10938 
10939   // fadd (b, fadd (a, a)) -> mad 2.0, a, b
10940   if (RHS.getOpcode() == ISD::FADD) {
10941     SDValue A = RHS.getOperand(0);
10942     if (A == RHS.getOperand(1)) {
10943       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
10944       if (FusedOp != 0) {
10945         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
10946         return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
10947       }
10948     }
10949   }
10950 
10951   return SDValue();
10952 }
10953 
10954 SDValue SITargetLowering::performFSubCombine(SDNode *N,
10955                                              DAGCombinerInfo &DCI) const {
10956   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
10957     return SDValue();
10958 
10959   SelectionDAG &DAG = DCI.DAG;
10960   SDLoc SL(N);
10961   EVT VT = N->getValueType(0);
10962   assert(!VT.isVector());
10963 
10964   // Try to get the fneg to fold into the source modifier. This undoes generic
10965   // DAG combines and folds them into the mad.
10966   //
10967   // Only do this if we are not trying to support denormals. v_mad_f32 does
10968   // not support denormals ever.
10969   SDValue LHS = N->getOperand(0);
10970   SDValue RHS = N->getOperand(1);
10971   if (LHS.getOpcode() == ISD::FADD) {
10972     // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
10973     SDValue A = LHS.getOperand(0);
10974     if (A == LHS.getOperand(1)) {
10975       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
10976       if (FusedOp != 0){
10977         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
10978         SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
10979 
10980         return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
10981       }
10982     }
10983   }
10984 
10985   if (RHS.getOpcode() == ISD::FADD) {
10986     // (fsub c, (fadd a, a)) -> mad -2.0, a, c
10987 
10988     SDValue A = RHS.getOperand(0);
10989     if (A == RHS.getOperand(1)) {
10990       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
10991       if (FusedOp != 0){
10992         const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
10993         return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
10994       }
10995     }
10996   }
10997 
10998   return SDValue();
10999 }
11000 
11001 SDValue SITargetLowering::performFMACombine(SDNode *N,
11002                                             DAGCombinerInfo &DCI) const {
11003   SelectionDAG &DAG = DCI.DAG;
11004   EVT VT = N->getValueType(0);
11005   SDLoc SL(N);
11006 
11007   if (!Subtarget->hasDot7Insts() || VT != MVT::f32)
11008     return SDValue();
11009 
11010   // FMA((F32)S0.x, (F32)S1. x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
11011   //   FDOT2((V2F16)S0, (V2F16)S1, (F32)z))
11012   SDValue Op1 = N->getOperand(0);
11013   SDValue Op2 = N->getOperand(1);
11014   SDValue FMA = N->getOperand(2);
11015 
11016   if (FMA.getOpcode() != ISD::FMA ||
11017       Op1.getOpcode() != ISD::FP_EXTEND ||
11018       Op2.getOpcode() != ISD::FP_EXTEND)
11019     return SDValue();
11020 
11021   // fdot2_f32_f16 always flushes fp32 denormal operand and output to zero,
11022   // regardless of the denorm mode setting. Therefore,
11023   // unsafe-fp-math/fp-contract is sufficient to allow generating fdot2.
11024   const TargetOptions &Options = DAG.getTarget().Options;
11025   if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
11026       (N->getFlags().hasAllowContract() &&
11027        FMA->getFlags().hasAllowContract())) {
11028     Op1 = Op1.getOperand(0);
11029     Op2 = Op2.getOperand(0);
11030     if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
11031         Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11032       return SDValue();
11033 
11034     SDValue Vec1 = Op1.getOperand(0);
11035     SDValue Idx1 = Op1.getOperand(1);
11036     SDValue Vec2 = Op2.getOperand(0);
11037 
11038     SDValue FMAOp1 = FMA.getOperand(0);
11039     SDValue FMAOp2 = FMA.getOperand(1);
11040     SDValue FMAAcc = FMA.getOperand(2);
11041 
11042     if (FMAOp1.getOpcode() != ISD::FP_EXTEND ||
11043         FMAOp2.getOpcode() != ISD::FP_EXTEND)
11044       return SDValue();
11045 
11046     FMAOp1 = FMAOp1.getOperand(0);
11047     FMAOp2 = FMAOp2.getOperand(0);
11048     if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
11049         FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11050       return SDValue();
11051 
11052     SDValue Vec3 = FMAOp1.getOperand(0);
11053     SDValue Vec4 = FMAOp2.getOperand(0);
11054     SDValue Idx2 = FMAOp1.getOperand(1);
11055 
11056     if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) ||
11057         // Idx1 and Idx2 cannot be the same.
11058         Idx1 == Idx2)
11059       return SDValue();
11060 
11061     if (Vec1 == Vec2 || Vec3 == Vec4)
11062       return SDValue();
11063 
11064     if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16)
11065       return SDValue();
11066 
11067     if ((Vec1 == Vec3 && Vec2 == Vec4) ||
11068         (Vec1 == Vec4 && Vec2 == Vec3)) {
11069       return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc,
11070                          DAG.getTargetConstant(0, SL, MVT::i1));
11071     }
11072   }
11073   return SDValue();
11074 }
11075 
11076 SDValue SITargetLowering::performSetCCCombine(SDNode *N,
11077                                               DAGCombinerInfo &DCI) const {
11078   SelectionDAG &DAG = DCI.DAG;
11079   SDLoc SL(N);
11080 
11081   SDValue LHS = N->getOperand(0);
11082   SDValue RHS = N->getOperand(1);
11083   EVT VT = LHS.getValueType();
11084   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
11085 
11086   auto CRHS = dyn_cast<ConstantSDNode>(RHS);
11087   if (!CRHS) {
11088     CRHS = dyn_cast<ConstantSDNode>(LHS);
11089     if (CRHS) {
11090       std::swap(LHS, RHS);
11091       CC = getSetCCSwappedOperands(CC);
11092     }
11093   }
11094 
11095   if (CRHS) {
11096     if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
11097         isBoolSGPR(LHS.getOperand(0))) {
11098       // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
11099       // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
11100       // setcc (sext from i1 cc),  0, eq|sge|ule) => not cc => xor cc, -1
11101       // setcc (sext from i1 cc),  0, ne|ugt|slt) => cc
11102       if ((CRHS->isAllOnes() &&
11103            (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
11104           (CRHS->isZero() &&
11105            (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
11106         return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
11107                            DAG.getConstant(-1, SL, MVT::i1));
11108       if ((CRHS->isAllOnes() &&
11109            (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
11110           (CRHS->isZero() &&
11111            (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
11112         return LHS.getOperand(0);
11113     }
11114 
11115     const APInt &CRHSVal = CRHS->getAPIntValue();
11116     if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
11117         LHS.getOpcode() == ISD::SELECT &&
11118         isa<ConstantSDNode>(LHS.getOperand(1)) &&
11119         isa<ConstantSDNode>(LHS.getOperand(2)) &&
11120         LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) &&
11121         isBoolSGPR(LHS.getOperand(0))) {
11122       // Given CT != FT:
11123       // setcc (select cc, CT, CF), CF, eq => xor cc, -1
11124       // setcc (select cc, CT, CF), CF, ne => cc
11125       // setcc (select cc, CT, CF), CT, ne => xor cc, -1
11126       // setcc (select cc, CT, CF), CT, eq => cc
11127       const APInt &CT = LHS.getConstantOperandAPInt(1);
11128       const APInt &CF = LHS.getConstantOperandAPInt(2);
11129 
11130       if ((CF == CRHSVal && CC == ISD::SETEQ) ||
11131           (CT == CRHSVal && CC == ISD::SETNE))
11132         return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
11133                            DAG.getConstant(-1, SL, MVT::i1));
11134       if ((CF == CRHSVal && CC == ISD::SETNE) ||
11135           (CT == CRHSVal && CC == ISD::SETEQ))
11136         return LHS.getOperand(0);
11137     }
11138   }
11139 
11140   if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
11141                                            VT != MVT::f16))
11142     return SDValue();
11143 
11144   // Match isinf/isfinite pattern
11145   // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
11146   // (fcmp one (fabs x), inf) -> (fp_class x,
11147   // (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero)
11148   if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) {
11149     const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
11150     if (!CRHS)
11151       return SDValue();
11152 
11153     const APFloat &APF = CRHS->getValueAPF();
11154     if (APF.isInfinity() && !APF.isNegative()) {
11155       const unsigned IsInfMask = SIInstrFlags::P_INFINITY |
11156                                  SIInstrFlags::N_INFINITY;
11157       const unsigned IsFiniteMask = SIInstrFlags::N_ZERO |
11158                                     SIInstrFlags::P_ZERO |
11159                                     SIInstrFlags::N_NORMAL |
11160                                     SIInstrFlags::P_NORMAL |
11161                                     SIInstrFlags::N_SUBNORMAL |
11162                                     SIInstrFlags::P_SUBNORMAL;
11163       unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask;
11164       return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
11165                          DAG.getConstant(Mask, SL, MVT::i32));
11166     }
11167   }
11168 
11169   return SDValue();
11170 }
11171 
11172 SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
11173                                                      DAGCombinerInfo &DCI) const {
11174   SelectionDAG &DAG = DCI.DAG;
11175   SDLoc SL(N);
11176   unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
11177 
11178   SDValue Src = N->getOperand(0);
11179   SDValue Shift = N->getOperand(0);
11180 
11181   // TODO: Extend type shouldn't matter (assuming legal types).
11182   if (Shift.getOpcode() == ISD::ZERO_EXTEND)
11183     Shift = Shift.getOperand(0);
11184 
11185   if (Shift.getOpcode() == ISD::SRL || Shift.getOpcode() == ISD::SHL) {
11186     // cvt_f32_ubyte1 (shl x,  8) -> cvt_f32_ubyte0 x
11187     // cvt_f32_ubyte3 (shl x, 16) -> cvt_f32_ubyte1 x
11188     // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
11189     // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
11190     // cvt_f32_ubyte0 (srl x,  8) -> cvt_f32_ubyte1 x
11191     if (auto *C = dyn_cast<ConstantSDNode>(Shift.getOperand(1))) {
11192       SDValue Shifted = DAG.getZExtOrTrunc(Shift.getOperand(0),
11193                                  SDLoc(Shift.getOperand(0)), MVT::i32);
11194 
11195       unsigned ShiftOffset = 8 * Offset;
11196       if (Shift.getOpcode() == ISD::SHL)
11197         ShiftOffset -= C->getZExtValue();
11198       else
11199         ShiftOffset += C->getZExtValue();
11200 
11201       if (ShiftOffset < 32 && (ShiftOffset % 8) == 0) {
11202         return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + ShiftOffset / 8, SL,
11203                            MVT::f32, Shifted);
11204       }
11205     }
11206   }
11207 
11208   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11209   APInt DemandedBits = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
11210   if (TLI.SimplifyDemandedBits(Src, DemandedBits, DCI)) {
11211     // We simplified Src. If this node is not dead, visit it again so it is
11212     // folded properly.
11213     if (N->getOpcode() != ISD::DELETED_NODE)
11214       DCI.AddToWorklist(N);
11215     return SDValue(N, 0);
11216   }
11217 
11218   // Handle (or x, (srl y, 8)) pattern when known bits are zero.
11219   if (SDValue DemandedSrc =
11220           TLI.SimplifyMultipleUseDemandedBits(Src, DemandedBits, DAG))
11221     return DAG.getNode(N->getOpcode(), SL, MVT::f32, DemandedSrc);
11222 
11223   return SDValue();
11224 }
11225 
11226 SDValue SITargetLowering::performClampCombine(SDNode *N,
11227                                               DAGCombinerInfo &DCI) const {
11228   ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
11229   if (!CSrc)
11230     return SDValue();
11231 
11232   const MachineFunction &MF = DCI.DAG.getMachineFunction();
11233   const APFloat &F = CSrc->getValueAPF();
11234   APFloat Zero = APFloat::getZero(F.getSemantics());
11235   if (F < Zero ||
11236       (F.isNaN() && MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) {
11237     return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
11238   }
11239 
11240   APFloat One(F.getSemantics(), "1.0");
11241   if (F > One)
11242     return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));
11243 
11244   return SDValue(CSrc, 0);
11245 }
11246 
11247 
11248 SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
11249                                             DAGCombinerInfo &DCI) const {
11250   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
11251     return SDValue();
11252   switch (N->getOpcode()) {
11253   case ISD::ADD:
11254     return performAddCombine(N, DCI);
11255   case ISD::SUB:
11256     return performSubCombine(N, DCI);
11257   case ISD::ADDCARRY:
11258   case ISD::SUBCARRY:
11259     return performAddCarrySubCarryCombine(N, DCI);
11260   case ISD::FADD:
11261     return performFAddCombine(N, DCI);
11262   case ISD::FSUB:
11263     return performFSubCombine(N, DCI);
11264   case ISD::SETCC:
11265     return performSetCCCombine(N, DCI);
11266   case ISD::FMAXNUM:
11267   case ISD::FMINNUM:
11268   case ISD::FMAXNUM_IEEE:
11269   case ISD::FMINNUM_IEEE:
11270   case ISD::SMAX:
11271   case ISD::SMIN:
11272   case ISD::UMAX:
11273   case ISD::UMIN:
11274   case AMDGPUISD::FMIN_LEGACY:
11275   case AMDGPUISD::FMAX_LEGACY:
11276     return performMinMaxCombine(N, DCI);
11277   case ISD::FMA:
11278     return performFMACombine(N, DCI);
11279   case ISD::AND:
11280     return performAndCombine(N, DCI);
11281   case ISD::OR:
11282     return performOrCombine(N, DCI);
11283   case ISD::XOR:
11284     return performXorCombine(N, DCI);
11285   case ISD::ZERO_EXTEND:
11286     return performZeroExtendCombine(N, DCI);
11287   case ISD::SIGN_EXTEND_INREG:
11288     return performSignExtendInRegCombine(N , DCI);
11289   case AMDGPUISD::FP_CLASS:
11290     return performClassCombine(N, DCI);
11291   case ISD::FCANONICALIZE:
11292     return performFCanonicalizeCombine(N, DCI);
11293   case AMDGPUISD::RCP:
11294     return performRcpCombine(N, DCI);
11295   case AMDGPUISD::FRACT:
11296   case AMDGPUISD::RSQ:
11297   case AMDGPUISD::RCP_LEGACY:
11298   case AMDGPUISD::RCP_IFLAG:
11299   case AMDGPUISD::RSQ_CLAMP:
11300   case AMDGPUISD::LDEXP: {
11301     // FIXME: This is probably wrong. If src is an sNaN, it won't be quieted
11302     SDValue Src = N->getOperand(0);
11303     if (Src.isUndef())
11304       return Src;
11305     break;
11306   }
11307   case ISD::SINT_TO_FP:
11308   case ISD::UINT_TO_FP:
11309     return performUCharToFloatCombine(N, DCI);
11310   case AMDGPUISD::CVT_F32_UBYTE0:
11311   case AMDGPUISD::CVT_F32_UBYTE1:
11312   case AMDGPUISD::CVT_F32_UBYTE2:
11313   case AMDGPUISD::CVT_F32_UBYTE3:
11314     return performCvtF32UByteNCombine(N, DCI);
11315   case AMDGPUISD::FMED3:
11316     return performFMed3Combine(N, DCI);
11317   case AMDGPUISD::CVT_PKRTZ_F16_F32:
11318     return performCvtPkRTZCombine(N, DCI);
11319   case AMDGPUISD::CLAMP:
11320     return performClampCombine(N, DCI);
11321   case ISD::SCALAR_TO_VECTOR: {
11322     SelectionDAG &DAG = DCI.DAG;
11323     EVT VT = N->getValueType(0);
11324 
11325     // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
11326     if (VT == MVT::v2i16 || VT == MVT::v2f16) {
11327       SDLoc SL(N);
11328       SDValue Src = N->getOperand(0);
11329       EVT EltVT = Src.getValueType();
11330       if (EltVT == MVT::f16)
11331         Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
11332 
11333       SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
11334       return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
11335     }
11336 
11337     break;
11338   }
11339   case ISD::EXTRACT_VECTOR_ELT:
11340     return performExtractVectorEltCombine(N, DCI);
11341   case ISD::INSERT_VECTOR_ELT:
11342     return performInsertVectorEltCombine(N, DCI);
11343   case ISD::LOAD: {
11344     if (SDValue Widended = widenLoad(cast<LoadSDNode>(N), DCI))
11345       return Widended;
11346     LLVM_FALLTHROUGH;
11347   }
11348   default: {
11349     if (!DCI.isBeforeLegalize()) {
11350       if (MemSDNode *MemNode = dyn_cast<MemSDNode>(N))
11351         return performMemSDNodeCombine(MemNode, DCI);
11352     }
11353 
11354     break;
11355   }
11356   }
11357 
11358   return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
11359 }
11360 
11361 /// Helper function for adjustWritemask
11362 static unsigned SubIdx2Lane(unsigned Idx) {
11363   switch (Idx) {
11364   default: return ~0u;
11365   case AMDGPU::sub0: return 0;
11366   case AMDGPU::sub1: return 1;
11367   case AMDGPU::sub2: return 2;
11368   case AMDGPU::sub3: return 3;
11369   case AMDGPU::sub4: return 4; // Possible with TFE/LWE
11370   }
11371 }
11372 
11373 /// Adjust the writemask of MIMG instructions
11374 SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
11375                                           SelectionDAG &DAG) const {
11376   unsigned Opcode = Node->getMachineOpcode();
11377 
11378   // Subtract 1 because the vdata output is not a MachineSDNode operand.
11379   int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1;
11380   if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx))
11381     return Node; // not implemented for D16
11382 
11383   SDNode *Users[5] = { nullptr };
11384   unsigned Lane = 0;
11385   unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1;
11386   unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
11387   unsigned NewDmask = 0;
11388   unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1;
11389   unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1;
11390   bool UsesTFC = ((int(TFEIdx) >= 0 && Node->getConstantOperandVal(TFEIdx)) ||
11391                   Node->getConstantOperandVal(LWEIdx))
11392                      ? true
11393                      : false;
11394   unsigned TFCLane = 0;
11395   bool HasChain = Node->getNumValues() > 1;
11396 
11397   if (OldDmask == 0) {
11398     // These are folded out, but on the chance it happens don't assert.
11399     return Node;
11400   }
11401 
11402   unsigned OldBitsSet = countPopulation(OldDmask);
11403   // Work out which is the TFE/LWE lane if that is enabled.
11404   if (UsesTFC) {
11405     TFCLane = OldBitsSet;
11406   }
11407 
11408   // Try to figure out the used register components
11409   for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
11410        I != E; ++I) {
11411 
11412     // Don't look at users of the chain.
11413     if (I.getUse().getResNo() != 0)
11414       continue;
11415 
11416     // Abort if we can't understand the usage
11417     if (!I->isMachineOpcode() ||
11418         I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
11419       return Node;
11420 
11421     // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
11422     // Note that subregs are packed, i.e. Lane==0 is the first bit set
11423     // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
11424     // set, etc.
11425     Lane = SubIdx2Lane(I->getConstantOperandVal(1));
11426     if (Lane == ~0u)
11427       return Node;
11428 
11429     // Check if the use is for the TFE/LWE generated result at VGPRn+1.
11430     if (UsesTFC && Lane == TFCLane) {
11431       Users[Lane] = *I;
11432     } else {
11433       // Set which texture component corresponds to the lane.
11434       unsigned Comp;
11435       for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) {
11436         Comp = countTrailingZeros(Dmask);
11437         Dmask &= ~(1 << Comp);
11438       }
11439 
11440       // Abort if we have more than one user per component.
11441       if (Users[Lane])
11442         return Node;
11443 
11444       Users[Lane] = *I;
11445       NewDmask |= 1 << Comp;
11446     }
11447   }
11448 
11449   // Don't allow 0 dmask, as hardware assumes one channel enabled.
11450   bool NoChannels = !NewDmask;
11451   if (NoChannels) {
11452     if (!UsesTFC) {
11453       // No uses of the result and not using TFC. Then do nothing.
11454       return Node;
11455     }
11456     // If the original dmask has one channel - then nothing to do
11457     if (OldBitsSet == 1)
11458       return Node;
11459     // Use an arbitrary dmask - required for the instruction to work
11460     NewDmask = 1;
11461   }
11462   // Abort if there's no change
11463   if (NewDmask == OldDmask)
11464     return Node;
11465 
11466   unsigned BitsSet = countPopulation(NewDmask);
11467 
11468   // Check for TFE or LWE - increase the number of channels by one to account
11469   // for the extra return value
11470   // This will need adjustment for D16 if this is also included in
11471   // adjustWriteMask (this function) but at present D16 are excluded.
11472   unsigned NewChannels = BitsSet + UsesTFC;
11473 
11474   int NewOpcode =
11475       AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels);
11476   assert(NewOpcode != -1 &&
11477          NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
11478          "failed to find equivalent MIMG op");
11479 
11480   // Adjust the writemask in the node
11481   SmallVector<SDValue, 12> Ops;
11482   Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
11483   Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
11484   Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
11485 
11486   MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
11487 
11488   MVT ResultVT = NewChannels == 1 ?
11489     SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 :
11490                            NewChannels == 5 ? 8 : NewChannels);
11491   SDVTList NewVTList = HasChain ?
11492     DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);
11493 
11494 
11495   MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
11496                                               NewVTList, Ops);
11497 
11498   if (HasChain) {
11499     // Update chain.
11500     DAG.setNodeMemRefs(NewNode, Node->memoperands());
11501     DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
11502   }
11503 
11504   if (NewChannels == 1) {
11505     assert(Node->hasNUsesOfValue(1, 0));
11506     SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
11507                                       SDLoc(Node), Users[Lane]->getValueType(0),
11508                                       SDValue(NewNode, 0));
11509     DAG.ReplaceAllUsesWith(Users[Lane], Copy);
11510     return nullptr;
11511   }
11512 
11513   // Update the users of the node with the new indices
11514   for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) {
11515     SDNode *User = Users[i];
11516     if (!User) {
11517       // Handle the special case of NoChannels. We set NewDmask to 1 above, but
11518       // Users[0] is still nullptr because channel 0 doesn't really have a use.
11519       if (i || !NoChannels)
11520         continue;
11521     } else {
11522       SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
11523       DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
11524     }
11525 
11526     switch (Idx) {
11527     default: break;
11528     case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
11529     case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
11530     case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
11531     case AMDGPU::sub3: Idx = AMDGPU::sub4; break;
11532     }
11533   }
11534 
11535   DAG.RemoveDeadNode(Node);
11536   return nullptr;
11537 }
11538 
11539 static bool isFrameIndexOp(SDValue Op) {
11540   if (Op.getOpcode() == ISD::AssertZext)
11541     Op = Op.getOperand(0);
11542 
11543   return isa<FrameIndexSDNode>(Op);
11544 }
11545 
11546 /// Legalize target independent instructions (e.g. INSERT_SUBREG)
11547 /// with frame index operands.
11548 /// LLVM assumes that inputs are to these instructions are registers.
11549 SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
11550                                                         SelectionDAG &DAG) const {
11551   if (Node->getOpcode() == ISD::CopyToReg) {
11552     RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
11553     SDValue SrcVal = Node->getOperand(2);
11554 
11555     // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
11556     // to try understanding copies to physical registers.
11557     if (SrcVal.getValueType() == MVT::i1 && DestReg->getReg().isPhysical()) {
11558       SDLoc SL(Node);
11559       MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
11560       SDValue VReg = DAG.getRegister(
11561         MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
11562 
11563       SDNode *Glued = Node->getGluedNode();
11564       SDValue ToVReg
11565         = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
11566                          SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
11567       SDValue ToResultReg
11568         = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
11569                            VReg, ToVReg.getValue(1));
11570       DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
11571       DAG.RemoveDeadNode(Node);
11572       return ToResultReg.getNode();
11573     }
11574   }
11575 
11576   SmallVector<SDValue, 8> Ops;
11577   for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
11578     if (!isFrameIndexOp(Node->getOperand(i))) {
11579       Ops.push_back(Node->getOperand(i));
11580       continue;
11581     }
11582 
11583     SDLoc DL(Node);
11584     Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
11585                                      Node->getOperand(i).getValueType(),
11586                                      Node->getOperand(i)), 0));
11587   }
11588 
11589   return DAG.UpdateNodeOperands(Node, Ops);
11590 }
11591 
11592 /// Fold the instructions after selecting them.
11593 /// Returns null if users were already updated.
11594 SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
11595                                           SelectionDAG &DAG) const {
11596   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
11597   unsigned Opcode = Node->getMachineOpcode();
11598 
11599   if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
11600       !TII->isGather4(Opcode) &&
11601       AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) != -1) {
11602     return adjustWritemask(Node, DAG);
11603   }
11604 
11605   if (Opcode == AMDGPU::INSERT_SUBREG ||
11606       Opcode == AMDGPU::REG_SEQUENCE) {
11607     legalizeTargetIndependentNode(Node, DAG);
11608     return Node;
11609   }
11610 
11611   switch (Opcode) {
11612   case AMDGPU::V_DIV_SCALE_F32_e64:
11613   case AMDGPU::V_DIV_SCALE_F64_e64: {
11614     // Satisfy the operand register constraint when one of the inputs is
11615     // undefined. Ordinarily each undef value will have its own implicit_def of
11616     // a vreg, so force these to use a single register.
11617     SDValue Src0 = Node->getOperand(1);
11618     SDValue Src1 = Node->getOperand(3);
11619     SDValue Src2 = Node->getOperand(5);
11620 
11621     if ((Src0.isMachineOpcode() &&
11622          Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
11623         (Src0 == Src1 || Src0 == Src2))
11624       break;
11625 
11626     MVT VT = Src0.getValueType().getSimpleVT();
11627     const TargetRegisterClass *RC =
11628         getRegClassFor(VT, Src0.getNode()->isDivergent());
11629 
11630     MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
11631     SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
11632 
11633     SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
11634                                       UndefReg, Src0, SDValue());
11635 
11636     // src0 must be the same register as src1 or src2, even if the value is
11637     // undefined, so make sure we don't violate this constraint.
11638     if (Src0.isMachineOpcode() &&
11639         Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
11640       if (Src1.isMachineOpcode() &&
11641           Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
11642         Src0 = Src1;
11643       else if (Src2.isMachineOpcode() &&
11644                Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
11645         Src0 = Src2;
11646       else {
11647         assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
11648         Src0 = UndefReg;
11649         Src1 = UndefReg;
11650       }
11651     } else
11652       break;
11653 
11654     SmallVector<SDValue, 9> Ops(Node->op_begin(), Node->op_end());
11655     Ops[1] = Src0;
11656     Ops[3] = Src1;
11657     Ops[5] = Src2;
11658     Ops.push_back(ImpDef.getValue(1));
11659     return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
11660   }
11661   default:
11662     break;
11663   }
11664 
11665   return Node;
11666 }
11667 
11668 // Any MIMG instructions that use tfe or lwe require an initialization of the
11669 // result register that will be written in the case of a memory access failure.
11670 // The required code is also added to tie this init code to the result of the
11671 // img instruction.
11672 void SITargetLowering::AddIMGInit(MachineInstr &MI) const {
11673   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
11674   const SIRegisterInfo &TRI = TII->getRegisterInfo();
11675   MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
11676   MachineBasicBlock &MBB = *MI.getParent();
11677 
11678   MachineOperand *TFE = TII->getNamedOperand(MI, AMDGPU::OpName::tfe);
11679   MachineOperand *LWE = TII->getNamedOperand(MI, AMDGPU::OpName::lwe);
11680   MachineOperand *D16 = TII->getNamedOperand(MI, AMDGPU::OpName::d16);
11681 
11682   if (!TFE && !LWE) // intersect_ray
11683     return;
11684 
11685   unsigned TFEVal = TFE ? TFE->getImm() : 0;
11686   unsigned LWEVal = LWE->getImm();
11687   unsigned D16Val = D16 ? D16->getImm() : 0;
11688 
11689   if (!TFEVal && !LWEVal)
11690     return;
11691 
11692   // At least one of TFE or LWE are non-zero
11693   // We have to insert a suitable initialization of the result value and
11694   // tie this to the dest of the image instruction.
11695 
11696   const DebugLoc &DL = MI.getDebugLoc();
11697 
11698   int DstIdx =
11699       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
11700 
11701   // Calculate which dword we have to initialize to 0.
11702   MachineOperand *MO_Dmask = TII->getNamedOperand(MI, AMDGPU::OpName::dmask);
11703 
11704   // check that dmask operand is found.
11705   assert(MO_Dmask && "Expected dmask operand in instruction");
11706 
11707   unsigned dmask = MO_Dmask->getImm();
11708   // Determine the number of active lanes taking into account the
11709   // Gather4 special case
11710   unsigned ActiveLanes = TII->isGather4(MI) ? 4 : countPopulation(dmask);
11711 
11712   bool Packed = !Subtarget->hasUnpackedD16VMem();
11713 
11714   unsigned InitIdx =
11715       D16Val && Packed ? ((ActiveLanes + 1) >> 1) + 1 : ActiveLanes + 1;
11716 
11717   // Abandon attempt if the dst size isn't large enough
11718   // - this is in fact an error but this is picked up elsewhere and
11719   // reported correctly.
11720   uint32_t DstSize = TRI.getRegSizeInBits(*TII->getOpRegClass(MI, DstIdx)) / 32;
11721   if (DstSize < InitIdx)
11722     return;
11723 
11724   // Create a register for the initialization value.
11725   Register PrevDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx));
11726   unsigned NewDst = 0; // Final initialized value will be in here
11727 
11728   // If PRTStrictNull feature is enabled (the default) then initialize
11729   // all the result registers to 0, otherwise just the error indication
11730   // register (VGPRn+1)
11731   unsigned SizeLeft = Subtarget->usePRTStrictNull() ? InitIdx : 1;
11732   unsigned CurrIdx = Subtarget->usePRTStrictNull() ? 0 : (InitIdx - 1);
11733 
11734   BuildMI(MBB, MI, DL, TII->get(AMDGPU::IMPLICIT_DEF), PrevDst);
11735   for (; SizeLeft; SizeLeft--, CurrIdx++) {
11736     NewDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx));
11737     // Initialize dword
11738     Register SubReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
11739     BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), SubReg)
11740       .addImm(0);
11741     // Insert into the super-reg
11742     BuildMI(MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewDst)
11743       .addReg(PrevDst)
11744       .addReg(SubReg)
11745       .addImm(SIRegisterInfo::getSubRegFromChannel(CurrIdx));
11746 
11747     PrevDst = NewDst;
11748   }
11749 
11750   // Add as an implicit operand
11751   MI.addOperand(MachineOperand::CreateReg(NewDst, false, true));
11752 
11753   // Tie the just added implicit operand to the dst
11754   MI.tieOperands(DstIdx, MI.getNumOperands() - 1);
11755 }
11756 
11757 /// Assign the register class depending on the number of
11758 /// bits set in the writemask
11759 void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
11760                                                      SDNode *Node) const {
11761   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
11762 
11763   MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
11764 
11765   if (TII->isVOP3(MI.getOpcode())) {
11766     // Make sure constant bus requirements are respected.
11767     TII->legalizeOperandsVOP3(MRI, MI);
11768 
11769     // Prefer VGPRs over AGPRs in mAI instructions where possible.
11770     // This saves a chain-copy of registers and better balance register
11771     // use between vgpr and agpr as agpr tuples tend to be big.
11772     if (MI.getDesc().OpInfo) {
11773       unsigned Opc = MI.getOpcode();
11774       const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
11775       for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
11776                       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) {
11777         if (I == -1)
11778           break;
11779         MachineOperand &Op = MI.getOperand(I);
11780         if (!Op.isReg() || !Op.getReg().isVirtual())
11781           continue;
11782         auto *RC = TRI->getRegClassForReg(MRI, Op.getReg());
11783         if (!TRI->hasAGPRs(RC))
11784           continue;
11785         auto *Src = MRI.getUniqueVRegDef(Op.getReg());
11786         if (!Src || !Src->isCopy() ||
11787             !TRI->isSGPRReg(MRI, Src->getOperand(1).getReg()))
11788           continue;
11789         auto *NewRC = TRI->getEquivalentVGPRClass(RC);
11790         // All uses of agpr64 and agpr32 can also accept vgpr except for
11791         // v_accvgpr_read, but we do not produce agpr reads during selection,
11792         // so no use checks are needed.
11793         MRI.setRegClass(Op.getReg(), NewRC);
11794       }
11795 
11796       // Resolve the rest of AV operands to AGPRs.
11797       if (auto *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2)) {
11798         if (Src2->isReg() && Src2->getReg().isVirtual()) {
11799           auto *RC = TRI->getRegClassForReg(MRI, Src2->getReg());
11800           if (TRI->isVectorSuperClass(RC)) {
11801             auto *NewRC = TRI->getEquivalentAGPRClass(RC);
11802             MRI.setRegClass(Src2->getReg(), NewRC);
11803             if (Src2->isTied())
11804               MRI.setRegClass(MI.getOperand(0).getReg(), NewRC);
11805           }
11806         }
11807       }
11808     }
11809 
11810     return;
11811   }
11812 
11813   if (TII->isMIMG(MI)) {
11814     if (!MI.mayStore())
11815       AddIMGInit(MI);
11816     TII->enforceOperandRCAlignment(MI, AMDGPU::OpName::vaddr);
11817   }
11818 }
11819 
11820 static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
11821                               uint64_t Val) {
11822   SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
11823   return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
11824 }
11825 
11826 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
11827                                                 const SDLoc &DL,
11828                                                 SDValue Ptr) const {
11829   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
11830 
11831   // Build the half of the subregister with the constants before building the
11832   // full 128-bit register. If we are building multiple resource descriptors,
11833   // this will allow CSEing of the 2-component register.
11834   const SDValue Ops0[] = {
11835     DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
11836     buildSMovImm32(DAG, DL, 0),
11837     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
11838     buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
11839     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
11840   };
11841 
11842   SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
11843                                                 MVT::v2i32, Ops0), 0);
11844 
11845   // Combine the constants and the pointer.
11846   const SDValue Ops1[] = {
11847     DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32),
11848     Ptr,
11849     DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
11850     SubRegHi,
11851     DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
11852   };
11853 
11854   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
11855 }
11856 
11857 /// Return a resource descriptor with the 'Add TID' bit enabled
11858 ///        The TID (Thread ID) is multiplied by the stride value (bits [61:48]
11859 ///        of the resource descriptor) to create an offset, which is added to
11860 ///        the resource pointer.
11861 MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
11862                                            SDValue Ptr, uint32_t RsrcDword1,
11863                                            uint64_t RsrcDword2And3) const {
11864   SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
11865   SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
11866   if (RsrcDword1) {
11867     PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
11868                                      DAG.getConstant(RsrcDword1, DL, MVT::i32)),
11869                     0);
11870   }
11871 
11872   SDValue DataLo = buildSMovImm32(DAG, DL,
11873                                   RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
11874   SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
11875 
11876   const SDValue Ops[] = {
11877     DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32),
11878     PtrLo,
11879     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
11880     PtrHi,
11881     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
11882     DataLo,
11883     DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
11884     DataHi,
11885     DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
11886   };
11887 
11888   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
11889 }
11890 
11891 //===----------------------------------------------------------------------===//
11892 //                         SI Inline Assembly Support
11893 //===----------------------------------------------------------------------===//
11894 
11895 std::pair<unsigned, const TargetRegisterClass *>
11896 SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI_,
11897                                                StringRef Constraint,
11898                                                MVT VT) const {
11899   const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(TRI_);
11900 
11901   const TargetRegisterClass *RC = nullptr;
11902   if (Constraint.size() == 1) {
11903     const unsigned BitWidth = VT.getSizeInBits();
11904     switch (Constraint[0]) {
11905     default:
11906       return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
11907     case 's':
11908     case 'r':
11909       switch (BitWidth) {
11910       case 16:
11911         RC = &AMDGPU::SReg_32RegClass;
11912         break;
11913       case 64:
11914         RC = &AMDGPU::SGPR_64RegClass;
11915         break;
11916       default:
11917         RC = SIRegisterInfo::getSGPRClassForBitWidth(BitWidth);
11918         if (!RC)
11919           return std::make_pair(0U, nullptr);
11920         break;
11921       }
11922       break;
11923     case 'v':
11924       switch (BitWidth) {
11925       case 16:
11926         RC = &AMDGPU::VGPR_32RegClass;
11927         break;
11928       default:
11929         RC = TRI->getVGPRClassForBitWidth(BitWidth);
11930         if (!RC)
11931           return std::make_pair(0U, nullptr);
11932         break;
11933       }
11934       break;
11935     case 'a':
11936       if (!Subtarget->hasMAIInsts())
11937         break;
11938       switch (BitWidth) {
11939       case 16:
11940         RC = &AMDGPU::AGPR_32RegClass;
11941         break;
11942       default:
11943         RC = TRI->getAGPRClassForBitWidth(BitWidth);
11944         if (!RC)
11945           return std::make_pair(0U, nullptr);
11946         break;
11947       }
11948       break;
11949     }
11950     // We actually support i128, i16 and f16 as inline parameters
11951     // even if they are not reported as legal
11952     if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
11953                VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
11954       return std::make_pair(0U, RC);
11955   }
11956 
11957   if (Constraint.startswith("{") && Constraint.endswith("}")) {
11958     StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
11959     if (RegName.consume_front("v")) {
11960       RC = &AMDGPU::VGPR_32RegClass;
11961     } else if (RegName.consume_front("s")) {
11962       RC = &AMDGPU::SGPR_32RegClass;
11963     } else if (RegName.consume_front("a")) {
11964       RC = &AMDGPU::AGPR_32RegClass;
11965     }
11966 
11967     if (RC) {
11968       uint32_t Idx;
11969       if (RegName.consume_front("[")) {
11970         uint32_t End;
11971         bool Failed = RegName.consumeInteger(10, Idx);
11972         Failed |= !RegName.consume_front(":");
11973         Failed |= RegName.consumeInteger(10, End);
11974         Failed |= !RegName.consume_back("]");
11975         if (!Failed) {
11976           uint32_t Width = (End - Idx + 1) * 32;
11977           MCRegister Reg = RC->getRegister(Idx);
11978           if (SIRegisterInfo::isVGPRClass(RC))
11979             RC = TRI->getVGPRClassForBitWidth(Width);
11980           else if (SIRegisterInfo::isSGPRClass(RC))
11981             RC = TRI->getSGPRClassForBitWidth(Width);
11982           else if (SIRegisterInfo::isAGPRClass(RC))
11983             RC = TRI->getAGPRClassForBitWidth(Width);
11984           if (RC) {
11985             Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0, RC);
11986             return std::make_pair(Reg, RC);
11987           }
11988         }
11989       } else {
11990         bool Failed = RegName.getAsInteger(10, Idx);
11991         if (!Failed && Idx < RC->getNumRegs())
11992           return std::make_pair(RC->getRegister(Idx), RC);
11993       }
11994     }
11995   }
11996 
11997   auto Ret = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
11998   if (Ret.first)
11999     Ret.second = TRI->getPhysRegClass(Ret.first);
12000 
12001   return Ret;
12002 }
12003 
12004 static bool isImmConstraint(StringRef Constraint) {
12005   if (Constraint.size() == 1) {
12006     switch (Constraint[0]) {
12007     default: break;
12008     case 'I':
12009     case 'J':
12010     case 'A':
12011     case 'B':
12012     case 'C':
12013       return true;
12014     }
12015   } else if (Constraint == "DA" ||
12016              Constraint == "DB") {
12017     return true;
12018   }
12019   return false;
12020 }
12021 
12022 SITargetLowering::ConstraintType
12023 SITargetLowering::getConstraintType(StringRef Constraint) const {
12024   if (Constraint.size() == 1) {
12025     switch (Constraint[0]) {
12026     default: break;
12027     case 's':
12028     case 'v':
12029     case 'a':
12030       return C_RegisterClass;
12031     }
12032   }
12033   if (isImmConstraint(Constraint)) {
12034     return C_Other;
12035   }
12036   return TargetLowering::getConstraintType(Constraint);
12037 }
12038 
12039 static uint64_t clearUnusedBits(uint64_t Val, unsigned Size) {
12040   if (!AMDGPU::isInlinableIntLiteral(Val)) {
12041     Val = Val & maskTrailingOnes<uint64_t>(Size);
12042   }
12043   return Val;
12044 }
12045 
12046 void SITargetLowering::LowerAsmOperandForConstraint(SDValue Op,
12047                                                     std::string &Constraint,
12048                                                     std::vector<SDValue> &Ops,
12049                                                     SelectionDAG &DAG) const {
12050   if (isImmConstraint(Constraint)) {
12051     uint64_t Val;
12052     if (getAsmOperandConstVal(Op, Val) &&
12053         checkAsmConstraintVal(Op, Constraint, Val)) {
12054       Val = clearUnusedBits(Val, Op.getScalarValueSizeInBits());
12055       Ops.push_back(DAG.getTargetConstant(Val, SDLoc(Op), MVT::i64));
12056     }
12057   } else {
12058     TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
12059   }
12060 }
12061 
12062 bool SITargetLowering::getAsmOperandConstVal(SDValue Op, uint64_t &Val) const {
12063   unsigned Size = Op.getScalarValueSizeInBits();
12064   if (Size > 64)
12065     return false;
12066 
12067   if (Size == 16 && !Subtarget->has16BitInsts())
12068     return false;
12069 
12070   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
12071     Val = C->getSExtValue();
12072     return true;
12073   }
12074   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
12075     Val = C->getValueAPF().bitcastToAPInt().getSExtValue();
12076     return true;
12077   }
12078   if (BuildVectorSDNode *V = dyn_cast<BuildVectorSDNode>(Op)) {
12079     if (Size != 16 || Op.getNumOperands() != 2)
12080       return false;
12081     if (Op.getOperand(0).isUndef() || Op.getOperand(1).isUndef())
12082       return false;
12083     if (ConstantSDNode *C = V->getConstantSplatNode()) {
12084       Val = C->getSExtValue();
12085       return true;
12086     }
12087     if (ConstantFPSDNode *C = V->getConstantFPSplatNode()) {
12088       Val = C->getValueAPF().bitcastToAPInt().getSExtValue();
12089       return true;
12090     }
12091   }
12092 
12093   return false;
12094 }
12095 
12096 bool SITargetLowering::checkAsmConstraintVal(SDValue Op,
12097                                              const std::string &Constraint,
12098                                              uint64_t Val) const {
12099   if (Constraint.size() == 1) {
12100     switch (Constraint[0]) {
12101     case 'I':
12102       return AMDGPU::isInlinableIntLiteral(Val);
12103     case 'J':
12104       return isInt<16>(Val);
12105     case 'A':
12106       return checkAsmConstraintValA(Op, Val);
12107     case 'B':
12108       return isInt<32>(Val);
12109     case 'C':
12110       return isUInt<32>(clearUnusedBits(Val, Op.getScalarValueSizeInBits())) ||
12111              AMDGPU::isInlinableIntLiteral(Val);
12112     default:
12113       break;
12114     }
12115   } else if (Constraint.size() == 2) {
12116     if (Constraint == "DA") {
12117       int64_t HiBits = static_cast<int32_t>(Val >> 32);
12118       int64_t LoBits = static_cast<int32_t>(Val);
12119       return checkAsmConstraintValA(Op, HiBits, 32) &&
12120              checkAsmConstraintValA(Op, LoBits, 32);
12121     }
12122     if (Constraint == "DB") {
12123       return true;
12124     }
12125   }
12126   llvm_unreachable("Invalid asm constraint");
12127 }
12128 
12129 bool SITargetLowering::checkAsmConstraintValA(SDValue Op,
12130                                               uint64_t Val,
12131                                               unsigned MaxSize) const {
12132   unsigned Size = std::min<unsigned>(Op.getScalarValueSizeInBits(), MaxSize);
12133   bool HasInv2Pi = Subtarget->hasInv2PiInlineImm();
12134   if ((Size == 16 && AMDGPU::isInlinableLiteral16(Val, HasInv2Pi)) ||
12135       (Size == 32 && AMDGPU::isInlinableLiteral32(Val, HasInv2Pi)) ||
12136       (Size == 64 && AMDGPU::isInlinableLiteral64(Val, HasInv2Pi))) {
12137     return true;
12138   }
12139   return false;
12140 }
12141 
12142 static int getAlignedAGPRClassID(unsigned UnalignedClassID) {
12143   switch (UnalignedClassID) {
12144   case AMDGPU::VReg_64RegClassID:
12145     return AMDGPU::VReg_64_Align2RegClassID;
12146   case AMDGPU::VReg_96RegClassID:
12147     return AMDGPU::VReg_96_Align2RegClassID;
12148   case AMDGPU::VReg_128RegClassID:
12149     return AMDGPU::VReg_128_Align2RegClassID;
12150   case AMDGPU::VReg_160RegClassID:
12151     return AMDGPU::VReg_160_Align2RegClassID;
12152   case AMDGPU::VReg_192RegClassID:
12153     return AMDGPU::VReg_192_Align2RegClassID;
12154   case AMDGPU::VReg_224RegClassID:
12155     return AMDGPU::VReg_224_Align2RegClassID;
12156   case AMDGPU::VReg_256RegClassID:
12157     return AMDGPU::VReg_256_Align2RegClassID;
12158   case AMDGPU::VReg_512RegClassID:
12159     return AMDGPU::VReg_512_Align2RegClassID;
12160   case AMDGPU::VReg_1024RegClassID:
12161     return AMDGPU::VReg_1024_Align2RegClassID;
12162   case AMDGPU::AReg_64RegClassID:
12163     return AMDGPU::AReg_64_Align2RegClassID;
12164   case AMDGPU::AReg_96RegClassID:
12165     return AMDGPU::AReg_96_Align2RegClassID;
12166   case AMDGPU::AReg_128RegClassID:
12167     return AMDGPU::AReg_128_Align2RegClassID;
12168   case AMDGPU::AReg_160RegClassID:
12169     return AMDGPU::AReg_160_Align2RegClassID;
12170   case AMDGPU::AReg_192RegClassID:
12171     return AMDGPU::AReg_192_Align2RegClassID;
12172   case AMDGPU::AReg_256RegClassID:
12173     return AMDGPU::AReg_256_Align2RegClassID;
12174   case AMDGPU::AReg_512RegClassID:
12175     return AMDGPU::AReg_512_Align2RegClassID;
12176   case AMDGPU::AReg_1024RegClassID:
12177     return AMDGPU::AReg_1024_Align2RegClassID;
12178   default:
12179     return -1;
12180   }
12181 }
12182 
12183 // Figure out which registers should be reserved for stack access. Only after
12184 // the function is legalized do we know all of the non-spill stack objects or if
12185 // calls are present.
12186 void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
12187   MachineRegisterInfo &MRI = MF.getRegInfo();
12188   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
12189   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
12190   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
12191   const SIInstrInfo *TII = ST.getInstrInfo();
12192 
12193   if (Info->isEntryFunction()) {
12194     // Callable functions have fixed registers used for stack access.
12195     reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
12196   }
12197 
12198   assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
12199                              Info->getStackPtrOffsetReg()));
12200   if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG)
12201     MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
12202 
12203   // We need to worry about replacing the default register with itself in case
12204   // of MIR testcases missing the MFI.
12205   if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG)
12206     MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
12207 
12208   if (Info->getFrameOffsetReg() != AMDGPU::FP_REG)
12209     MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
12210 
12211   Info->limitOccupancy(MF);
12212 
12213   if (ST.isWave32() && !MF.empty()) {
12214     for (auto &MBB : MF) {
12215       for (auto &MI : MBB) {
12216         TII->fixImplicitOperands(MI);
12217       }
12218     }
12219   }
12220 
12221   // FIXME: This is a hack to fixup AGPR classes to use the properly aligned
12222   // classes if required. Ideally the register class constraints would differ
12223   // per-subtarget, but there's no easy way to achieve that right now. This is
12224   // not a problem for VGPRs because the correctly aligned VGPR class is implied
12225   // from using them as the register class for legal types.
12226   if (ST.needsAlignedVGPRs()) {
12227     for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
12228       const Register Reg = Register::index2VirtReg(I);
12229       const TargetRegisterClass *RC = MRI.getRegClassOrNull(Reg);
12230       if (!RC)
12231         continue;
12232       int NewClassID = getAlignedAGPRClassID(RC->getID());
12233       if (NewClassID != -1)
12234         MRI.setRegClass(Reg, TRI->getRegClass(NewClassID));
12235     }
12236   }
12237 
12238   TargetLoweringBase::finalizeLowering(MF);
12239 }
12240 
12241 void SITargetLowering::computeKnownBitsForFrameIndex(
12242   const int FI, KnownBits &Known, const MachineFunction &MF) const {
12243   TargetLowering::computeKnownBitsForFrameIndex(FI, Known, MF);
12244 
12245   // Set the high bits to zero based on the maximum allowed scratch size per
12246   // wave. We can't use vaddr in MUBUF instructions if we don't know the address
12247   // calculation won't overflow, so assume the sign bit is never set.
12248   Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
12249 }
12250 
12251 static void knownBitsForWorkitemID(const GCNSubtarget &ST, GISelKnownBits &KB,
12252                                    KnownBits &Known, unsigned Dim) {
12253   unsigned MaxValue =
12254       ST.getMaxWorkitemID(KB.getMachineFunction().getFunction(), Dim);
12255   Known.Zero.setHighBits(countLeadingZeros(MaxValue));
12256 }
12257 
12258 void SITargetLowering::computeKnownBitsForTargetInstr(
12259     GISelKnownBits &KB, Register R, KnownBits &Known, const APInt &DemandedElts,
12260     const MachineRegisterInfo &MRI, unsigned Depth) const {
12261   const MachineInstr *MI = MRI.getVRegDef(R);
12262   switch (MI->getOpcode()) {
12263   case AMDGPU::G_INTRINSIC: {
12264     switch (MI->getIntrinsicID()) {
12265     case Intrinsic::amdgcn_workitem_id_x:
12266       knownBitsForWorkitemID(*getSubtarget(), KB, Known, 0);
12267       break;
12268     case Intrinsic::amdgcn_workitem_id_y:
12269       knownBitsForWorkitemID(*getSubtarget(), KB, Known, 1);
12270       break;
12271     case Intrinsic::amdgcn_workitem_id_z:
12272       knownBitsForWorkitemID(*getSubtarget(), KB, Known, 2);
12273       break;
12274     case Intrinsic::amdgcn_mbcnt_lo:
12275     case Intrinsic::amdgcn_mbcnt_hi: {
12276       // These return at most the wavefront size - 1.
12277       unsigned Size = MRI.getType(R).getSizeInBits();
12278       Known.Zero.setHighBits(Size - getSubtarget()->getWavefrontSizeLog2());
12279       break;
12280     }
12281     case Intrinsic::amdgcn_groupstaticsize: {
12282       // We can report everything over the maximum size as 0. We can't report
12283       // based on the actual size because we don't know if it's accurate or not
12284       // at any given point.
12285       Known.Zero.setHighBits(countLeadingZeros(getSubtarget()->getLocalMemorySize()));
12286       break;
12287     }
12288     }
12289     break;
12290   }
12291   case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE:
12292     Known.Zero.setHighBits(24);
12293     break;
12294   case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT:
12295     Known.Zero.setHighBits(16);
12296     break;
12297   }
12298 }
12299 
12300 Align SITargetLowering::computeKnownAlignForTargetInstr(
12301   GISelKnownBits &KB, Register R, const MachineRegisterInfo &MRI,
12302   unsigned Depth) const {
12303   const MachineInstr *MI = MRI.getVRegDef(R);
12304   switch (MI->getOpcode()) {
12305   case AMDGPU::G_INTRINSIC:
12306   case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: {
12307     // FIXME: Can this move to generic code? What about the case where the call
12308     // site specifies a lower alignment?
12309     Intrinsic::ID IID = MI->getIntrinsicID();
12310     LLVMContext &Ctx = KB.getMachineFunction().getFunction().getContext();
12311     AttributeList Attrs = Intrinsic::getAttributes(Ctx, IID);
12312     if (MaybeAlign RetAlign = Attrs.getRetAlignment())
12313       return *RetAlign;
12314     return Align(1);
12315   }
12316   default:
12317     return Align(1);
12318   }
12319 }
12320 
12321 Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
12322   const Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
12323   const Align CacheLineAlign = Align(64);
12324 
12325   // Pre-GFX10 target did not benefit from loop alignment
12326   if (!ML || DisableLoopAlignment ||
12327       (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
12328       getSubtarget()->hasInstFwdPrefetchBug())
12329     return PrefAlign;
12330 
12331   // On GFX10 I$ is 4 x 64 bytes cache lines.
12332   // By default prefetcher keeps one cache line behind and reads two ahead.
12333   // We can modify it with S_INST_PREFETCH for larger loops to have two lines
12334   // behind and one ahead.
12335   // Therefor we can benefit from aligning loop headers if loop fits 192 bytes.
12336   // If loop fits 64 bytes it always spans no more than two cache lines and
12337   // does not need an alignment.
12338   // Else if loop is less or equal 128 bytes we do not need to modify prefetch,
12339   // Else if loop is less or equal 192 bytes we need two lines behind.
12340 
12341   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
12342   const MachineBasicBlock *Header = ML->getHeader();
12343   if (Header->getAlignment() != PrefAlign)
12344     return Header->getAlignment(); // Already processed.
12345 
12346   unsigned LoopSize = 0;
12347   for (const MachineBasicBlock *MBB : ML->blocks()) {
12348     // If inner loop block is aligned assume in average half of the alignment
12349     // size to be added as nops.
12350     if (MBB != Header)
12351       LoopSize += MBB->getAlignment().value() / 2;
12352 
12353     for (const MachineInstr &MI : *MBB) {
12354       LoopSize += TII->getInstSizeInBytes(MI);
12355       if (LoopSize > 192)
12356         return PrefAlign;
12357     }
12358   }
12359 
12360   if (LoopSize <= 64)
12361     return PrefAlign;
12362 
12363   if (LoopSize <= 128)
12364     return CacheLineAlign;
12365 
12366   // If any of parent loops is surrounded by prefetch instructions do not
12367   // insert new for inner loop, which would reset parent's settings.
12368   for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) {
12369     if (MachineBasicBlock *Exit = P->getExitBlock()) {
12370       auto I = Exit->getFirstNonDebugInstr();
12371       if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
12372         return CacheLineAlign;
12373     }
12374   }
12375 
12376   MachineBasicBlock *Pre = ML->getLoopPreheader();
12377   MachineBasicBlock *Exit = ML->getExitBlock();
12378 
12379   if (Pre && Exit) {
12380     auto PreTerm = Pre->getFirstTerminator();
12381     if (PreTerm == Pre->begin() ||
12382         std::prev(PreTerm)->getOpcode() != AMDGPU::S_INST_PREFETCH)
12383       BuildMI(*Pre, PreTerm, DebugLoc(), TII->get(AMDGPU::S_INST_PREFETCH))
12384           .addImm(1); // prefetch 2 lines behind PC
12385 
12386     auto ExitHead = Exit->getFirstNonDebugInstr();
12387     if (ExitHead == Exit->end() ||
12388         ExitHead->getOpcode() != AMDGPU::S_INST_PREFETCH)
12389       BuildMI(*Exit, ExitHead, DebugLoc(), TII->get(AMDGPU::S_INST_PREFETCH))
12390           .addImm(2); // prefetch 1 line behind PC
12391   }
12392 
12393   return CacheLineAlign;
12394 }
12395 
12396 LLVM_ATTRIBUTE_UNUSED
12397 static bool isCopyFromRegOfInlineAsm(const SDNode *N) {
12398   assert(N->getOpcode() == ISD::CopyFromReg);
12399   do {
12400     // Follow the chain until we find an INLINEASM node.
12401     N = N->getOperand(0).getNode();
12402     if (N->getOpcode() == ISD::INLINEASM ||
12403         N->getOpcode() == ISD::INLINEASM_BR)
12404       return true;
12405   } while (N->getOpcode() == ISD::CopyFromReg);
12406   return false;
12407 }
12408 
12409 bool SITargetLowering::isSDNodeSourceOfDivergence(
12410     const SDNode *N, FunctionLoweringInfo *FLI,
12411     LegacyDivergenceAnalysis *KDA) const {
12412   switch (N->getOpcode()) {
12413   case ISD::CopyFromReg: {
12414     const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1));
12415     const MachineRegisterInfo &MRI = FLI->MF->getRegInfo();
12416     const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
12417     Register Reg = R->getReg();
12418 
12419     // FIXME: Why does this need to consider isLiveIn?
12420     if (Reg.isPhysical() || MRI.isLiveIn(Reg))
12421       return !TRI->isSGPRReg(MRI, Reg);
12422 
12423     if (const Value *V = FLI->getValueFromVirtualReg(R->getReg()))
12424       return KDA->isDivergent(V);
12425 
12426     assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
12427     return !TRI->isSGPRReg(MRI, Reg);
12428   }
12429   case ISD::LOAD: {
12430     const LoadSDNode *L = cast<LoadSDNode>(N);
12431     unsigned AS = L->getAddressSpace();
12432     // A flat load may access private memory.
12433     return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
12434   }
12435   case ISD::CALLSEQ_END:
12436     return true;
12437   case ISD::INTRINSIC_WO_CHAIN:
12438     return AMDGPU::isIntrinsicSourceOfDivergence(
12439         cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
12440   case ISD::INTRINSIC_W_CHAIN:
12441     return AMDGPU::isIntrinsicSourceOfDivergence(
12442         cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
12443   case AMDGPUISD::ATOMIC_CMP_SWAP:
12444   case AMDGPUISD::ATOMIC_INC:
12445   case AMDGPUISD::ATOMIC_DEC:
12446   case AMDGPUISD::ATOMIC_LOAD_FMIN:
12447   case AMDGPUISD::ATOMIC_LOAD_FMAX:
12448   case AMDGPUISD::BUFFER_ATOMIC_SWAP:
12449   case AMDGPUISD::BUFFER_ATOMIC_ADD:
12450   case AMDGPUISD::BUFFER_ATOMIC_SUB:
12451   case AMDGPUISD::BUFFER_ATOMIC_SMIN:
12452   case AMDGPUISD::BUFFER_ATOMIC_UMIN:
12453   case AMDGPUISD::BUFFER_ATOMIC_SMAX:
12454   case AMDGPUISD::BUFFER_ATOMIC_UMAX:
12455   case AMDGPUISD::BUFFER_ATOMIC_AND:
12456   case AMDGPUISD::BUFFER_ATOMIC_OR:
12457   case AMDGPUISD::BUFFER_ATOMIC_XOR:
12458   case AMDGPUISD::BUFFER_ATOMIC_INC:
12459   case AMDGPUISD::BUFFER_ATOMIC_DEC:
12460   case AMDGPUISD::BUFFER_ATOMIC_CMPSWAP:
12461   case AMDGPUISD::BUFFER_ATOMIC_CSUB:
12462   case AMDGPUISD::BUFFER_ATOMIC_FADD:
12463   case AMDGPUISD::BUFFER_ATOMIC_FMIN:
12464   case AMDGPUISD::BUFFER_ATOMIC_FMAX:
12465     // Target-specific read-modify-write atomics are sources of divergence.
12466     return true;
12467   default:
12468     if (auto *A = dyn_cast<AtomicSDNode>(N)) {
12469       // Generic read-modify-write atomics are sources of divergence.
12470       return A->readMem() && A->writeMem();
12471     }
12472     return false;
12473   }
12474 }
12475 
12476 bool SITargetLowering::denormalsEnabledForType(const SelectionDAG &DAG,
12477                                                EVT VT) const {
12478   switch (VT.getScalarType().getSimpleVT().SimpleTy) {
12479   case MVT::f32:
12480     return hasFP32Denormals(DAG.getMachineFunction());
12481   case MVT::f64:
12482   case MVT::f16:
12483     return hasFP64FP16Denormals(DAG.getMachineFunction());
12484   default:
12485     return false;
12486   }
12487 }
12488 
12489 bool SITargetLowering::denormalsEnabledForType(LLT Ty,
12490                                                MachineFunction &MF) const {
12491   switch (Ty.getScalarSizeInBits()) {
12492   case 32:
12493     return hasFP32Denormals(MF);
12494   case 64:
12495   case 16:
12496     return hasFP64FP16Denormals(MF);
12497   default:
12498     return false;
12499   }
12500 }
12501 
12502 bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
12503                                                     const SelectionDAG &DAG,
12504                                                     bool SNaN,
12505                                                     unsigned Depth) const {
12506   if (Op.getOpcode() == AMDGPUISD::CLAMP) {
12507     const MachineFunction &MF = DAG.getMachineFunction();
12508     const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
12509 
12510     if (Info->getMode().DX10Clamp)
12511       return true; // Clamped to 0.
12512     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
12513   }
12514 
12515   return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG,
12516                                                             SNaN, Depth);
12517 }
12518 
12519 // Global FP atomic instructions have a hardcoded FP mode and do not support
12520 // FP32 denormals, and only support v2f16 denormals.
12521 static bool fpModeMatchesGlobalFPAtomicMode(const AtomicRMWInst *RMW) {
12522   const fltSemantics &Flt = RMW->getType()->getScalarType()->getFltSemantics();
12523   auto DenormMode = RMW->getParent()->getParent()->getDenormalMode(Flt);
12524   if (&Flt == &APFloat::IEEEsingle())
12525     return DenormMode == DenormalMode::getPreserveSign();
12526   return DenormMode == DenormalMode::getIEEE();
12527 }
12528 
12529 TargetLowering::AtomicExpansionKind
12530 SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
12531   unsigned AS = RMW->getPointerAddressSpace();
12532   if (AS == AMDGPUAS::PRIVATE_ADDRESS)
12533     return AtomicExpansionKind::NotAtomic;
12534 
12535   auto ReportUnsafeHWInst = [&](TargetLowering::AtomicExpansionKind Kind) {
12536     OptimizationRemarkEmitter ORE(RMW->getFunction());
12537     LLVMContext &Ctx = RMW->getFunction()->getContext();
12538     SmallVector<StringRef> SSNs;
12539     Ctx.getSyncScopeNames(SSNs);
12540     auto MemScope = SSNs[RMW->getSyncScopeID()].empty()
12541                         ? "system"
12542                         : SSNs[RMW->getSyncScopeID()];
12543     ORE.emit([&]() {
12544       return OptimizationRemark(DEBUG_TYPE, "Passed", RMW)
12545              << "Hardware instruction generated for atomic "
12546              << RMW->getOperationName(RMW->getOperation())
12547              << " operation at memory scope " << MemScope
12548              << " due to an unsafe request.";
12549     });
12550     return Kind;
12551   };
12552 
12553   switch (RMW->getOperation()) {
12554   case AtomicRMWInst::FAdd: {
12555     Type *Ty = RMW->getType();
12556 
12557     // We don't have a way to support 16-bit atomics now, so just leave them
12558     // as-is.
12559     if (Ty->isHalfTy())
12560       return AtomicExpansionKind::None;
12561 
12562     if (!Ty->isFloatTy() && (!Subtarget->hasGFX90AInsts() || !Ty->isDoubleTy()))
12563       return AtomicExpansionKind::CmpXChg;
12564 
12565     if ((AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) &&
12566          Subtarget->hasAtomicFaddInsts()) {
12567       if (Subtarget->hasGFX940Insts())
12568         return AtomicExpansionKind::None;
12569 
12570       // The amdgpu-unsafe-fp-atomics attribute enables generation of unsafe
12571       // floating point atomic instructions. May generate more efficient code,
12572       // but may not respect rounding and denormal modes, and may give incorrect
12573       // results for certain memory destinations.
12574       if (RMW->getFunction()
12575               ->getFnAttribute("amdgpu-unsafe-fp-atomics")
12576               .getValueAsString() != "true")
12577         return AtomicExpansionKind::CmpXChg;
12578 
12579       if (Subtarget->hasGFX90AInsts()) {
12580         if (Ty->isFloatTy() && AS == AMDGPUAS::FLAT_ADDRESS)
12581           return AtomicExpansionKind::CmpXChg;
12582 
12583         auto SSID = RMW->getSyncScopeID();
12584         if (SSID == SyncScope::System ||
12585             SSID == RMW->getContext().getOrInsertSyncScopeID("one-as"))
12586           return AtomicExpansionKind::CmpXChg;
12587 
12588         return ReportUnsafeHWInst(AtomicExpansionKind::None);
12589       }
12590 
12591       if (AS == AMDGPUAS::FLAT_ADDRESS)
12592         return AtomicExpansionKind::CmpXChg;
12593 
12594       return RMW->use_empty() ? ReportUnsafeHWInst(AtomicExpansionKind::None)
12595                               : AtomicExpansionKind::CmpXChg;
12596     }
12597 
12598     // DS FP atomics do respect the denormal mode, but the rounding mode is
12599     // fixed to round-to-nearest-even.
12600     // The only exception is DS_ADD_F64 which never flushes regardless of mode.
12601     if (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomicAdd()) {
12602       if (!Ty->isDoubleTy())
12603         return AtomicExpansionKind::None;
12604 
12605       if (fpModeMatchesGlobalFPAtomicMode(RMW))
12606         return AtomicExpansionKind::None;
12607 
12608       return RMW->getFunction()
12609                          ->getFnAttribute("amdgpu-unsafe-fp-atomics")
12610                          .getValueAsString() == "true"
12611                  ? ReportUnsafeHWInst(AtomicExpansionKind::None)
12612                  : AtomicExpansionKind::CmpXChg;
12613     }
12614 
12615     return AtomicExpansionKind::CmpXChg;
12616   }
12617   default:
12618     break;
12619   }
12620 
12621   return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
12622 }
12623 
12624 TargetLowering::AtomicExpansionKind
12625 SITargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
12626   return LI->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS
12627              ? AtomicExpansionKind::NotAtomic
12628              : AtomicExpansionKind::None;
12629 }
12630 
12631 TargetLowering::AtomicExpansionKind
12632 SITargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
12633   return SI->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS
12634              ? AtomicExpansionKind::NotAtomic
12635              : AtomicExpansionKind::None;
12636 }
12637 
12638 TargetLowering::AtomicExpansionKind
12639 SITargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CmpX) const {
12640   return CmpX->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS
12641              ? AtomicExpansionKind::NotAtomic
12642              : AtomicExpansionKind::None;
12643 }
12644 
12645 const TargetRegisterClass *
12646 SITargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
12647   const TargetRegisterClass *RC = TargetLoweringBase::getRegClassFor(VT, false);
12648   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
12649   if (RC == &AMDGPU::VReg_1RegClass && !isDivergent)
12650     return Subtarget->getWavefrontSize() == 64 ? &AMDGPU::SReg_64RegClass
12651                                                : &AMDGPU::SReg_32RegClass;
12652   if (!TRI->isSGPRClass(RC) && !isDivergent)
12653     return TRI->getEquivalentSGPRClass(RC);
12654   else if (TRI->isSGPRClass(RC) && isDivergent)
12655     return TRI->getEquivalentVGPRClass(RC);
12656 
12657   return RC;
12658 }
12659 
12660 // FIXME: This is a workaround for DivergenceAnalysis not understanding always
12661 // uniform values (as produced by the mask results of control flow intrinsics)
12662 // used outside of divergent blocks. The phi users need to also be treated as
12663 // always uniform.
12664 static bool hasCFUser(const Value *V, SmallPtrSet<const Value *, 16> &Visited,
12665                       unsigned WaveSize) {
12666   // FIXME: We assume we never cast the mask results of a control flow
12667   // intrinsic.
12668   // Early exit if the type won't be consistent as a compile time hack.
12669   IntegerType *IT = dyn_cast<IntegerType>(V->getType());
12670   if (!IT || IT->getBitWidth() != WaveSize)
12671     return false;
12672 
12673   if (!isa<Instruction>(V))
12674     return false;
12675   if (!Visited.insert(V).second)
12676     return false;
12677   bool Result = false;
12678   for (auto U : V->users()) {
12679     if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(U)) {
12680       if (V == U->getOperand(1)) {
12681         switch (Intrinsic->getIntrinsicID()) {
12682         default:
12683           Result = false;
12684           break;
12685         case Intrinsic::amdgcn_if_break:
12686         case Intrinsic::amdgcn_if:
12687         case Intrinsic::amdgcn_else:
12688           Result = true;
12689           break;
12690         }
12691       }
12692       if (V == U->getOperand(0)) {
12693         switch (Intrinsic->getIntrinsicID()) {
12694         default:
12695           Result = false;
12696           break;
12697         case Intrinsic::amdgcn_end_cf:
12698         case Intrinsic::amdgcn_loop:
12699           Result = true;
12700           break;
12701         }
12702       }
12703     } else {
12704       Result = hasCFUser(U, Visited, WaveSize);
12705     }
12706     if (Result)
12707       break;
12708   }
12709   return Result;
12710 }
12711 
12712 bool SITargetLowering::requiresUniformRegister(MachineFunction &MF,
12713                                                const Value *V) const {
12714   if (const CallInst *CI = dyn_cast<CallInst>(V)) {
12715     if (CI->isInlineAsm()) {
12716       // FIXME: This cannot give a correct answer. This should only trigger in
12717       // the case where inline asm returns mixed SGPR and VGPR results, used
12718       // outside the defining block. We don't have a specific result to
12719       // consider, so this assumes if any value is SGPR, the overall register
12720       // also needs to be SGPR.
12721       const SIRegisterInfo *SIRI = Subtarget->getRegisterInfo();
12722       TargetLowering::AsmOperandInfoVector TargetConstraints = ParseConstraints(
12723           MF.getDataLayout(), Subtarget->getRegisterInfo(), *CI);
12724       for (auto &TC : TargetConstraints) {
12725         if (TC.Type == InlineAsm::isOutput) {
12726           ComputeConstraintToUse(TC, SDValue());
12727           const TargetRegisterClass *RC = getRegForInlineAsmConstraint(
12728               SIRI, TC.ConstraintCode, TC.ConstraintVT).second;
12729           if (RC && SIRI->isSGPRClass(RC))
12730             return true;
12731         }
12732       }
12733     }
12734   }
12735   SmallPtrSet<const Value *, 16> Visited;
12736   return hasCFUser(V, Visited, Subtarget->getWavefrontSize());
12737 }
12738 
12739 std::pair<InstructionCost, MVT>
12740 SITargetLowering::getTypeLegalizationCost(const DataLayout &DL,
12741                                           Type *Ty) const {
12742   std::pair<InstructionCost, MVT> Cost =
12743       TargetLoweringBase::getTypeLegalizationCost(DL, Ty);
12744   auto Size = DL.getTypeSizeInBits(Ty);
12745   // Maximum load or store can handle 8 dwords for scalar and 4 for
12746   // vector ALU. Let's assume anything above 8 dwords is expensive
12747   // even if legal.
12748   if (Size <= 256)
12749     return Cost;
12750 
12751   Cost.first += (Size + 255) / 256;
12752   return Cost;
12753 }
12754 
12755 bool SITargetLowering::hasMemSDNodeUser(SDNode *N) const {
12756   SDNode::use_iterator I = N->use_begin(), E = N->use_end();
12757   for (; I != E; ++I) {
12758     if (MemSDNode *M = dyn_cast<MemSDNode>(*I)) {
12759       if (getBasePtrIndex(M) == I.getOperandNo())
12760         return true;
12761     }
12762   }
12763   return false;
12764 }
12765 
12766 bool SITargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0,
12767                                            SDValue N1) const {
12768   if (!N0.hasOneUse())
12769     return false;
12770   // Take care of the opportunity to keep N0 uniform
12771   if (N0->isDivergent() || !N1->isDivergent())
12772     return true;
12773   // Check if we have a good chance to form the memory access pattern with the
12774   // base and offset
12775   return (DAG.isBaseWithConstantOffset(N0) &&
12776           hasMemSDNodeUser(*N0->use_begin()));
12777 }
12778 
12779 MachineMemOperand::Flags
12780 SITargetLowering::getTargetMMOFlags(const Instruction &I) const {
12781   // Propagate metadata set by AMDGPUAnnotateUniformValues to the MMO of a load.
12782   if (I.getMetadata("amdgpu.noclobber"))
12783     return MONoClobber;
12784   return MachineMemOperand::MONone;
12785 }
12786