1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// \brief Custom DAG lowering for SI
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifdef _MSC_VER
16 // Provide M_PI.
17 #define _USE_MATH_DEFINES
18 #endif
19 
20 #include "SIISelLowering.h"
21 #include "AMDGPU.h"
22 #include "AMDGPUIntrinsicInfo.h"
23 #include "AMDGPUSubtarget.h"
24 #include "AMDGPUTargetMachine.h"
25 #include "SIDefines.h"
26 #include "SIInstrInfo.h"
27 #include "SIMachineFunctionInfo.h"
28 #include "SIRegisterInfo.h"
29 #include "Utils/AMDGPUBaseInfo.h"
30 #include "llvm/ADT/APFloat.h"
31 #include "llvm/ADT/APInt.h"
32 #include "llvm/ADT/ArrayRef.h"
33 #include "llvm/ADT/BitVector.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/StringRef.h"
37 #include "llvm/ADT/StringSwitch.h"
38 #include "llvm/ADT/Twine.h"
39 #include "llvm/CodeGen/Analysis.h"
40 #include "llvm/CodeGen/CallingConvLower.h"
41 #include "llvm/CodeGen/DAGCombine.h"
42 #include "llvm/CodeGen/ISDOpcodes.h"
43 #include "llvm/CodeGen/MachineBasicBlock.h"
44 #include "llvm/CodeGen/MachineFrameInfo.h"
45 #include "llvm/CodeGen/MachineFunction.h"
46 #include "llvm/CodeGen/MachineInstr.h"
47 #include "llvm/CodeGen/MachineInstrBuilder.h"
48 #include "llvm/CodeGen/MachineMemOperand.h"
49 #include "llvm/CodeGen/MachineModuleInfo.h"
50 #include "llvm/CodeGen/MachineOperand.h"
51 #include "llvm/CodeGen/MachineRegisterInfo.h"
52 #include "llvm/CodeGen/MachineValueType.h"
53 #include "llvm/CodeGen/SelectionDAG.h"
54 #include "llvm/CodeGen/SelectionDAGNodes.h"
55 #include "llvm/CodeGen/TargetCallingConv.h"
56 #include "llvm/CodeGen/TargetRegisterInfo.h"
57 #include "llvm/CodeGen/ValueTypes.h"
58 #include "llvm/IR/Constants.h"
59 #include "llvm/IR/DataLayout.h"
60 #include "llvm/IR/DebugLoc.h"
61 #include "llvm/IR/DerivedTypes.h"
62 #include "llvm/IR/DiagnosticInfo.h"
63 #include "llvm/IR/Function.h"
64 #include "llvm/IR/GlobalValue.h"
65 #include "llvm/IR/InstrTypes.h"
66 #include "llvm/IR/Instruction.h"
67 #include "llvm/IR/Instructions.h"
68 #include "llvm/IR/IntrinsicInst.h"
69 #include "llvm/IR/Type.h"
70 #include "llvm/Support/Casting.h"
71 #include "llvm/Support/CodeGen.h"
72 #include "llvm/Support/CommandLine.h"
73 #include "llvm/Support/Compiler.h"
74 #include "llvm/Support/ErrorHandling.h"
75 #include "llvm/Support/KnownBits.h"
76 #include "llvm/Support/MathExtras.h"
77 #include "llvm/Target/TargetOptions.h"
78 #include <cassert>
79 #include <cmath>
80 #include <cstdint>
81 #include <iterator>
82 #include <tuple>
83 #include <utility>
84 #include <vector>
85 
86 using namespace llvm;
87 
88 #define DEBUG_TYPE "si-lower"
89 
90 STATISTIC(NumTailCalls, "Number of tail calls");
91 
92 static cl::opt<bool> EnableVGPRIndexMode(
93   "amdgpu-vgpr-index-mode",
94   cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
95   cl::init(false));
96 
97 static cl::opt<unsigned> AssumeFrameIndexHighZeroBits(
98   "amdgpu-frame-index-zero-bits",
99   cl::desc("High bits of frame index assumed to be zero"),
100   cl::init(5),
101   cl::ReallyHidden);
102 
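// Find the first SGPR not already allocated by CCInfo.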
103 static unsigned findFirstFreeSGPR(CCState &CCInfo) {
104   unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
105   for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
106     if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
107       return AMDGPU::SGPR0 + Reg;
108     }
109   }
110   llvm_unreachable("Cannot allocate sgpr");
111 }
112 
113 SITargetLowering::SITargetLowering(const TargetMachine &TM,
114                                    const SISubtarget &STI)
115     : AMDGPUTargetLowering(TM, STI) {
116   addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
117   addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
118 
119   addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
120   addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);
121 
122   addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
123   addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
124   addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
125 
126   addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
127   addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);
128 
129   addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
130   addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
131 
132   addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
133   addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
134 
135   addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
136   addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
137 
138   if (Subtarget->has16BitInsts()) {
139     addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
140     addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
141   }
142 
143   if (Subtarget->hasVOP3PInsts()) {
144     addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
145     addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
146   }
147 
148   computeRegisterProperties(STI.getRegisterInfo());
149 
  // We need to custom lower vector loads and stores from local memory.
151   setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
152   setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
153   setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
154   setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
155   setOperationAction(ISD::LOAD, MVT::i1, Custom);
156 
157   setOperationAction(ISD::STORE, MVT::v2i32, Custom);
158   setOperationAction(ISD::STORE, MVT::v4i32, Custom);
159   setOperationAction(ISD::STORE, MVT::v8i32, Custom);
160   setOperationAction(ISD::STORE, MVT::v16i32, Custom);
161   setOperationAction(ISD::STORE, MVT::i1, Custom);
162 
163   setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
164   setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
165   setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
166   setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
167   setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
168   setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
169   setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
170   setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
171   setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
172   setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);
173 
174   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
175   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
176   setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);
177 
178   setOperationAction(ISD::SELECT, MVT::i1, Promote);
179   setOperationAction(ISD::SELECT, MVT::i64, Custom);
180   setOperationAction(ISD::SELECT, MVT::f64, Promote);
181   AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);
182 
183   setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
184   setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
185   setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
186   setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
187   setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
188 
189   setOperationAction(ISD::SETCC, MVT::i1, Promote);
190   setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
191   setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
192   AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
193 
194   setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
195   setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
196 
197   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
198   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
199   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
200   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
201   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
202   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
203   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);
204 
205   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
206   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
207   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
208   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);
209 
210   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
211   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
212   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
213 
214   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
215   setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
216   setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
217   setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);
218 
219   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
220   setOperationAction(ISD::BR_CC, MVT::i1, Expand);
221   setOperationAction(ISD::BR_CC, MVT::i32, Expand);
222   setOperationAction(ISD::BR_CC, MVT::i64, Expand);
223   setOperationAction(ISD::BR_CC, MVT::f32, Expand);
224   setOperationAction(ISD::BR_CC, MVT::f64, Expand);
225 
226   setOperationAction(ISD::UADDO, MVT::i32, Legal);
227   setOperationAction(ISD::USUBO, MVT::i32, Legal);
228 
229   setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
230   setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);
231 
232 #if 0
233   setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
234   setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
235 #endif
236 
237   //setOperationAction(ISD::ADDC, MVT::i64, Expand);
238   //setOperationAction(ISD::SUBC, MVT::i64, Expand);
239 
240   // We only support LOAD/STORE and vector manipulation ops for vectors
241   // with > 4 elements.
242   for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
243         MVT::v2i64, MVT::v2f64}) {
244     for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
245       switch (Op) {
246       case ISD::LOAD:
247       case ISD::STORE:
248       case ISD::BUILD_VECTOR:
249       case ISD::BITCAST:
250       case ISD::EXTRACT_VECTOR_ELT:
251       case ISD::INSERT_VECTOR_ELT:
252       case ISD::INSERT_SUBVECTOR:
253       case ISD::EXTRACT_SUBVECTOR:
254       case ISD::SCALAR_TO_VECTOR:
255         break;
256       case ISD::CONCAT_VECTORS:
257         setOperationAction(Op, VT, Custom);
258         break;
259       default:
260         setOperationAction(Op, VT, Expand);
261         break;
262       }
263     }
264   }
265 
266   // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
267   // is expanded to avoid having two separate loops in case the index is a VGPR.
268 
269   // Most operations are naturally 32-bit vector operations. We only support
270   // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
271   for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
272     setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
273     AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);
274 
275     setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
276     AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);
277 
278     setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
279     AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);
280 
281     setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
282     AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
283   }
284 
285   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
286   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
287   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
288   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);
289 
290   // Avoid stack access for these.
291   // TODO: Generalize to more vector types.
292   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
293   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
294   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
295   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
296 
  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling
  // and output demarshalling.
299   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
300   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
301 
  // We can't return success/failure, only the old value;
  // let LLVM add the comparison.
304   setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
305   setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);
306 
307   if (getSubtarget()->hasFlatAddressSpace()) {
308     setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
309     setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
310   }
311 
312   setOperationAction(ISD::BSWAP, MVT::i32, Legal);
313   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
314 
  // On SI this is s_memtime; on VI it is s_memrealtime.
316   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
317   setOperationAction(ISD::TRAP, MVT::Other, Custom);
318   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);
319 
320   setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
321   setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
322 
323   if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) {
324     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
325     setOperationAction(ISD::FCEIL, MVT::f64, Legal);
326     setOperationAction(ISD::FRINT, MVT::f64, Legal);
327   }
328 
329   setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
330 
331   setOperationAction(ISD::FSIN, MVT::f32, Custom);
332   setOperationAction(ISD::FCOS, MVT::f32, Custom);
333   setOperationAction(ISD::FDIV, MVT::f32, Custom);
334   setOperationAction(ISD::FDIV, MVT::f64, Custom);
335 
336   if (Subtarget->has16BitInsts()) {
337     setOperationAction(ISD::Constant, MVT::i16, Legal);
338 
339     setOperationAction(ISD::SMIN, MVT::i16, Legal);
340     setOperationAction(ISD::SMAX, MVT::i16, Legal);
341 
342     setOperationAction(ISD::UMIN, MVT::i16, Legal);
343     setOperationAction(ISD::UMAX, MVT::i16, Legal);
344 
345     setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
346     AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);
347 
348     setOperationAction(ISD::ROTR, MVT::i16, Promote);
349     setOperationAction(ISD::ROTL, MVT::i16, Promote);
350 
351     setOperationAction(ISD::SDIV, MVT::i16, Promote);
352     setOperationAction(ISD::UDIV, MVT::i16, Promote);
353     setOperationAction(ISD::SREM, MVT::i16, Promote);
354     setOperationAction(ISD::UREM, MVT::i16, Promote);
355 
356     setOperationAction(ISD::BSWAP, MVT::i16, Promote);
357     setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);
358 
359     setOperationAction(ISD::CTTZ, MVT::i16, Promote);
360     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
361     setOperationAction(ISD::CTLZ, MVT::i16, Promote);
362     setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
363 
364     setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
365 
366     setOperationAction(ISD::BR_CC, MVT::i16, Expand);
367 
368     setOperationAction(ISD::LOAD, MVT::i16, Custom);
369 
370     setTruncStoreAction(MVT::i64, MVT::i16, Expand);
371 
372     setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
373     AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
374     setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
375     AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);
376 
377     setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
378     setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
379     setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
380     setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
381 
382     // F16 - Constant Actions.
383     setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
384 
385     // F16 - Load/Store Actions.
386     setOperationAction(ISD::LOAD, MVT::f16, Promote);
387     AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
388     setOperationAction(ISD::STORE, MVT::f16, Promote);
389     AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);
390 
391     // F16 - VOP1 Actions.
392     setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
393     setOperationAction(ISD::FCOS, MVT::f16, Promote);
394     setOperationAction(ISD::FSIN, MVT::f16, Promote);
395     setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
396     setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
397     setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
398     setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
399     setOperationAction(ISD::FROUND, MVT::f16, Custom);
400 
401     // F16 - VOP2 Actions.
402     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
403     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
404     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
405     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
406     setOperationAction(ISD::FDIV, MVT::f16, Custom);
407 
408     // F16 - VOP3 Actions.
409     setOperationAction(ISD::FMA, MVT::f16, Legal);
410     if (!Subtarget->hasFP16Denormals())
411       setOperationAction(ISD::FMAD, MVT::f16, Legal);
412   }
413 
414   if (Subtarget->hasVOP3PInsts()) {
415     for (MVT VT : {MVT::v2i16, MVT::v2f16}) {
416       for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
417         switch (Op) {
418         case ISD::LOAD:
419         case ISD::STORE:
420         case ISD::BUILD_VECTOR:
421         case ISD::BITCAST:
422         case ISD::EXTRACT_VECTOR_ELT:
423         case ISD::INSERT_VECTOR_ELT:
424         case ISD::INSERT_SUBVECTOR:
425         case ISD::EXTRACT_SUBVECTOR:
426         case ISD::SCALAR_TO_VECTOR:
427           break;
428         case ISD::CONCAT_VECTORS:
429           setOperationAction(Op, VT, Custom);
430           break;
431         default:
432           setOperationAction(Op, VT, Expand);
433           break;
434         }
435       }
436     }
437 
438     // XXX - Do these do anything? Vector constants turn into build_vector.
439     setOperationAction(ISD::Constant, MVT::v2i16, Legal);
440     setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);
441 
442     setOperationAction(ISD::STORE, MVT::v2i16, Promote);
443     AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
444     setOperationAction(ISD::STORE, MVT::v2f16, Promote);
445     AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);
446 
447     setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
448     AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
449     setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
450     AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);
451 
452     setOperationAction(ISD::AND, MVT::v2i16, Promote);
453     AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
454     setOperationAction(ISD::OR, MVT::v2i16, Promote);
455     AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
456     setOperationAction(ISD::XOR, MVT::v2i16, Promote);
457     AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
458     setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
459     AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
460     setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
461     AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
462 
463     setOperationAction(ISD::ADD, MVT::v2i16, Legal);
464     setOperationAction(ISD::SUB, MVT::v2i16, Legal);
465     setOperationAction(ISD::MUL, MVT::v2i16, Legal);
466     setOperationAction(ISD::SHL, MVT::v2i16, Legal);
467     setOperationAction(ISD::SRL, MVT::v2i16, Legal);
468     setOperationAction(ISD::SRA, MVT::v2i16, Legal);
469     setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
470     setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
471     setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
472     setOperationAction(ISD::UMAX, MVT::v2i16, Legal);
473 
474     setOperationAction(ISD::FADD, MVT::v2f16, Legal);
475     setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
476     setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
477     setOperationAction(ISD::FMA, MVT::v2f16, Legal);
478     setOperationAction(ISD::FMINNUM, MVT::v2f16, Legal);
479     setOperationAction(ISD::FMAXNUM, MVT::v2f16, Legal);
480 
481     // This isn't really legal, but this avoids the legalizer unrolling it (and
482     // allows matching fneg (fabs x) patterns)
483     setOperationAction(ISD::FABS, MVT::v2f16, Legal);
484 
485     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
486     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
487 
488     setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
489     setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
490     setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
491     setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
492   } else {
493     setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
494     setOperationAction(ISD::SELECT, MVT::v2f16, Custom);
495   }
496 
497   for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
498     setOperationAction(ISD::SELECT, VT, Custom);
499   }
500 
501   setTargetDAGCombine(ISD::ADD);
502   setTargetDAGCombine(ISD::ADDCARRY);
503   setTargetDAGCombine(ISD::SUB);
504   setTargetDAGCombine(ISD::SUBCARRY);
505   setTargetDAGCombine(ISD::FADD);
506   setTargetDAGCombine(ISD::FSUB);
507   setTargetDAGCombine(ISD::FMINNUM);
508   setTargetDAGCombine(ISD::FMAXNUM);
509   setTargetDAGCombine(ISD::SMIN);
510   setTargetDAGCombine(ISD::SMAX);
511   setTargetDAGCombine(ISD::UMIN);
512   setTargetDAGCombine(ISD::UMAX);
513   setTargetDAGCombine(ISD::SETCC);
514   setTargetDAGCombine(ISD::AND);
515   setTargetDAGCombine(ISD::OR);
516   setTargetDAGCombine(ISD::XOR);
517   setTargetDAGCombine(ISD::SINT_TO_FP);
518   setTargetDAGCombine(ISD::UINT_TO_FP);
519   setTargetDAGCombine(ISD::FCANONICALIZE);
520   setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
521   setTargetDAGCombine(ISD::ZERO_EXTEND);
522   setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
523   setTargetDAGCombine(ISD::BUILD_VECTOR);
524 
  // All memory operations. Some folding on the pointer operand is done to help
  // match the constant offsets in the addressing modes.
527   setTargetDAGCombine(ISD::LOAD);
528   setTargetDAGCombine(ISD::STORE);
529   setTargetDAGCombine(ISD::ATOMIC_LOAD);
530   setTargetDAGCombine(ISD::ATOMIC_STORE);
531   setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
532   setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
533   setTargetDAGCombine(ISD::ATOMIC_SWAP);
534   setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
535   setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
536   setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
537   setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
538   setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
539   setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
540   setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
541   setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
542   setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
543   setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
544 
545   setSchedulingPreference(Sched::RegPressure);
546 }
547 
548 const SISubtarget *SITargetLowering::getSubtarget() const {
549   return static_cast<const SISubtarget *>(Subtarget);
550 }
551 
552 //===----------------------------------------------------------------------===//
553 // TargetLowering queries
554 //===----------------------------------------------------------------------===//
555 
556 bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
557   // SI has some legal vector types, but no legal vector operations. Say no
558   // shuffles are legal in order to prefer scalarizing some vector operations.
559   return false;
560 }
561 
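// Describe how a target memory intrinsic touches memory so that selection
// can attach a MachineMemOperand to the resulting node.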
562 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
563                                           const CallInst &CI,
564                                           MachineFunction &MF,
565                                           unsigned IntrID) const {
566   switch (IntrID) {
567   case Intrinsic::amdgcn_atomic_inc:
568   case Intrinsic::amdgcn_atomic_dec: {
569     Info.opc = ISD::INTRINSIC_W_CHAIN;
570     Info.memVT = MVT::getVT(CI.getType());
571     Info.ptrVal = CI.getOperand(0);
572     Info.align = 0;
573     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
574 
575     const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
576     if (!Vol || !Vol->isZero())
577       Info.flags |= MachineMemOperand::MOVolatile;
578 
579     return true;
580   }
581 
582   // Image load.
583   case Intrinsic::amdgcn_image_load:
584   case Intrinsic::amdgcn_image_load_mip:
585 
586   // Sample.
587   case Intrinsic::amdgcn_image_sample:
588   case Intrinsic::amdgcn_image_sample_cl:
589   case Intrinsic::amdgcn_image_sample_d:
590   case Intrinsic::amdgcn_image_sample_d_cl:
591   case Intrinsic::amdgcn_image_sample_l:
592   case Intrinsic::amdgcn_image_sample_b:
593   case Intrinsic::amdgcn_image_sample_b_cl:
594   case Intrinsic::amdgcn_image_sample_lz:
595   case Intrinsic::amdgcn_image_sample_cd:
596   case Intrinsic::amdgcn_image_sample_cd_cl:
597 
598     // Sample with comparison.
599   case Intrinsic::amdgcn_image_sample_c:
600   case Intrinsic::amdgcn_image_sample_c_cl:
601   case Intrinsic::amdgcn_image_sample_c_d:
602   case Intrinsic::amdgcn_image_sample_c_d_cl:
603   case Intrinsic::amdgcn_image_sample_c_l:
604   case Intrinsic::amdgcn_image_sample_c_b:
605   case Intrinsic::amdgcn_image_sample_c_b_cl:
606   case Intrinsic::amdgcn_image_sample_c_lz:
607   case Intrinsic::amdgcn_image_sample_c_cd:
608   case Intrinsic::amdgcn_image_sample_c_cd_cl:
609 
610     // Sample with offsets.
611   case Intrinsic::amdgcn_image_sample_o:
612   case Intrinsic::amdgcn_image_sample_cl_o:
613   case Intrinsic::amdgcn_image_sample_d_o:
614   case Intrinsic::amdgcn_image_sample_d_cl_o:
615   case Intrinsic::amdgcn_image_sample_l_o:
616   case Intrinsic::amdgcn_image_sample_b_o:
617   case Intrinsic::amdgcn_image_sample_b_cl_o:
618   case Intrinsic::amdgcn_image_sample_lz_o:
619   case Intrinsic::amdgcn_image_sample_cd_o:
620   case Intrinsic::amdgcn_image_sample_cd_cl_o:
621 
622     // Sample with comparison and offsets.
623   case Intrinsic::amdgcn_image_sample_c_o:
624   case Intrinsic::amdgcn_image_sample_c_cl_o:
625   case Intrinsic::amdgcn_image_sample_c_d_o:
626   case Intrinsic::amdgcn_image_sample_c_d_cl_o:
627   case Intrinsic::amdgcn_image_sample_c_l_o:
628   case Intrinsic::amdgcn_image_sample_c_b_o:
629   case Intrinsic::amdgcn_image_sample_c_b_cl_o:
630   case Intrinsic::amdgcn_image_sample_c_lz_o:
631   case Intrinsic::amdgcn_image_sample_c_cd_o:
632   case Intrinsic::amdgcn_image_sample_c_cd_cl_o:
633 
634     // Basic gather4
635   case Intrinsic::amdgcn_image_gather4:
636   case Intrinsic::amdgcn_image_gather4_cl:
637   case Intrinsic::amdgcn_image_gather4_l:
638   case Intrinsic::amdgcn_image_gather4_b:
639   case Intrinsic::amdgcn_image_gather4_b_cl:
640   case Intrinsic::amdgcn_image_gather4_lz:
641 
642     // Gather4 with comparison
643   case Intrinsic::amdgcn_image_gather4_c:
644   case Intrinsic::amdgcn_image_gather4_c_cl:
645   case Intrinsic::amdgcn_image_gather4_c_l:
646   case Intrinsic::amdgcn_image_gather4_c_b:
647   case Intrinsic::amdgcn_image_gather4_c_b_cl:
648   case Intrinsic::amdgcn_image_gather4_c_lz:
649 
650     // Gather4 with offsets
651   case Intrinsic::amdgcn_image_gather4_o:
652   case Intrinsic::amdgcn_image_gather4_cl_o:
653   case Intrinsic::amdgcn_image_gather4_l_o:
654   case Intrinsic::amdgcn_image_gather4_b_o:
655   case Intrinsic::amdgcn_image_gather4_b_cl_o:
656   case Intrinsic::amdgcn_image_gather4_lz_o:
657 
658     // Gather4 with comparison and offsets
659   case Intrinsic::amdgcn_image_gather4_c_o:
660   case Intrinsic::amdgcn_image_gather4_c_cl_o:
661   case Intrinsic::amdgcn_image_gather4_c_l_o:
662   case Intrinsic::amdgcn_image_gather4_c_b_o:
663   case Intrinsic::amdgcn_image_gather4_c_b_cl_o:
664   case Intrinsic::amdgcn_image_gather4_c_lz_o: {
665     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
666     Info.opc = ISD::INTRINSIC_W_CHAIN;
667     Info.memVT = MVT::getVT(CI.getType());
668     Info.ptrVal = MFI->getImagePSV(
669       *MF.getSubtarget<SISubtarget>().getInstrInfo(),
670       CI.getArgOperand(1));
671     Info.align = 0;
672     Info.flags = MachineMemOperand::MOLoad |
673                  MachineMemOperand::MODereferenceable;
674     return true;
675   }
676   case Intrinsic::amdgcn_image_store:
677   case Intrinsic::amdgcn_image_store_mip: {
678     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
679     Info.opc = ISD::INTRINSIC_VOID;
680     Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
681     Info.ptrVal = MFI->getImagePSV(
682       *MF.getSubtarget<SISubtarget>().getInstrInfo(),
683       CI.getArgOperand(2));
684     Info.flags = MachineMemOperand::MOStore |
685                  MachineMemOperand::MODereferenceable;
686     Info.align = 0;
687     return true;
688   }
689   case Intrinsic::amdgcn_image_atomic_swap:
690   case Intrinsic::amdgcn_image_atomic_add:
691   case Intrinsic::amdgcn_image_atomic_sub:
692   case Intrinsic::amdgcn_image_atomic_smin:
693   case Intrinsic::amdgcn_image_atomic_umin:
694   case Intrinsic::amdgcn_image_atomic_smax:
695   case Intrinsic::amdgcn_image_atomic_umax:
696   case Intrinsic::amdgcn_image_atomic_and:
697   case Intrinsic::amdgcn_image_atomic_or:
698   case Intrinsic::amdgcn_image_atomic_xor:
699   case Intrinsic::amdgcn_image_atomic_inc:
700   case Intrinsic::amdgcn_image_atomic_dec: {
701     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
702     Info.opc = ISD::INTRINSIC_W_CHAIN;
703     Info.memVT = MVT::getVT(CI.getType());
704     Info.ptrVal = MFI->getImagePSV(
705       *MF.getSubtarget<SISubtarget>().getInstrInfo(),
706       CI.getArgOperand(2));
707 
708     Info.flags = MachineMemOperand::MOLoad |
709                  MachineMemOperand::MOStore |
710                  MachineMemOperand::MODereferenceable;
711 
712     // XXX - Should this be volatile without known ordering?
713     Info.flags |= MachineMemOperand::MOVolatile;
714     return true;
715   }
716   case Intrinsic::amdgcn_image_atomic_cmpswap: {
717     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
718     Info.opc = ISD::INTRINSIC_W_CHAIN;
719     Info.memVT = MVT::getVT(CI.getType());
720     Info.ptrVal = MFI->getImagePSV(
721       *MF.getSubtarget<SISubtarget>().getInstrInfo(),
722       CI.getArgOperand(3));
723 
724     Info.flags = MachineMemOperand::MOLoad |
725                  MachineMemOperand::MOStore |
726                  MachineMemOperand::MODereferenceable;
727 
728     // XXX - Should this be volatile without known ordering?
729     Info.flags |= MachineMemOperand::MOVolatile;
730     return true;
731   }
732   case Intrinsic::amdgcn_tbuffer_load:
733   case Intrinsic::amdgcn_buffer_load:
734   case Intrinsic::amdgcn_buffer_load_format: {
735     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
736     Info.opc = ISD::INTRINSIC_W_CHAIN;
737     Info.ptrVal = MFI->getBufferPSV(
738       *MF.getSubtarget<SISubtarget>().getInstrInfo(),
739       CI.getArgOperand(0));
740     Info.memVT = MVT::getVT(CI.getType());
741     Info.flags = MachineMemOperand::MOLoad |
742                  MachineMemOperand::MODereferenceable;
743 
744     // There is a constant offset component, but there are additional register
745     // offsets which could break AA if we set the offset to anything non-0.
746     return true;
747   }
748   case Intrinsic::amdgcn_tbuffer_store:
749   case Intrinsic::amdgcn_buffer_store:
750   case Intrinsic::amdgcn_buffer_store_format: {
751     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
752     Info.opc = ISD::INTRINSIC_VOID;
753     Info.ptrVal = MFI->getBufferPSV(
754       *MF.getSubtarget<SISubtarget>().getInstrInfo(),
755       CI.getArgOperand(1));
756     Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
757     Info.flags = MachineMemOperand::MOStore |
758                  MachineMemOperand::MODereferenceable;
759     return true;
760   }
761   case Intrinsic::amdgcn_buffer_atomic_swap:
762   case Intrinsic::amdgcn_buffer_atomic_add:
763   case Intrinsic::amdgcn_buffer_atomic_sub:
764   case Intrinsic::amdgcn_buffer_atomic_smin:
765   case Intrinsic::amdgcn_buffer_atomic_umin:
766   case Intrinsic::amdgcn_buffer_atomic_smax:
767   case Intrinsic::amdgcn_buffer_atomic_umax:
768   case Intrinsic::amdgcn_buffer_atomic_and:
769   case Intrinsic::amdgcn_buffer_atomic_or:
770   case Intrinsic::amdgcn_buffer_atomic_xor: {
771     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
772     Info.opc = ISD::INTRINSIC_W_CHAIN;
773     Info.ptrVal = MFI->getBufferPSV(
774       *MF.getSubtarget<SISubtarget>().getInstrInfo(),
775       CI.getArgOperand(1));
776     Info.memVT = MVT::getVT(CI.getType());
777     Info.flags = MachineMemOperand::MOLoad |
778                  MachineMemOperand::MOStore |
779                  MachineMemOperand::MODereferenceable |
780                  MachineMemOperand::MOVolatile;
781     return true;
782   }
783   case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
784     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
785     Info.opc = ISD::INTRINSIC_W_CHAIN;
786     Info.ptrVal = MFI->getBufferPSV(
787       *MF.getSubtarget<SISubtarget>().getInstrInfo(),
788       CI.getArgOperand(2));
789     Info.memVT = MVT::getVT(CI.getType());
790     Info.flags = MachineMemOperand::MOLoad |
791                  MachineMemOperand::MOStore |
792                  MachineMemOperand::MODereferenceable |
793                  MachineMemOperand::MOVolatile;
794     return true;
795   }
796   default:
797     return false;
798   }
799 }
800 
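// Collect the pointer operand (and access type) of intrinsics whose address
// computation may be folded into an addressing mode.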
801 bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
802                                             SmallVectorImpl<Value*> &Ops,
803                                             Type *&AccessTy) const {
804   switch (II->getIntrinsicID()) {
805   case Intrinsic::amdgcn_atomic_inc:
806   case Intrinsic::amdgcn_atomic_dec: {
807     Value *Ptr = II->getArgOperand(0);
808     AccessTy = II->getType();
809     Ops.push_back(Ptr);
810     return true;
811   }
812   default:
813     return false;
814   }
815 }
816 
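// A flat access is just a register address, plus a small unsigned immediate
// offset on subtargets that support flat instruction offsets.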
817 bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
818   if (!Subtarget->hasFlatInstOffsets()) {
819     // Flat instructions do not have offsets, and only have the register
820     // address.
821     return AM.BaseOffs == 0 && AM.Scale == 0;
822   }
823 
824   // GFX9 added a 13-bit signed offset. When using regular flat instructions,
825   // the sign bit is ignored and is treated as a 12-bit unsigned offset.
826 
827   // Just r + i
828   return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
829 }
830 
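// Global accesses may be selected to either FLAT or MUBUF instructions
// depending on the subtarget, so accept whichever addressing mode applies.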
831 bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
832   if (Subtarget->hasFlatGlobalInsts())
833     return isInt<13>(AM.BaseOffs) && AM.Scale == 0;
834 
835   if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
      // Assume that we will use FLAT for all global memory accesses
837       // on VI.
838       // FIXME: This assumption is currently wrong.  On VI we still use
839       // MUBUF instructions for the r + i addressing mode.  As currently
840       // implemented, the MUBUF instructions only work on buffer < 4GB.
841       // It may be possible to support > 4GB buffers with MUBUF instructions,
842       // by setting the stride value in the resource descriptor which would
843       // increase the size limit to (stride * 4GB).  However, this is risky,
844       // because it has never been validated.
845     return isLegalFlatAddressingMode(AM);
846   }
847 
848   return isLegalMUBUFAddressingMode(AM);
849 }
850 
851 bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
852   // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
853   // additionally can do r + r + i with addr64. 32-bit has more addressing
854   // mode options. Depending on the resource constant, it can also do
855   // (i64 r0) + (i32 r1) * (i14 i).
856   //
857   // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as MUBUF instructions with the offen bit set, so they are
  // slightly different from the normal addr64 mode.
861   if (!isUInt<12>(AM.BaseOffs))
862     return false;
863 
  // FIXME: Since we can split the immediate into soffset and immediate offset,
865   // would it make sense to allow any immediate?
866 
867   switch (AM.Scale) {
868   case 0: // r + i or just i, depending on HasBaseReg.
869     return true;
870   case 1:
871     return true; // We have r + r or r + i.
872   case 2:
873     if (AM.HasBaseReg) {
874       // Reject 2 * r + r.
875       return false;
876     }
877 
878     // Allow 2 * r as r + r
879     // Or  2 * r + i is allowed as r + r + i.
880     return true;
881   default: // Don't allow n * r
882     return false;
883   }
884 }
885 
886 bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
887                                              const AddrMode &AM, Type *Ty,
888                                              unsigned AS, Instruction *I) const {
889   // No global is ever allowed as a base.
890   if (AM.BaseGV)
891     return false;
892 
893   if (AS == AMDGPUASI.GLOBAL_ADDRESS)
894     return isLegalGlobalAddressingMode(AM);
895 
896   if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
897     // If the offset isn't a multiple of 4, it probably isn't going to be
898     // correctly aligned.
899     // FIXME: Can we get the real alignment here?
900     if (AM.BaseOffs % 4 != 0)
901       return isLegalMUBUFAddressingMode(AM);
902 
903     // There are no SMRD extloads, so if we have to do a small type access we
904     // will use a MUBUF load.
905     // FIXME?: We also need to do this if unaligned, but we don't know the
906     // alignment here.
907     if (DL.getTypeStoreSize(Ty) < 4)
908       return isLegalGlobalAddressingMode(AM);
909 
910     if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
911       // SMRD instructions have an 8-bit, dword offset on SI.
912       if (!isUInt<8>(AM.BaseOffs / 4))
913         return false;
914     } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) {
915       // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8 bits, it can use a smaller encoding.
917       if (!isUInt<32>(AM.BaseOffs / 4))
918         return false;
919     } else if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
920       // On VI, these use the SMEM format and the offset is 20-bit in bytes.
921       if (!isUInt<20>(AM.BaseOffs))
922         return false;
923     } else
924       llvm_unreachable("unhandled generation");
925 
926     if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
927       return true;
928 
929     if (AM.Scale == 1 && AM.HasBaseReg)
930       return true;
931 
932     return false;
933 
934   } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
935     return isLegalMUBUFAddressingMode(AM);
936   } else if (AS == AMDGPUASI.LOCAL_ADDRESS ||
937              AS == AMDGPUASI.REGION_ADDRESS) {
938     // Basic, single offset DS instructions allow a 16-bit unsigned immediate
939     // field.
940     // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
941     // an 8-bit dword offset but we don't know the alignment here.
942     if (!isUInt<16>(AM.BaseOffs))
943       return false;
944 
945     if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
946       return true;
947 
948     if (AM.Scale == 1 && AM.HasBaseReg)
949       return true;
950 
951     return false;
952   } else if (AS == AMDGPUASI.FLAT_ADDRESS ||
953              AS == AMDGPUASI.UNKNOWN_ADDRESS_SPACE) {
954     // For an unknown address space, this usually means that this is for some
955     // reason being used for pure arithmetic, and not based on some addressing
956     // computation. We don't have instructions that compute pointers with any
957     // addressing modes, so treat them as having no offset like flat
958     // instructions.
959     return isLegalFlatAddressingMode(AM);
960   } else {
961     llvm_unreachable("unhandled address space");
962   }
963 }
964 
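// Limit how wide a merged store may be, based on the widest access the
// address space supports.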
965 bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
966                                         const SelectionDAG &DAG) const {
967   if (AS == AMDGPUASI.GLOBAL_ADDRESS || AS == AMDGPUASI.FLAT_ADDRESS) {
968     return (MemVT.getSizeInBits() <= 4 * 32);
969   } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
970     unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
971     return (MemVT.getSizeInBits() <= MaxPrivateBits);
972   } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
973     return (MemVT.getSizeInBits() <= 2 * 32);
974   }
975   return true;
976 }
977 
978 bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
979                                                       unsigned AddrSpace,
980                                                       unsigned Align,
981                                                       bool *IsFast) const {
982   if (IsFast)
983     *IsFast = false;
984 
985   // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
986   // which isn't a simple VT.
987   // Until MVT is extended to handle this, simply check for the size and
988   // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other ||
      (VT.getSizeInBits() > 1024 && VT.getStoreSize() > 16)) {
991     return false;
992   }
993 
994   if (AddrSpace == AMDGPUASI.LOCAL_ADDRESS ||
995       AddrSpace == AMDGPUASI.REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4-byte
    // aligned, 8-byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
999     bool AlignedBy4 = (Align % 4 == 0);
1000     if (IsFast)
1001       *IsFast = AlignedBy4;
1002 
1003     return AlignedBy4;
1004   }
1005 
1006   // FIXME: We have to be conservative here and assume that flat operations
1007   // will access scratch.  If we had access to the IR function, then we
1008   // could determine if any private memory was used in the function.
1009   if (!Subtarget->hasUnalignedScratchAccess() &&
1010       (AddrSpace == AMDGPUASI.PRIVATE_ADDRESS ||
1011        AddrSpace == AMDGPUASI.FLAT_ADDRESS)) {
1012     return false;
1013   }
1014 
1015   if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have a uniform constant load, it still requires using a slow
1017     // buffer instruction if unaligned.
1018     if (IsFast) {
1019       *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS) ?
1020         (Align % 4 == 0) : true;
1021     }
1022 
1023     return true;
1024   }
1025 
1026   // Smaller than dword value must be aligned.
1027   if (VT.bitsLT(MVT::i32))
1028     return false;
1029 
1030   // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
1031   // byte-address are ignored, thus forcing Dword alignment.
1032   // This applies to private, global, and constant memory.
1033   if (IsFast)
1034     *IsFast = true;
1035 
1036   return VT.bitsGT(MVT::i32) && Align % 4 == 0;
1037 }
1038 
1039 EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
1040                                           unsigned SrcAlign, bool IsMemset,
1041                                           bool ZeroMemset,
1042                                           bool MemcpyStrSrc,
1043                                           MachineFunction &MF) const {
1044   // FIXME: Should account for address space here.
1045 
1046   // The default fallback uses the private pointer size as a guess for a type to
1047   // use. Make sure we switch these to 64-bit accesses.
1048 
1049   if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
1050     return MVT::v4i32;
1051 
1052   if (Size >= 8 && DstAlign >= 4)
1053     return MVT::v2i32;
1054 
1055   // Use the default.
1056   return MVT::Other;
1057 }
1058 
1059 static bool isFlatGlobalAddrSpace(unsigned AS, AMDGPUAS AMDGPUASI) {
1060   return AS == AMDGPUASI.GLOBAL_ADDRESS ||
1061          AS == AMDGPUASI.FLAT_ADDRESS ||
1062          AS == AMDGPUASI.CONSTANT_ADDRESS;
1063 }
1064 
1065 bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1066                                            unsigned DestAS) const {
1067   return isFlatGlobalAddrSpace(SrcAS, AMDGPUASI) &&
1068          isFlatGlobalAddrSpace(DestAS, AMDGPUASI);
1069 }
1070 
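// A memory operation is known not to be clobbered if the instruction that
// produced its pointer carries 'amdgpu.noclobber' metadata.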
1071 bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
1072   const MemSDNode *MemNode = cast<MemSDNode>(N);
1073   const Value *Ptr = MemNode->getMemOperand()->getValue();
1074   const Instruction *I = dyn_cast<Instruction>(Ptr);
1075   return I && I->getMetadata("amdgpu.noclobber");
1076 }
1077 
1078 bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
1079                                             unsigned DestAS) const {
1080   // Flat -> private/local is a simple truncate.
  // Flat -> global is a no-op.
1082   if (SrcAS == AMDGPUASI.FLAT_ADDRESS)
1083     return true;
1084 
1085   return isNoopAddrSpaceCast(SrcAS, DestAS);
1086 }
1087 
1088 bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
1089   const MemSDNode *MemNode = cast<MemSDNode>(N);
1090 
1091   return AMDGPU::isUniformMMO(MemNode->getMemOperand());
1092 }
1093 
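// Prefer splitting vectors of 16-bit or narrower elements so the pieces can
// be legalized individually.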
1094 TargetLoweringBase::LegalizeTypeAction
1095 SITargetLowering::getPreferredVectorAction(EVT VT) const {
1096   if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
1097     return TypeSplitVector;
1098 
1099   return TargetLoweringBase::getPreferredVectorAction(VT);
1100 }
1101 
1102 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
1103                                                          Type *Ty) const {
1104   // FIXME: Could be smarter if called for vector constants.
1105   return true;
1106 }
1107 
1108 bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
1109   if (Subtarget->has16BitInsts() && VT == MVT::i16) {
1110     switch (Op) {
1111     case ISD::LOAD:
1112     case ISD::STORE:
1113 
1114     // These operations are done with 32-bit instructions anyway.
1115     case ISD::AND:
1116     case ISD::OR:
1117     case ISD::XOR:
1118     case ISD::SELECT:
1119       // TODO: Extensions?
1120       return true;
1121     default:
1122       return false;
1123     }
1124   }
1125 
1126   // SimplifySetCC uses this function to determine whether or not it should
1127   // create setcc with i1 operands.  We don't have instructions for i1 setcc.
1128   if (VT == MVT::i1 && Op == ISD::SETCC)
1129     return false;
1130 
1131   return TargetLowering::isTypeDesirableForOp(Op, VT);
1132 }
1133 
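// Compute a pointer into the kernarg segment: the preloaded kernarg segment
// pointer plus a constant byte offset.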
1134 SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1135                                                    const SDLoc &SL,
1136                                                    SDValue Chain,
1137                                                    uint64_t Offset) const {
1138   const DataLayout &DL = DAG.getDataLayout();
1139   MachineFunction &MF = DAG.getMachineFunction();
1140   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1141 
1142   const ArgDescriptor *InputPtrReg;
1143   const TargetRegisterClass *RC;
1144 
1145   std::tie(InputPtrReg, RC)
1146     = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
1147 
1148   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
1149   MVT PtrVT = getPointerTy(DL, AMDGPUASI.CONSTANT_ADDRESS);
1150   SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
1151     MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1152 
1153   return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
1154                      DAG.getConstant(Offset, SL, PtrVT));
1155 }
1156 
1157 SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1158                                             const SDLoc &SL) const {
1159   auto MFI = DAG.getMachineFunction().getInfo<SIMachineFunctionInfo>();
1160   uint64_t Offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
1161   return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1162 }
1163 
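// Convert a loaded kernel argument from its in-memory type MemVT to the
// expected type VT, adding AssertZext/AssertSext nodes when the ABI flags
// require it.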
1164 SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1165                                          const SDLoc &SL, SDValue Val,
1166                                          bool Signed,
1167                                          const ISD::InputArg *Arg) const {
1168   if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1169       VT.bitsLT(MemVT)) {
1170     unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1171     Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1172   }
1173 
1174   if (MemVT.isFloatingPoint())
1175     Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
1176   else if (Signed)
1177     Val = DAG.getSExtOrTrunc(Val, SL, VT);
1178   else
1179     Val = DAG.getZExtOrTrunc(Val, SL, VT);
1180 
1181   return Val;
1182 }
1183 
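// Lower an argument passed in the kernarg segment: emit an invariant,
// dereferenceable load at the given byte offset and convert the result to VT.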
1184 SDValue SITargetLowering::lowerKernargMemParameter(
1185   SelectionDAG &DAG, EVT VT, EVT MemVT,
1186   const SDLoc &SL, SDValue Chain,
1187   uint64_t Offset, bool Signed,
1188   const ISD::InputArg *Arg) const {
1189   const DataLayout &DL = DAG.getDataLayout();
1190   Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
1191   PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
1192   MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
1193 
1194   unsigned Align = DL.getABITypeAlignment(Ty);
1195 
1196   SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1197   SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
1198                              MachineMemOperand::MONonTemporal |
1199                              MachineMemOperand::MODereferenceable |
1200                              MachineMemOperand::MOInvariant);
1201 
1202   SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
1203   return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
1204 }
1205 
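// Lower an incoming argument passed on the stack: byval arguments become a
// fixed frame index; everything else is loaded from its fixed stack slot with
// the required extension.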
1206 SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1207                                               const SDLoc &SL, SDValue Chain,
1208                                               const ISD::InputArg &Arg) const {
1209   MachineFunction &MF = DAG.getMachineFunction();
1210   MachineFrameInfo &MFI = MF.getFrameInfo();
1211 
1212   if (Arg.Flags.isByVal()) {
1213     unsigned Size = Arg.Flags.getByValSize();
1214     int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1215     return DAG.getFrameIndex(FrameIdx, MVT::i32);
1216   }
1217 
1218   unsigned ArgOffset = VA.getLocMemOffset();
1219   unsigned ArgSize = VA.getValVT().getStoreSize();
1220 
1221   int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1222 
1223   // Create load nodes to retrieve arguments from the stack.
1224   SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1225   SDValue ArgValue;
1226 
  // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
1228   ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
1229   MVT MemVT = VA.getValVT();
1230 
1231   switch (VA.getLocInfo()) {
1232   default:
1233     break;
1234   case CCValAssign::BCvt:
1235     MemVT = VA.getLocVT();
1236     break;
1237   case CCValAssign::SExt:
1238     ExtType = ISD::SEXTLOAD;
1239     break;
1240   case CCValAssign::ZExt:
1241     ExtType = ISD::ZEXTLOAD;
1242     break;
1243   case CCValAssign::AExt:
1244     ExtType = ISD::EXTLOAD;
1245     break;
1246   }
1247 
1248   ArgValue = DAG.getExtLoad(
1249     ExtType, SL, VA.getLocVT(), Chain, FIN,
1250     MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
1251     MemVT);
1252   return ArgValue;
1253 }
1254 
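// Create a live-in register for one of the preloaded special values
// (dispatch pointer, workgroup IDs, etc.) described by PVID.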
1255 SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1256   const SIMachineFunctionInfo &MFI,
1257   EVT VT,
1258   AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
1259   const ArgDescriptor *Reg;
1260   const TargetRegisterClass *RC;
1261 
1262   std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1263   return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1264 }
1265 
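// Record which PS inputs are allocated/enabled and split vector shader
// arguments into one InputArg per element.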
1266 static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
1267                                    CallingConv::ID CallConv,
1268                                    ArrayRef<ISD::InputArg> Ins,
1269                                    BitVector &Skipped,
1270                                    FunctionType *FType,
1271                                    SIMachineFunctionInfo *Info) {
1272   for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
1273     const ISD::InputArg &Arg = Ins[I];
1274 
    // First, check if it's a PS input addr.
1276     if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
1277         !Arg.Flags.isByVal() && PSInputNum <= 15) {
1278 
1279       if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
1280         // We can safely skip PS inputs.
1281         Skipped.set(I);
1282         ++PSInputNum;
1283         continue;
1284       }
1285 
1286       Info->markPSInputAllocated(PSInputNum);
1287       if (Arg.Used)
1288         Info->markPSInputEnabled(PSInputNum);
1289 
1290       ++PSInputNum;
1291     }
1292 
    // Second, split vertices into their elements.
1294     if (Arg.VT.isVector()) {
1295       ISD::InputArg NewArg = Arg;
1296       NewArg.Flags.setSplit();
1297       NewArg.VT = Arg.VT.getVectorElementType();
1298 
1299       // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
1300       // three or five element vertex only needs three or five registers,
1301       // NOT four or eight.
1302       Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
1303       unsigned NumElements = ParamType->getVectorNumElements();
1304 
1305       for (unsigned J = 0; J != NumElements; ++J) {
1306         Splits.push_back(NewArg);
1307         NewArg.PartOffset += NewArg.VT.getStoreSize();
1308       }
1309     } else {
1310       Splits.push_back(Arg);
1311     }
1312   }
1313 }
1314 
1315 // Allocate special inputs passed in VGPRs.
1316 static void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1317                                            MachineFunction &MF,
1318                                            const SIRegisterInfo &TRI,
1319                                            SIMachineFunctionInfo &Info) {
1320   if (Info.hasWorkItemIDX()) {
1321     unsigned Reg = AMDGPU::VGPR0;
1322     MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1323 
1324     CCInfo.AllocateReg(Reg);
1325     Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
1326   }
1327 
1328   if (Info.hasWorkItemIDY()) {
1329     unsigned Reg = AMDGPU::VGPR1;
1330     MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1331 
1332     CCInfo.AllocateReg(Reg);
1333     Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
1334   }
1335 
1336   if (Info.hasWorkItemIDZ()) {
1337     unsigned Reg = AMDGPU::VGPR2;
1338     MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1339 
1340     CCInfo.AllocateReg(Reg);
1341     Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
1342   }
1343 }
1344 
// Try to allocate a VGPR at the end of the argument list, or, if no argument
// VGPRs are left, allocate a stack slot instead.
1347 static ArgDescriptor allocateVGPR32Input(CCState &CCInfo) {
1348   ArrayRef<MCPhysReg> ArgVGPRs
1349     = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1350   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1351   if (RegIdx == ArgVGPRs.size()) {
1352     // Spill to stack required.
1353     int64_t Offset = CCInfo.AllocateStack(4, 4);
1354 
1355     return ArgDescriptor::createStack(Offset);
1356   }
1357 
1358   unsigned Reg = ArgVGPRs[RegIdx];
1359   Reg = CCInfo.AllocateReg(Reg);
1360   assert(Reg != AMDGPU::NoRegister);
1361 
1362   MachineFunction &MF = CCInfo.getMachineFunction();
1363   MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1364   return ArgDescriptor::createRegister(Reg);
1365 }
1366 
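// Allocate the next unallocated register from RC for a special input,
// reporting a fatal error if no argument SGPRs remain.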
1367 static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
1368                                              const TargetRegisterClass *RC,
1369                                              unsigned NumArgRegs) {
1370   ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), NumArgRegs);
1371   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1372   if (RegIdx == ArgSGPRs.size())
1373     report_fatal_error("ran out of SGPRs for arguments");
1374 
1375   unsigned Reg = ArgSGPRs[RegIdx];
1376   Reg = CCInfo.AllocateReg(Reg);
1377   assert(Reg != AMDGPU::NoRegister);
1378 
1379   MachineFunction &MF = CCInfo.getMachineFunction();
1380   MF.addLiveIn(Reg, RC);
1381   return ArgDescriptor::createRegister(Reg);
1382 }
1383 
1384 static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
1385   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1386 }
1387 
1388 static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
1389   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1390 }
1391 
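// Allocate the special inputs passed in VGPRs (the workitem IDs) for callable
// functions, using argument VGPRs when available and stack slots otherwise.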
1392 static void allocateSpecialInputVGPRs(CCState &CCInfo,
1393                                       MachineFunction &MF,
1394                                       const SIRegisterInfo &TRI,
1395                                       SIMachineFunctionInfo &Info) {
1396   if (Info.hasWorkItemIDX())
1397     Info.setWorkItemIDX(allocateVGPR32Input(CCInfo));
1398 
1399   if (Info.hasWorkItemIDY())
1400     Info.setWorkItemIDY(allocateVGPR32Input(CCInfo));
1401 
1402   if (Info.hasWorkItemIDZ())
1403     Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo));
1404 }
1405 
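// Allocate the special inputs passed in SGPRs (dispatch/queue/kernarg
// pointers, dispatch ID, workgroup IDs, implicit argument pointer) for
// callable functions from the remaining argument SGPRs.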
1406 static void allocateSpecialInputSGPRs(CCState &CCInfo,
1407                                       MachineFunction &MF,
1408                                       const SIRegisterInfo &TRI,
1409                                       SIMachineFunctionInfo &Info) {
1410   auto &ArgInfo = Info.getArgInfo();
1411 
1412   // TODO: Unify handling with private memory pointers.
1413 
1414   if (Info.hasDispatchPtr())
1415     ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1416 
1417   if (Info.hasQueuePtr())
1418     ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1419 
1420   if (Info.hasKernargSegmentPtr())
1421     ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);
1422 
1423   if (Info.hasDispatchID())
1424     ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1425 
1426   // flat_scratch_init is not applicable for non-kernel functions.
1427 
1428   if (Info.hasWorkGroupIDX())
1429     ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1430 
1431   if (Info.hasWorkGroupIDY())
1432     ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1433 
1434   if (Info.hasWorkGroupIDZ())
1435     ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
1436 
1437   if (Info.hasImplicitArgPtr())
1438     ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
1439 }
1440 
1441 // Allocate special inputs passed in user SGPRs.
1442 static void allocateHSAUserSGPRs(CCState &CCInfo,
1443                                  MachineFunction &MF,
1444                                  const SIRegisterInfo &TRI,
1445                                  SIMachineFunctionInfo &Info) {
1446   if (Info.hasImplicitBufferPtr()) {
1447     unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1448     MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1449     CCInfo.AllocateReg(ImplicitBufferPtrReg);
1450   }
1451 
1452   // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1453   if (Info.hasPrivateSegmentBuffer()) {
1454     unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1455     MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1456     CCInfo.AllocateReg(PrivateSegmentBufferReg);
1457   }
1458 
1459   if (Info.hasDispatchPtr()) {
1460     unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1461     MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1462     CCInfo.AllocateReg(DispatchPtrReg);
1463   }
1464 
1465   if (Info.hasQueuePtr()) {
1466     unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1467     MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1468     CCInfo.AllocateReg(QueuePtrReg);
1469   }
1470 
1471   if (Info.hasKernargSegmentPtr()) {
1472     unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
1473     MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1474     CCInfo.AllocateReg(InputPtrReg);
1475   }
1476 
1477   if (Info.hasDispatchID()) {
1478     unsigned DispatchIDReg = Info.addDispatchID(TRI);
1479     MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1480     CCInfo.AllocateReg(DispatchIDReg);
1481   }
1482 
1483   if (Info.hasFlatScratchInit()) {
1484     unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1485     MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1486     CCInfo.AllocateReg(FlatScratchInitReg);
1487   }
1488 
1489   // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1490   // these from the dispatch pointer.
1491 }
1492 
1493 // Allocate special input registers that are initialized per-wave.
1494 static void allocateSystemSGPRs(CCState &CCInfo,
1495                                 MachineFunction &MF,
1496                                 SIMachineFunctionInfo &Info,
1497                                 CallingConv::ID CallConv,
1498                                 bool IsShader) {
1499   if (Info.hasWorkGroupIDX()) {
1500     unsigned Reg = Info.addWorkGroupIDX();
1501     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1502     CCInfo.AllocateReg(Reg);
1503   }
1504 
1505   if (Info.hasWorkGroupIDY()) {
1506     unsigned Reg = Info.addWorkGroupIDY();
1507     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1508     CCInfo.AllocateReg(Reg);
1509   }
1510 
1511   if (Info.hasWorkGroupIDZ()) {
1512     unsigned Reg = Info.addWorkGroupIDZ();
1513     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1514     CCInfo.AllocateReg(Reg);
1515   }
1516 
1517   if (Info.hasWorkGroupInfo()) {
1518     unsigned Reg = Info.addWorkGroupInfo();
1519     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1520     CCInfo.AllocateReg(Reg);
1521   }
1522 
1523   if (Info.hasPrivateSegmentWaveByteOffset()) {
1524     // Scratch wave offset passed in system SGPR.
1525     unsigned PrivateSegmentWaveByteOffsetReg;
1526 
1527     if (IsShader) {
1528       PrivateSegmentWaveByteOffsetReg =
1529         Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
1530 
1531       // This is true if the scratch wave byte offset doesn't have a fixed
1532       // location.
1533       if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1534         PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1535         Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1536       }
1537     } else
1538       PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1539 
1540     MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1541     CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1542   }
1543 }
1544 
1545 static void reservePrivateMemoryRegs(const TargetMachine &TM,
1546                                      MachineFunction &MF,
1547                                      const SIRegisterInfo &TRI,
1548                                      SIMachineFunctionInfo &Info) {
1549   // Now that we've figured out where the scratch register inputs are, see if
1550   // we should reserve the arguments and use them directly.
1551   MachineFrameInfo &MFI = MF.getFrameInfo();
1552   bool HasStackObjects = MFI.hasStackObjects();
1553 
1554   // Record that we know we have non-spill stack objects so we don't need to
1555   // check all stack objects later.
1556   if (HasStackObjects)
1557     Info.setHasNonSpillStackObjects(true);
1558 
1559   // Everything live out of a block is spilled with fast regalloc, so it's
1560   // almost certain that spilling will be required.
1561   if (TM.getOptLevel() == CodeGenOpt::None)
1562     HasStackObjects = true;
1563 
1564   // For now assume stack access is needed in any callee functions, so we need
1565   // the scratch registers to pass in.
1566   bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1567 
1568   const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
1569   if (ST.isAmdCodeObjectV2(MF)) {
1570     if (RequiresStackAccess) {
1571       // If we have stack objects, we unquestionably need the private buffer
1572       // resource. For the Code Object V2 ABI, this will be the first 4 user
1573       // SGPR inputs. We can reserve those and use them directly.
1574 
1575       unsigned PrivateSegmentBufferReg = Info.getPreloadedReg(
1576         AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
1577       Info.setScratchRSrcReg(PrivateSegmentBufferReg);
1578 
1579       if (MFI.hasCalls()) {
1580         // If we have calls, we need to keep the frame register in a register
1581         // that won't be clobbered by a call, so ensure it is copied somewhere.
1582 
1583         // This is not a problem for the scratch wave offset, because the same
1584         // registers are reserved in all functions.
1585 
1586         // FIXME: Nothing is really ensuring this is a call preserved register,
1587         // it's just selected from the end so it happens to be.
1588         unsigned ReservedOffsetReg
1589           = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1590         Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1591       } else {
1592         unsigned PrivateSegmentWaveByteOffsetReg = Info.getPreloadedReg(
1593           AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
1594         Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
1595       }
1596     } else {
1597       unsigned ReservedBufferReg
1598         = TRI.reservedPrivateSegmentBufferReg(MF);
1599       unsigned ReservedOffsetReg
1600         = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1601 
1602       // We tentatively reserve the last registers (skipping the last two
1603       // which may contain VCC). After register allocation, we'll replace
1604       // these with the ones immediately after those which were really
1605       // allocated. In the prologue, copies will be inserted from the argument
1606       // to these reserved registers.
1607       Info.setScratchRSrcReg(ReservedBufferReg);
1608       Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1609     }
1610   } else {
1611     unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
1612 
1613     // Without HSA, relocations are used for the scratch pointer and the
1614     // buffer resource setup is always inserted in the prologue. Scratch wave
1615     // offset is still in an input SGPR.
1616     Info.setScratchRSrcReg(ReservedBufferReg);
1617 
1618     if (HasStackObjects && !MFI.hasCalls()) {
1619       unsigned ScratchWaveOffsetReg = Info.getPreloadedReg(
1620         AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
1621       Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg);
1622     } else {
1623       unsigned ReservedOffsetReg
1624         = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1625       Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1626     }
1627   }
1628 }
1629 
1630 bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
1631   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1632   return !Info->isEntryFunction();
1633 }
1634 
1635 void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
1636   // Nothing to do here; the copies are created in insertCopiesSplitCSR().
1637 }
1638 
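// Copy each callee-saved register handled via split CSR into a fresh virtual
// register in the entry block, and copy it back immediately before the
// terminator of every exit block.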
1639 void SITargetLowering::insertCopiesSplitCSR(
1640   MachineBasicBlock *Entry,
1641   const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1642   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1643 
1644   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1645   if (!IStart)
1646     return;
1647 
1648   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1649   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1650   MachineBasicBlock::iterator MBBI = Entry->begin();
1651   for (const MCPhysReg *I = IStart; *I; ++I) {
1652     const TargetRegisterClass *RC = nullptr;
1653     if (AMDGPU::SReg_64RegClass.contains(*I))
1654       RC = &AMDGPU::SGPR_64RegClass;
1655     else if (AMDGPU::SReg_32RegClass.contains(*I))
1656       RC = &AMDGPU::SGPR_32RegClass;
1657     else
1658       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
1659 
1660     unsigned NewVR = MRI->createVirtualRegister(RC);
1661     // Create copy from CSR to a virtual register.
1662     Entry->addLiveIn(*I);
1663     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
1664       .addReg(*I);
1665 
1666     // Insert the copy-back instructions right before the terminator.
1667     for (auto *Exit : Exits)
1668       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
1669               TII->get(TargetOpcode::COPY), *I)
1670         .addReg(NewVR);
1671   }
1672 }
1673 
1674 SDValue SITargetLowering::LowerFormalArguments(
1675     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1676     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1677     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1678   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1679 
1680   MachineFunction &MF = DAG.getMachineFunction();
1681   FunctionType *FType = MF.getFunction().getFunctionType();
1682   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1683   const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
1684 
1685   if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
1686     const Function &Fn = MF.getFunction();
1687     DiagnosticInfoUnsupported NoGraphicsHSA(
1688         Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
1689     DAG.getContext()->diagnose(NoGraphicsHSA);
1690     return DAG.getEntryNode();
1691   }
1692 
1693   // Create stack objects that are used for emitting the debugger prologue if
1694   // the "amdgpu-debugger-emit-prologue" attribute was specified.
1695   if (ST.debuggerEmitPrologue())
1696     createDebuggerPrologueStackObjects(MF);
1697 
1698   SmallVector<ISD::InputArg, 16> Splits;
1699   SmallVector<CCValAssign, 16> ArgLocs;
1700   BitVector Skipped(Ins.size());
1701   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1702                  *DAG.getContext());
1703 
1704   bool IsShader = AMDGPU::isShader(CallConv);
1705   bool IsKernel = AMDGPU::isKernel(CallConv);
1706   bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
1707 
1708   if (!IsEntryFunc) {
1709     // 4 bytes are reserved at offset 0 for the emergency stack slot. Skip over
1710     // this when allocating fixed argument offsets.
1711     CCInfo.AllocateStack(4, 4);
1712   }
1713 
1714   if (IsShader) {
1715     processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
1716 
1717     // At least one interpolation mode must be enabled or else the GPU will
1718     // hang.
1719     //
1720     // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
1721     // set PSInputAddr, the user wants to enable some bits after the compilation
1722     // based on run-time states. Since we can't know what the final PSInputEna
1723     // will look like, we shouldn't do anything here, and the user should take
1724     // responsibility for the correct programming.
1725     //
1726     // Otherwise, the following restrictions apply:
1727     // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
1728     // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
1729     //   enabled too.
1730     if (CallConv == CallingConv::AMDGPU_PS) {
1731       if ((Info->getPSInputAddr() & 0x7F) == 0 ||
1732            ((Info->getPSInputAddr() & 0xF) == 0 &&
1733             Info->isPSInputAllocated(11))) {
1734         CCInfo.AllocateReg(AMDGPU::VGPR0);
1735         CCInfo.AllocateReg(AMDGPU::VGPR1);
1736         Info->markPSInputAllocated(0);
1737         Info->markPSInputEnabled(0);
1738       }
1739       if (Subtarget->isAmdPalOS()) {
1740         // For isAmdPalOS, the user does not enable some bits after compilation
1741         // based on run-time states; the register values being generated here are
1742         // the final ones set in hardware. Therefore we need to apply the
1743         // workaround to PSInputAddr and PSInputEnable together.  (The case where
1744         // a bit is set in PSInputAddr but not PSInputEnable is where the
1745         // frontend set up an input arg for a particular interpolation mode, but
1746         // nothing uses that input arg. Really we should have an earlier pass
1747         // that removes such an arg.)
1748         unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
1749         if ((PsInputBits & 0x7F) == 0 ||
1750             ((PsInputBits & 0xF) == 0 &&
1751              (PsInputBits >> 11 & 1)))
1752           Info->markPSInputEnabled(
1753               countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
1754       }
1755     }
1756 
1757     assert(!Info->hasDispatchPtr() &&
1758            !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
1759            !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
1760            !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
1761            !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
1762            !Info->hasWorkItemIDZ());
1763   } else if (IsKernel) {
1764     assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
1765   } else {
1766     Splits.append(Ins.begin(), Ins.end());
1767   }
1768 
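  // Entry functions have their preloaded VGPR and user SGPR inputs allocated
  // before the IR arguments are assigned locations.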
1769   if (IsEntryFunc) {
1770     allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
1771     allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
1772   }
1773 
1774   if (IsKernel) {
1775     analyzeFormalArgumentsCompute(CCInfo, Ins);
1776   } else {
1777     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
1778     CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
1779   }
1780 
1781   SmallVector<SDValue, 16> Chains;
1782 
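  // Lower each incoming argument. Kernel arguments assigned to memory are
  // loaded from the kernarg segment, stack arguments of callable functions are
  // loaded from fixed stack objects, and everything else arrives in registers.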
1783   for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
1784     const ISD::InputArg &Arg = Ins[i];
1785     if (Skipped[i]) {
1786       InVals.push_back(DAG.getUNDEF(Arg.VT));
1787       continue;
1788     }
1789 
1790     CCValAssign &VA = ArgLocs[ArgIdx++];
1791     MVT VT = VA.getLocVT();
1792 
1793     if (IsEntryFunc && VA.isMemLoc()) {
1794       VT = Ins[i].VT;
1795       EVT MemVT = VA.getLocVT();
1796 
1797       const uint64_t Offset = Subtarget->getExplicitKernelArgOffset(MF) +
1798         VA.getLocMemOffset();
1799       Info->setABIArgOffset(Offset + MemVT.getStoreSize());
1800 
1801       // The first 36 bytes of the input buffer contain information about
1802       // thread group and global sizes.
1803       SDValue Arg = lowerKernargMemParameter(
1804         DAG, VT, MemVT, DL, Chain, Offset, Ins[i].Flags.isSExt(), &Ins[i]);
1805       Chains.push_back(Arg.getValue(1));
1806 
1807       auto *ParamTy =
1808         dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
1809       if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
1810           ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
1811         // On SI, local pointers are just offsets into LDS, so they always
1812         // fit in 16 bits. On CI and newer they could potentially be
1813         // real pointers, so we can't guarantee their size.
1814         Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
1815                           DAG.getValueType(MVT::i16));
1816       }
1817 
1818       InVals.push_back(Arg);
1819       continue;
1820     } else if (!IsEntryFunc && VA.isMemLoc()) {
1821       SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
1822       InVals.push_back(Val);
1823       if (!Arg.Flags.isByVal())
1824         Chains.push_back(Val.getValue(1));
1825       continue;
1826     }
1827 
1828     assert(VA.isRegLoc() && "Parameter must be in a register!");
1829 
1830     unsigned Reg = VA.getLocReg();
1831     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
1832     EVT ValVT = VA.getValVT();
1833 
1834     Reg = MF.addLiveIn(Reg, RC);
1835     SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
1836 
1837     if (Arg.Flags.isSRet() && !getSubtarget()->enableHugePrivateBuffer()) {
1838       // The return object should be reasonably addressable.
1839 
1840       // FIXME: This helps when the return is a real sret. If it is an
1841       // automatically inserted sret (i.e. CanLowerReturn returns false), an
1842       // extra copy is inserted in SelectionDAGBuilder which obscures this.
1843       unsigned NumBits = 32 - AssumeFrameIndexHighZeroBits;
1844       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
1845         DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
1846     }
1847 
1848     // If this is an 8 or 16-bit value, it is really passed promoted
1849     // to 32 bits. Insert an assert[sz]ext to capture this, then
1850     // truncate to the right size.
1851     switch (VA.getLocInfo()) {
1852     case CCValAssign::Full:
1853       break;
1854     case CCValAssign::BCvt:
1855       Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
1856       break;
1857     case CCValAssign::SExt:
1858       Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
1859                         DAG.getValueType(ValVT));
1860       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1861       break;
1862     case CCValAssign::ZExt:
1863       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
1864                         DAG.getValueType(ValVT));
1865       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1866       break;
1867     case CCValAssign::AExt:
1868       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1869       break;
1870     default:
1871       llvm_unreachable("Unknown loc info!");
1872     }
1873 
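    // Shader vector arguments were split into scalars above, so rebuild the
    // vector here from consecutive argument registers, padding any missing
    // trailing elements with undef.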
1874     if (IsShader && Arg.VT.isVector()) {
1875       // Build a vector from the registers
1876       Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
1877       unsigned NumElements = ParamType->getVectorNumElements();
1878 
1879       SmallVector<SDValue, 4> Regs;
1880       Regs.push_back(Val);
1881       for (unsigned j = 1; j != NumElements; ++j) {
1882         Reg = ArgLocs[ArgIdx++].getLocReg();
1883         Reg = MF.addLiveIn(Reg, RC);
1884 
1885         SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
1886         Regs.push_back(Copy);
1887       }
1888 
1889       // Fill up the missing vector elements
1890       NumElements = Arg.VT.getVectorNumElements() - NumElements;
1891       Regs.append(NumElements, DAG.getUNDEF(VT));
1892 
1893       InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
1894       continue;
1895     }
1896 
1897     InVals.push_back(Val);
1898   }
1899 
1900   if (!IsEntryFunc) {
1901     // Special inputs come after user arguments.
1902     allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
1903   }
1904 
1905   // Start adding system SGPRs.
1906   if (IsEntryFunc) {
1907     allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
1908   } else {
1909     CCInfo.AllocateReg(Info->getScratchRSrcReg());
1910     CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
1911     CCInfo.AllocateReg(Info->getFrameOffsetReg());
1912     allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
1913   }
1914 
1915   auto &ArgUsageInfo =
1916     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
1917   ArgUsageInfo.setFuncArgInfo(MF.getFunction(), Info->getArgInfo());
1918 
1919   unsigned StackArgSize = CCInfo.getNextStackOffset();
1920   Info->setBytesInStackArgArea(StackArgSize);
1921 
1922   return Chains.empty() ? Chain :
1923     DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
1924 }
1925 
1926 // TODO: If return values can't fit in registers, we should return as many as
1927 // possible in registers before passing on stack.
1928 bool SITargetLowering::CanLowerReturn(
1929   CallingConv::ID CallConv,
1930   MachineFunction &MF, bool IsVarArg,
1931   const SmallVectorImpl<ISD::OutputArg> &Outs,
1932   LLVMContext &Context) const {
1933   // Replacing returns with sret/stack usage doesn't make sense for shaders.
1934   // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
1935   // for shaders. Vector types should be explicitly handled by CC.
1936   if (AMDGPU::isEntryFunctionCC(CallConv))
1937     return true;
1938 
1939   SmallVector<CCValAssign, 16> RVLocs;
1940   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
1941   return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
1942 }
1943 
1944 SDValue
1945 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1946                               bool isVarArg,
1947                               const SmallVectorImpl<ISD::OutputArg> &Outs,
1948                               const SmallVectorImpl<SDValue> &OutVals,
1949                               const SDLoc &DL, SelectionDAG &DAG) const {
1950   MachineFunction &MF = DAG.getMachineFunction();
1951   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1952 
1953   if (AMDGPU::isKernel(CallConv)) {
1954     return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
1955                                              OutVals, DL, DAG);
1956   }
1957 
1958   bool IsShader = AMDGPU::isShader(CallConv);
1959 
1960   Info->setIfReturnsVoid(Outs.size() == 0);
1961   bool IsWaveEnd = Info->returnsVoid() && IsShader;
1962 
1963   SmallVector<ISD::OutputArg, 48> Splits;
1964   SmallVector<SDValue, 48> SplitVals;
1965 
1966   // Split vectors into their elements.
1967   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
1968     const ISD::OutputArg &Out = Outs[i];
1969 
1970     if (IsShader && Out.VT.isVector()) {
1971       MVT VT = Out.VT.getVectorElementType();
1972       ISD::OutputArg NewOut = Out;
1973       NewOut.Flags.setSplit();
1974       NewOut.VT = VT;
1975 
1976       // We want the original number of vector elements here, e.g.
1977       // three or five, not four or eight.
1978       unsigned NumElements = Out.ArgVT.getVectorNumElements();
1979 
1980       for (unsigned j = 0; j != NumElements; ++j) {
1981         SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i],
1982                                    DAG.getConstant(j, DL, MVT::i32));
1983         SplitVals.push_back(Elem);
1984         Splits.push_back(NewOut);
1985         NewOut.PartOffset += NewOut.VT.getStoreSize();
1986       }
1987     } else {
1988       SplitVals.push_back(OutVals[i]);
1989       Splits.push_back(Out);
1990     }
1991   }
1992 
1993   // CCValAssign - represent the assignment of the return value to a location.
1994   SmallVector<CCValAssign, 48> RVLocs;
1995 
1996   // CCState - Info about the registers and stack slots.
1997   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1998                  *DAG.getContext());
1999 
2000   // Analyze outgoing return values.
2001   CCInfo.AnalyzeReturn(Splits, CCAssignFnForReturn(CallConv, isVarArg));
2002 
2003   SDValue Flag;
2004   SmallVector<SDValue, 48> RetOps;
2005   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2006 
2007   // Add return address for callable functions.
2008   if (!Info->isEntryFunction()) {
2009     const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2010     SDValue ReturnAddrReg = CreateLiveInRegister(
2011       DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2012 
2013     // FIXME: Should be able to use a vreg here, but need a way to prevent it
2014     // from being allocated to a CSR.
2015 
2016     SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2017                                                 MVT::i64);
2018 
2019     Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
2020     Flag = Chain.getValue(1);
2021 
2022     RetOps.push_back(PhysReturnAddrReg);
2023   }
2024 
2025   // Copy the result values into the output registers.
2026   for (unsigned i = 0, realRVLocIdx = 0;
2027        i != RVLocs.size();
2028        ++i, ++realRVLocIdx) {
2029     CCValAssign &VA = RVLocs[i];
2030     assert(VA.isRegLoc() && "Can only return in registers!");
2031     // TODO: Partially return in registers if return values don't fit.
2032 
2033     SDValue Arg = SplitVals[realRVLocIdx];
2034 
2035     // Copied from other backends.
2036     switch (VA.getLocInfo()) {
2037     case CCValAssign::Full:
2038       break;
2039     case CCValAssign::BCvt:
2040       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2041       break;
2042     case CCValAssign::SExt:
2043       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2044       break;
2045     case CCValAssign::ZExt:
2046       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2047       break;
2048     case CCValAssign::AExt:
2049       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2050       break;
2051     default:
2052       llvm_unreachable("Unknown loc info!");
2053     }
2054 
2055     Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2056     Flag = Chain.getValue(1);
2057     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2058   }
2059 
2060   // FIXME: Does sret work properly?
2061   if (!Info->isEntryFunction()) {
2062     const SIRegisterInfo *TRI
2063       = static_cast<const SISubtarget *>(Subtarget)->getRegisterInfo();
2064     const MCPhysReg *I =
2065       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2066     if (I) {
2067       for (; *I; ++I) {
2068         if (AMDGPU::SReg_64RegClass.contains(*I))
2069           RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2070         else if (AMDGPU::SReg_32RegClass.contains(*I))
2071           RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2072         else
2073           llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2074       }
2075     }
2076   }
2077 
2078   // Update chain and glue.
2079   RetOps[0] = Chain;
2080   if (Flag.getNode())
2081     RetOps.push_back(Flag);
2082 
2083   unsigned Opc = AMDGPUISD::ENDPGM;
2084   if (!IsWaveEnd)
2085     Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
2086   return DAG.getNode(Opc, DL, MVT::Other, RetOps);
2087 }
2088 
2089 SDValue SITargetLowering::LowerCallResult(
2090     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2091     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2092     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2093     SDValue ThisVal) const {
2094   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2095 
2096   // Assign locations to each value returned by this call.
2097   SmallVector<CCValAssign, 16> RVLocs;
2098   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2099                  *DAG.getContext());
2100   CCInfo.AnalyzeCallResult(Ins, RetCC);
2101 
2102   // Copy all of the result registers out of their specified physreg.
2103   for (unsigned i = 0; i != RVLocs.size(); ++i) {
2104     CCValAssign VA = RVLocs[i];
2105     SDValue Val;
2106 
2107     if (VA.isRegLoc()) {
2108       Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2109       Chain = Val.getValue(1);
2110       InFlag = Val.getValue(2);
2111     } else if (VA.isMemLoc()) {
2112       report_fatal_error("TODO: return values in memory");
2113     } else
2114       llvm_unreachable("unknown argument location type");
2115 
2116     switch (VA.getLocInfo()) {
2117     case CCValAssign::Full:
2118       break;
2119     case CCValAssign::BCvt:
2120       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2121       break;
2122     case CCValAssign::ZExt:
2123       Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2124                         DAG.getValueType(VA.getValVT()));
2125       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2126       break;
2127     case CCValAssign::SExt:
2128       Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2129                         DAG.getValueType(VA.getValVT()));
2130       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2131       break;
2132     case CCValAssign::AExt:
2133       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2134       break;
2135     default:
2136       llvm_unreachable("Unknown loc info!");
2137     }
2138 
2139     InVals.push_back(Val);
2140   }
2141 
2142   return Chain;
2143 }
2144 
2145 // Add code to pass the special inputs that are required depending on the
2146 // features used, separately from the explicit user arguments present in the IR.
2147 void SITargetLowering::passSpecialInputs(
2148     CallLoweringInfo &CLI,
2149     const SIMachineFunctionInfo &Info,
2150     SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2151     SmallVectorImpl<SDValue> &MemOpChains,
2152     SDValue Chain,
2153     SDValue StackPtr) const {
2154   // If we don't have a call site, this was a call inserted by
2155   // legalization. These can never use special inputs.
2156   if (!CLI.CS)
2157     return;
2158 
2159   const Function *CalleeFunc = CLI.CS.getCalledFunction();
2160   assert(CalleeFunc);
2161 
2162   SelectionDAG &DAG = CLI.DAG;
2163   const SDLoc &DL = CLI.DL;
2164 
2165   const SISubtarget *ST = getSubtarget();
2166   const SIRegisterInfo *TRI = ST->getRegisterInfo();
2167 
2168   auto &ArgUsageInfo =
2169     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2170   const AMDGPUFunctionArgInfo &CalleeArgInfo
2171     = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2172 
2173   const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2174 
2175   // TODO: Unify with private memory register handling. This is complicated by
2176   // the fact that at least in kernels, the input argument is not necessarily
2177   // in the same location as the input.
2178   AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
2179     AMDGPUFunctionArgInfo::DISPATCH_PTR,
2180     AMDGPUFunctionArgInfo::QUEUE_PTR,
2181     AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
2182     AMDGPUFunctionArgInfo::DISPATCH_ID,
2183     AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
2184     AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
2185     AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
2186     AMDGPUFunctionArgInfo::WORKITEM_ID_X,
2187     AMDGPUFunctionArgInfo::WORKITEM_ID_Y,
2188     AMDGPUFunctionArgInfo::WORKITEM_ID_Z,
2189     AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
2190   };
2191 
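  // For each special input the callee requires, forward the caller's incoming
  // value, or materialize it (as for the implicit argument pointer in kernels),
  // either in the register or at the stack offset the callee expects.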
2192   for (auto InputID : InputRegs) {
2193     const ArgDescriptor *OutgoingArg;
2194     const TargetRegisterClass *ArgRC;
2195 
2196     std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2197     if (!OutgoingArg)
2198       continue;
2199 
2200     const ArgDescriptor *IncomingArg;
2201     const TargetRegisterClass *IncomingArgRC;
2202     std::tie(IncomingArg, IncomingArgRC)
2203       = CallerArgInfo.getPreloadedValue(InputID);
2204     assert(IncomingArgRC == ArgRC);
2205 
2206     // All special arguments are ints for now.
2207     EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
2208     SDValue InputReg;
2209 
2210     if (IncomingArg) {
2211       InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2212     } else {
2213       // The implicit arg ptr is special because it doesn't have a corresponding
2214       // input for kernels, and is computed from the kernarg segment pointer.
2215       assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2216       InputReg = getImplicitArgPtr(DAG, DL);
2217     }
2218 
2219     if (OutgoingArg->isRegister()) {
2220       RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2221     } else {
2222       SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, StackPtr,
2223                                               InputReg,
2224                                               OutgoingArg->getStackOffset());
2225       MemOpChains.push_back(ArgStore);
2226     }
2227   }
2228 }
2229 
2230 static bool canGuaranteeTCO(CallingConv::ID CC) {
2231   return CC == CallingConv::Fast;
2232 }
2233 
2234 /// Return true if we might ever do TCO for calls with this calling convention.
2235 static bool mayTailCallThisCC(CallingConv::ID CC) {
2236   switch (CC) {
2237   case CallingConv::C:
2238     return true;
2239   default:
2240     return canGuaranteeTCO(CC);
2241   }
2242 }
2243 
2244 bool SITargetLowering::isEligibleForTailCallOptimization(
2245     SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2246     const SmallVectorImpl<ISD::OutputArg> &Outs,
2247     const SmallVectorImpl<SDValue> &OutVals,
2248     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2249   if (!mayTailCallThisCC(CalleeCC))
2250     return false;
2251 
2252   MachineFunction &MF = DAG.getMachineFunction();
2253   const Function &CallerF = MF.getFunction();
2254   CallingConv::ID CallerCC = CallerF.getCallingConv();
2255   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2256   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2257 
2258   // Kernels aren't callable and don't have a live-in return address, so it
2259   // doesn't make sense to do a tail call with entry functions.
2260   if (!CallerPreserved)
2261     return false;
2262 
2263   bool CCMatch = CallerCC == CalleeCC;
2264 
2265   if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
2266     if (canGuaranteeTCO(CalleeCC) && CCMatch)
2267       return true;
2268     return false;
2269   }
2270 
2271   // TODO: Can we handle var args?
2272   if (IsVarArg)
2273     return false;
2274 
2275   for (const Argument &Arg : CallerF.args()) {
2276     if (Arg.hasByValAttr())
2277       return false;
2278   }
2279 
2280   LLVMContext &Ctx = *DAG.getContext();
2281 
2282   // Check that the call results are passed in the same way.
2283   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2284                                   CCAssignFnForCall(CalleeCC, IsVarArg),
2285                                   CCAssignFnForCall(CallerCC, IsVarArg)))
2286     return false;
2287 
2288   // The callee has to preserve all registers the caller needs to preserve.
2289   if (!CCMatch) {
2290     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2291     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2292       return false;
2293   }
2294 
2295   // Nothing more to check if the callee is taking no arguments.
2296   if (Outs.empty())
2297     return true;
2298 
2299   SmallVector<CCValAssign, 16> ArgLocs;
2300   CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2301 
2302   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2303 
2304   const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2305   // If the stack arguments for this call do not fit into our own save area then
2306   // the call cannot be made tail.
2307   // TODO: Is this really necessary?
2308   if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2309     return false;
2310 
2311   const MachineRegisterInfo &MRI = MF.getRegInfo();
2312   return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2313 }
2314 
2315 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2316   if (!CI->isTailCall())
2317     return false;
2318 
2319   const Function *ParentFn = CI->getParent()->getParent();
2320   if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2321     return false;
2322 
2323   auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
2324   return (Attr.getValueAsString() != "true");
2325 }
2326 
2327 // The wave scratch offset register is used as the global base pointer.
2328 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2329                                     SmallVectorImpl<SDValue> &InVals) const {
2330   SelectionDAG &DAG = CLI.DAG;
2331   const SDLoc &DL = CLI.DL;
2332   SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2333   SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2334   SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2335   SDValue Chain = CLI.Chain;
2336   SDValue Callee = CLI.Callee;
2337   bool &IsTailCall = CLI.IsTailCall;
2338   CallingConv::ID CallConv = CLI.CallConv;
2339   bool IsVarArg = CLI.IsVarArg;
2340   bool IsSibCall = false;
2341   bool IsThisReturn = false;
2342   MachineFunction &MF = DAG.getMachineFunction();
2343 
2344   if (IsVarArg) {
2345     return lowerUnhandledCall(CLI, InVals,
2346                               "unsupported call to variadic function ");
2347   }
2348 
2349   if (!CLI.CS.getCalledFunction()) {
2350     return lowerUnhandledCall(CLI, InVals,
2351                               "unsupported indirect call to function ");
2352   }
2353 
2354   if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2355     return lowerUnhandledCall(CLI, InVals,
2356                               "unsupported required tail call to function ");
2357   }
2358 
2359   // The first 4 bytes are reserved for the callee's emergency stack slot.
2360   const unsigned CalleeUsableStackOffset = 4;
2361 
2362   if (IsTailCall) {
2363     IsTailCall = isEligibleForTailCallOptimization(
2364       Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2365     if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2366       report_fatal_error("failed to perform tail call elimination on a call "
2367                          "site marked musttail");
2368     }
2369 
2370     bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2371 
2372     // A sibling call is one where we're under the usual C ABI and not planning
2373     // to change that but can still do a tail call:
2374     if (!TailCallOpt && IsTailCall)
2375       IsSibCall = true;
2376 
2377     if (IsTailCall)
2378       ++NumTailCalls;
2379   }
2380 
2381   if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee)) {
2382     // FIXME: Remove this hack for function pointer types after removing
2383     // support for the old address space mapping. In the new address space
2384     // mapping the pointer in the default address space is 64 bit, so it
2385     // does not need this hack.
2386     if (Callee.getValueType() == MVT::i32) {
2387       const GlobalValue *GV = GA->getGlobal();
2388       Callee = DAG.getGlobalAddress(GV, DL, MVT::i64, GA->getOffset(), false,
2389                                     GA->getTargetFlags());
2390     }
2391   }
2392   assert(Callee.getValueType() == MVT::i64);
2393 
2394   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2395 
2396   // Analyze operands of the call, assigning locations to each operand.
2397   SmallVector<CCValAssign, 16> ArgLocs;
2398   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2399   CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
2400   CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2401 
2402   // Get a count of how many bytes are to be pushed on the stack.
2403   unsigned NumBytes = CCInfo.getNextStackOffset();
2404 
2405   if (IsSibCall) {
2406     // Since we're not changing the ABI to make this a tail call, the memory
2407     // operands are already available in the caller's incoming argument space.
2408     NumBytes = 0;
2409   }
2410 
2411   // FPDiff is the byte offset of the call's argument area from the callee's.
2412   // Stores to callee stack arguments will be placed in FixedStackSlots offset
2413   // by this amount for a tail call. In a sibling call it must be 0 because the
2414   // caller will deallocate the entire stack and the callee still expects its
2415   // arguments to begin at SP+0. Completely unused for non-tail calls.
2416   int32_t FPDiff = 0;
2417   MachineFrameInfo &MFI = MF.getFrameInfo();
2418   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2419 
2420   SDValue CallerSavedFP;
2421 
2422   // Adjust the stack pointer for the new arguments...
2423   // These operations are automatically eliminated by the prolog/epilog pass
2424   if (!IsSibCall) {
2425     Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2426 
2427     unsigned OffsetReg = Info->getScratchWaveOffsetReg();
2428 
2429     // In the HSA case, this should be an identity copy.
2430     SDValue ScratchRSrcReg
2431       = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2432     RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
2433 
2434     // TODO: Don't hardcode these registers and get from the callee function.
2435     SDValue ScratchWaveOffsetReg
2436       = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32);
2437     RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg);
2438 
2439     if (!Info->isEntryFunction()) {
2440       // Avoid clobbering this function's FP value. In the current convention
2441       // the callee will overwrite this, so save/restore it around the call site.
2442       CallerSavedFP = DAG.getCopyFromReg(Chain, DL,
2443                                          Info->getFrameOffsetReg(), MVT::i32);
2444     }
2445   }
2446 
2447   // Stack pointer relative accesses are done by changing the offset SGPR. This
2448   // is just the VGPR offset component.
2449   SDValue StackPtr = DAG.getConstant(CalleeUsableStackOffset, DL, MVT::i32);
2450 
2451   SmallVector<SDValue, 8> MemOpChains;
2452   MVT PtrVT = MVT::i32;
2453 
2454   // Walk the register/memloc assignments, inserting copies/loads.
2455   for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2456        ++i, ++realArgIdx) {
2457     CCValAssign &VA = ArgLocs[i];
2458     SDValue Arg = OutVals[realArgIdx];
2459 
2460     // Promote the value if needed.
2461     switch (VA.getLocInfo()) {
2462     case CCValAssign::Full:
2463       break;
2464     case CCValAssign::BCvt:
2465       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2466       break;
2467     case CCValAssign::ZExt:
2468       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2469       break;
2470     case CCValAssign::SExt:
2471       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2472       break;
2473     case CCValAssign::AExt:
2474       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2475       break;
2476     case CCValAssign::FPExt:
2477       Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2478       break;
2479     default:
2480       llvm_unreachable("Unknown loc info!");
2481     }
2482 
2483     if (VA.isRegLoc()) {
2484       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2485     } else {
2486       assert(VA.isMemLoc());
2487 
2488       SDValue DstAddr;
2489       MachinePointerInfo DstInfo;
2490 
2491       unsigned LocMemOffset = VA.getLocMemOffset();
2492       int32_t Offset = LocMemOffset;
2493 
2494       SDValue PtrOff = DAG.getObjectPtrOffset(DL, StackPtr, Offset);
2495 
2496       if (IsTailCall) {
2497         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2498         unsigned OpSize = Flags.isByVal() ?
2499           Flags.getByValSize() : VA.getValVT().getStoreSize();
2500 
2501         Offset = Offset + FPDiff;
2502         int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2503 
2504         DstAddr = DAG.getObjectPtrOffset(DL, DAG.getFrameIndex(FI, PtrVT),
2505                                          StackPtr);
2506         DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2507 
2508         // Make sure any stack arguments overlapping with where we're storing
2509         // are loaded before this eventual operation. Otherwise they'll be
2510         // clobbered.
2511 
2512         // FIXME: Why is this really necessary? This seems to just result in a
2513         // lot of code to copy the stack arguments and write them back to the
2514         // same locations, which are supposed to be immutable?
2515         Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2516       } else {
2517         DstAddr = PtrOff;
2518         DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
2519       }
2520 
2521       if (Outs[i].Flags.isByVal()) {
2522         SDValue SizeNode =
2523             DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2524         SDValue Cpy = DAG.getMemcpy(
2525             Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2526             /*isVol = */ false, /*AlwaysInline = */ true,
2527             /*isTailCall = */ false, DstInfo,
2528             MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
2529                 *DAG.getContext(), AMDGPUASI.PRIVATE_ADDRESS))));
2530 
2531         MemOpChains.push_back(Cpy);
2532       } else {
2533         SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo);
2534         MemOpChains.push_back(Store);
2535       }
2536     }
2537   }
2538 
2539   // Copy special input registers after user input arguments.
2540   passSpecialInputs(CLI, *Info, RegsToPass, MemOpChains, Chain, StackPtr);
2541 
2542   if (!MemOpChains.empty())
2543     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2544 
2545   // Build a sequence of copy-to-reg nodes chained together with token chain
2546   // and flag operands which copy the outgoing args into the appropriate regs.
2547   SDValue InFlag;
2548   for (auto &RegToPass : RegsToPass) {
2549     Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2550                              RegToPass.second, InFlag);
2551     InFlag = Chain.getValue(1);
2552   }
2553 
2554 
2555   SDValue PhysReturnAddrReg;
2556   if (IsTailCall) {
2557     // Since the return is being combined with the call, we need to pass on the
2558     // return address.
2559 
2560     const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2561     SDValue ReturnAddrReg = CreateLiveInRegister(
2562       DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2563 
2564     PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2565                                         MVT::i64);
2566     Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2567     InFlag = Chain.getValue(1);
2568   }
2569 
2570   // We don't usually want to end the call-sequence here because we would tidy
2571   // the frame up *after* the call. However, in the ABI-changing tail-call case
2572   // we've carefully laid out the parameters so that when sp is reset they'll be
2573   // in the correct location.
2574   if (IsTailCall && !IsSibCall) {
2575     Chain = DAG.getCALLSEQ_END(Chain,
2576                                DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2577                                DAG.getTargetConstant(0, DL, MVT::i32),
2578                                InFlag, DL);
2579     InFlag = Chain.getValue(1);
2580   }
2581 
2582   std::vector<SDValue> Ops;
2583   Ops.push_back(Chain);
2584   Ops.push_back(Callee);
2585 
2586   if (IsTailCall) {
2587     // Each tail call may have to adjust the stack by a different amount, so
2588     // this information must travel along with the operation for eventual
2589     // consumption by emitEpilogue.
2590     Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
2591 
2592     Ops.push_back(PhysReturnAddrReg);
2593   }
2594 
2595   // Add argument registers to the end of the list so that they are known live
2596   // into the call.
2597   for (auto &RegToPass : RegsToPass) {
2598     Ops.push_back(DAG.getRegister(RegToPass.first,
2599                                   RegToPass.second.getValueType()));
2600   }
2601 
2602   // Add a register mask operand representing the call-preserved registers.
2603 
2604   const AMDGPURegisterInfo *TRI = Subtarget->getRegisterInfo();
2605   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2606   assert(Mask && "Missing call preserved mask for calling convention");
2607   Ops.push_back(DAG.getRegisterMask(Mask));
2608 
2609   if (InFlag.getNode())
2610     Ops.push_back(InFlag);
2611 
2612   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2613 
2614   // If we're doing a tail call, use a TC_RETURN here rather than an
2615   // actual call instruction.
2616   if (IsTailCall) {
2617     MFI.setHasTailCall();
2618     return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
2619   }
2620 
2621   // Returns a chain and a flag for retval copy to use.
2622   SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2623   Chain = Call.getValue(0);
2624   InFlag = Call.getValue(1);
2625 
2626   if (CallerSavedFP) {
2627     SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32);
2628     Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag);
2629     InFlag = Chain.getValue(1);
2630   }
2631 
2632   uint64_t CalleePopBytes = NumBytes;
2633   Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
2634                              DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2635                              InFlag, DL);
2636   if (!Ins.empty())
2637     InFlag = Chain.getValue(1);
2638 
2639   // Handle result values, copying them out of physregs into vregs that we
2640   // return.
2641   return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2642                          InVals, IsThisReturn,
2643                          IsThisReturn ? OutVals[0] : SDValue());
2644 }
2645 
2646 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2647                                              SelectionDAG &DAG) const {
2648   unsigned Reg = StringSwitch<unsigned>(RegName)
2649     .Case("m0", AMDGPU::M0)
2650     .Case("exec", AMDGPU::EXEC)
2651     .Case("exec_lo", AMDGPU::EXEC_LO)
2652     .Case("exec_hi", AMDGPU::EXEC_HI)
2653     .Case("flat_scratch", AMDGPU::FLAT_SCR)
2654     .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2655     .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2656     .Default(AMDGPU::NoRegister);
2657 
2658   if (Reg == AMDGPU::NoRegister) {
2659     report_fatal_error(Twine("invalid register name \""
2660                              + StringRef(RegName)  + "\"."));
2661 
2662   }
2663 
2664   if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
2665       Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
2666     report_fatal_error(Twine("invalid register \""
2667                              + StringRef(RegName)  + "\" for subtarget."));
2668   }
2669 
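  // The named register exists; now make sure the requested type matches the
  // register's width.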
2670   switch (Reg) {
2671   case AMDGPU::M0:
2672   case AMDGPU::EXEC_LO:
2673   case AMDGPU::EXEC_HI:
2674   case AMDGPU::FLAT_SCR_LO:
2675   case AMDGPU::FLAT_SCR_HI:
2676     if (VT.getSizeInBits() == 32)
2677       return Reg;
2678     break;
2679   case AMDGPU::EXEC:
2680   case AMDGPU::FLAT_SCR:
2681     if (VT.getSizeInBits() == 64)
2682       return Reg;
2683     break;
2684   default:
2685     llvm_unreachable("missing register type checking");
2686   }
2687 
2688   report_fatal_error(Twine("invalid type for register \""
2689                            + StringRef(RegName) + "\"."));
2690 }
2691 
2692 // If kill is not the last instruction, split the block so kill is always a
2693 // proper terminator.
2694 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
2695                                                     MachineBasicBlock *BB) const {
2696   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
2697 
2698   MachineBasicBlock::iterator SplitPoint(&MI);
2699   ++SplitPoint;
2700 
2701   if (SplitPoint == BB->end()) {
2702     // Don't bother with a new block.
2703     MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
2704     return BB;
2705   }
2706 
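  // Otherwise, move everything after the kill into a new successor block and
  // turn the kill pseudo into a terminator of the original block.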
2707   MachineFunction *MF = BB->getParent();
2708   MachineBasicBlock *SplitBB
2709     = MF->CreateMachineBasicBlock(BB->getBasicBlock());
2710 
2711   MF->insert(++MachineFunction::iterator(BB), SplitBB);
2712   SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
2713 
2714   SplitBB->transferSuccessorsAndUpdatePHIs(BB);
2715   BB->addSuccessor(SplitBB);
2716 
2717   MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
2718   return SplitBB;
2719 }
2720 
2721 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
2722 // wavefront. If the value is uniform and just happens to be in a VGPR, this
2723 // will only do one iteration. In the worst case, this will loop 64 times.
2724 //
2725 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
2726 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
2727   const SIInstrInfo *TII,
2728   MachineRegisterInfo &MRI,
2729   MachineBasicBlock &OrigBB,
2730   MachineBasicBlock &LoopBB,
2731   const DebugLoc &DL,
2732   const MachineOperand &IdxReg,
2733   unsigned InitReg,
2734   unsigned ResultReg,
2735   unsigned PhiReg,
2736   unsigned InitSaveExecReg,
2737   int Offset,
2738   bool UseGPRIdxMode) {
2739   MachineBasicBlock::iterator I = LoopBB.begin();
2740 
2741   unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2742   unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2743   unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2744   unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2745 
2746   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
2747     .addReg(InitReg)
2748     .addMBB(&OrigBB)
2749     .addReg(ResultReg)
2750     .addMBB(&LoopBB);
2751 
2752   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
2753     .addReg(InitSaveExecReg)
2754     .addMBB(&OrigBB)
2755     .addReg(NewExec)
2756     .addMBB(&LoopBB);
2757 
  // Read the next variant; this instruction is also the loop's branch target.
2759   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
2760     .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
2761 
  // Compare the just-read value against the per-lane index values.
2763   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
2764     .addReg(CurrentIdxReg)
2765     .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
2766 
2767   if (UseGPRIdxMode) {
2768     unsigned IdxReg;
2769     if (Offset == 0) {
2770       IdxReg = CurrentIdxReg;
2771     } else {
2772       IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2773       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
2774         .addReg(CurrentIdxReg, RegState::Kill)
2775         .addImm(Offset);
2776     }
2777 
2778     MachineInstr *SetIdx =
2779       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_IDX))
2780       .addReg(IdxReg, RegState::Kill);
2781     SetIdx->getOperand(2).setIsUndef();
2782   } else {
    // Move the current index into M0.
2784     if (Offset == 0) {
2785       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2786         .addReg(CurrentIdxReg, RegState::Kill);
2787     } else {
2788       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
2789         .addReg(CurrentIdxReg, RegState::Kill)
2790         .addImm(Offset);
2791     }
2792   }
2793 
  // Update EXEC, saving the original EXEC value into NewExec.
2795   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
2796     .addReg(CondReg, RegState::Kill);
2797 
2798   MRI.setSimpleHint(NewExec, CondReg);
2799 
2800   // Update EXEC, switch all done bits to 0 and all todo bits to 1.
2801   MachineInstr *InsertPt =
2802     BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
2803     .addReg(AMDGPU::EXEC)
2804     .addReg(NewExec);
2805 
2806   // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
2807   // s_cbranch_scc0?
2808 
2809   // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
2810   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
2811     .addMBB(&LoopBB);
2812 
2813   return InsertPt->getIterator();
2814 }
2815 
// This has slightly sub-optimal register allocation when the source vector is
// killed by the read. The register allocator does not understand that the kill
// is per-workitem, so the vector is kept alive for the whole loop; as a result
// we do not reuse a subregister from it and use one more VGPR than necessary.
// That extra register was avoided back when this was expanded after register
// allocation.
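//
// The resulting control flow is roughly (illustrative):
//
//   MBB:                          ; original block, ends with the exec save
//     s_mov_b64 %save_exec, exec
//   LoopBB:                       ; successors: LoopBB, RemainderBB
//     <waterfall loop emitted by emitLoadM0FromVGPRLoop>
//   RemainderBB:                  ; rest of the original block
//     s_mov_b64 exec, %save_exec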
2821 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
2822                                                   MachineBasicBlock &MBB,
2823                                                   MachineInstr &MI,
2824                                                   unsigned InitResultReg,
2825                                                   unsigned PhiReg,
2826                                                   int Offset,
2827                                                   bool UseGPRIdxMode) {
2828   MachineFunction *MF = MBB.getParent();
2829   MachineRegisterInfo &MRI = MF->getRegInfo();
2830   const DebugLoc &DL = MI.getDebugLoc();
2831   MachineBasicBlock::iterator I(&MI);
2832 
2833   unsigned DstReg = MI.getOperand(0).getReg();
2834   unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
2835   unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
2836 
2837   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
2838 
2839   // Save the EXEC mask
2840   BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
2841     .addReg(AMDGPU::EXEC);
2842 
2843   // To insert the loop we need to split the block. Move everything after this
2844   // point to a new block, and insert a new empty block between the two.
2845   MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
2846   MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
2847   MachineFunction::iterator MBBI(MBB);
2848   ++MBBI;
2849 
2850   MF->insert(MBBI, LoopBB);
2851   MF->insert(MBBI, RemainderBB);
2852 
2853   LoopBB->addSuccessor(LoopBB);
2854   LoopBB->addSuccessor(RemainderBB);
2855 
2856   // Move the rest of the block into a new block.
2857   RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
2858   RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
2859 
2860   MBB.addSuccessor(LoopBB);
2861 
2862   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2863 
2864   auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
2865                                       InitResultReg, DstReg, PhiReg, TmpExec,
2866                                       Offset, UseGPRIdxMode);
2867 
2868   MachineBasicBlock::iterator First = RemainderBB->begin();
2869   BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
2870     .addReg(SaveExec);
2871 
2872   return InsPt;
2873 }
2874 
2875 // Returns subreg index, offset
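// For example, with a 128-bit (4 x 32-bit) register class, an in-bounds
// offset of 2 yields (sub2, 0), while an out-of-bounds offset of 5 yields
// (sub0, 5).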
2876 static std::pair<unsigned, int>
2877 computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
2878                             const TargetRegisterClass *SuperRC,
2879                             unsigned VecReg,
2880                             int Offset) {
2881   int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
2882 
2883   // Skip out of bounds offsets, or else we would end up using an undefined
2884   // register.
2885   if (Offset >= NumElts || Offset < 0)
2886     return std::make_pair(AMDGPU::sub0, Offset);
2887 
2888   return std::make_pair(AMDGPU::sub0 + Offset, 0);
2889 }
2890 
2891 // Return true if the index is an SGPR and was set.
2892 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
2893                                  MachineRegisterInfo &MRI,
2894                                  MachineInstr &MI,
2895                                  int Offset,
2896                                  bool UseGPRIdxMode,
2897                                  bool IsIndirectSrc) {
2898   MachineBasicBlock *MBB = MI.getParent();
2899   const DebugLoc &DL = MI.getDebugLoc();
2900   MachineBasicBlock::iterator I(&MI);
2901 
2902   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2903   const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
2904 
2905   assert(Idx->getReg() != AMDGPU::NoRegister);
2906 
2907   if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
2908     return false;
2909 
2910   if (UseGPRIdxMode) {
2911     unsigned IdxMode = IsIndirectSrc ?
2912       VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
2913     if (Offset == 0) {
2914       MachineInstr *SetOn =
2915           BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2916               .add(*Idx)
2917               .addImm(IdxMode);
2918 
2919       SetOn->getOperand(3).setIsUndef();
2920     } else {
2921       unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
2922       BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
2923           .add(*Idx)
2924           .addImm(Offset);
2925       MachineInstr *SetOn =
2926         BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2927         .addReg(Tmp, RegState::Kill)
2928         .addImm(IdxMode);
2929 
2930       SetOn->getOperand(3).setIsUndef();
2931     }
2932 
2933     return true;
2934   }
2935 
2936   if (Offset == 0) {
2937     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2938       .add(*Idx);
2939   } else {
2940     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
2941       .add(*Idx)
2942       .addImm(Offset);
2943   }
2944 
2945   return true;
2946 }
2947 
2948 // Control flow needs to be inserted if indexing with a VGPR.
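// If the index is already in an SGPR, a single indexed move suffices,
// roughly (an illustrative sketch):
//
//   s_mov_b32 m0, s_idx            ; or s_set_gpr_idx_on in GPR indexing mode
//   v_movrels_b32_e32 v_dst, v_vec_sub0
//
// Otherwise, a waterfall loop over the unique index values is emitted via
// loadM0FromVGPR.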
2949 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
2950                                           MachineBasicBlock &MBB,
2951                                           const SISubtarget &ST) {
2952   const SIInstrInfo *TII = ST.getInstrInfo();
2953   const SIRegisterInfo &TRI = TII->getRegisterInfo();
2954   MachineFunction *MF = MBB.getParent();
2955   MachineRegisterInfo &MRI = MF->getRegInfo();
2956 
2957   unsigned Dst = MI.getOperand(0).getReg();
2958   unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
2959   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
2960 
2961   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
2962 
2963   unsigned SubReg;
2964   std::tie(SubReg, Offset)
2965     = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
2966 
2967   bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
2968 
2969   if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
2970     MachineBasicBlock::iterator I(&MI);
2971     const DebugLoc &DL = MI.getDebugLoc();
2972 
2973     if (UseGPRIdxMode) {
2974       // TODO: Look at the uses to avoid the copy. This may require rescheduling
2975       // to avoid interfering with other uses, so probably requires a new
2976       // optimization pass.
2977       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
2978         .addReg(SrcReg, RegState::Undef, SubReg)
2979         .addReg(SrcReg, RegState::Implicit)
2980         .addReg(AMDGPU::M0, RegState::Implicit);
2981       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
2982     } else {
2983       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
2984         .addReg(SrcReg, RegState::Undef, SubReg)
2985         .addReg(SrcReg, RegState::Implicit);
2986     }
2987 
2988     MI.eraseFromParent();
2989 
2990     return &MBB;
2991   }
2992 
2993   const DebugLoc &DL = MI.getDebugLoc();
2994   MachineBasicBlock::iterator I(&MI);
2995 
2996   unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2997   unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2998 
2999   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
3000 
3001   if (UseGPRIdxMode) {
    MachineInstr *SetOn =
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
      .addImm(0) // Reset inside loop.
      .addImm(VGPRIndexMode::SRC0_ENABLE);
3005     SetOn->getOperand(3).setIsUndef();
3006 
3007     // Disable again after the loop.
3008     BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3009   }
3010 
  auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset,
                              UseGPRIdxMode);
3012   MachineBasicBlock *LoopBB = InsPt->getParent();
3013 
3014   if (UseGPRIdxMode) {
3015     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3016       .addReg(SrcReg, RegState::Undef, SubReg)
3017       .addReg(SrcReg, RegState::Implicit)
3018       .addReg(AMDGPU::M0, RegState::Implicit);
3019   } else {
3020     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3021       .addReg(SrcReg, RegState::Undef, SubReg)
3022       .addReg(SrcReg, RegState::Implicit);
3023   }
3024 
3025   MI.eraseFromParent();
3026 
3027   return LoopBB;
3028 }
3029 
3030 static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
3031                                  const TargetRegisterClass *VecRC) {
3032   switch (TRI.getRegSizeInBits(*VecRC)) {
3033   case 32: // 4 bytes
3034     return AMDGPU::V_MOVRELD_B32_V1;
3035   case 64: // 8 bytes
3036     return AMDGPU::V_MOVRELD_B32_V2;
3037   case 128: // 16 bytes
3038     return AMDGPU::V_MOVRELD_B32_V4;
3039   case 256: // 32 bytes
3040     return AMDGPU::V_MOVRELD_B32_V8;
3041   case 512: // 64 bytes
3042     return AMDGPU::V_MOVRELD_B32_V16;
3043   default:
3044     llvm_unreachable("unsupported size for MOVRELD pseudos");
3045   }
3046 }
3047 
3048 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3049                                           MachineBasicBlock &MBB,
3050                                           const SISubtarget &ST) {
3051   const SIInstrInfo *TII = ST.getInstrInfo();
3052   const SIRegisterInfo &TRI = TII->getRegisterInfo();
3053   MachineFunction *MF = MBB.getParent();
3054   MachineRegisterInfo &MRI = MF->getRegInfo();
3055 
3056   unsigned Dst = MI.getOperand(0).getReg();
3057   const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3058   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3059   const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3060   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3061   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3062 
3063   // This can be an immediate, but will be folded later.
3064   assert(Val->getReg());
3065 
3066   unsigned SubReg;
3067   std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3068                                                          SrcVec->getReg(),
3069                                                          Offset);
3070   bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
3071 
3072   if (Idx->getReg() == AMDGPU::NoRegister) {
3073     MachineBasicBlock::iterator I(&MI);
3074     const DebugLoc &DL = MI.getDebugLoc();
3075 
3076     assert(Offset == 0);
3077 
3078     BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
3079         .add(*SrcVec)
3080         .add(*Val)
3081         .addImm(SubReg);
3082 
3083     MI.eraseFromParent();
3084     return &MBB;
3085   }
3086 
3087   if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
3088     MachineBasicBlock::iterator I(&MI);
3089     const DebugLoc &DL = MI.getDebugLoc();
3090 
3091     if (UseGPRIdxMode) {
3092       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3093           .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
3094           .add(*Val)
3095           .addReg(Dst, RegState::ImplicitDefine)
3096           .addReg(SrcVec->getReg(), RegState::Implicit)
3097           .addReg(AMDGPU::M0, RegState::Implicit);
3098 
3099       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3100     } else {
3101       const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3102 
3103       BuildMI(MBB, I, DL, MovRelDesc)
3104           .addReg(Dst, RegState::Define)
3105           .addReg(SrcVec->getReg())
3106           .add(*Val)
3107           .addImm(SubReg - AMDGPU::sub0);
3108     }
3109 
3110     MI.eraseFromParent();
3111     return &MBB;
3112   }
3113 
3114   if (Val->isReg())
3115     MRI.clearKillFlags(Val->getReg());
3116 
3117   const DebugLoc &DL = MI.getDebugLoc();
3118 
3119   if (UseGPRIdxMode) {
3120     MachineBasicBlock::iterator I(&MI);
3121 
    MachineInstr *SetOn =
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
      .addImm(0) // Reset inside loop.
      .addImm(VGPRIndexMode::DST_ENABLE);
3125     SetOn->getOperand(3).setIsUndef();
3126 
3127     // Disable again after the loop.
3128     BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3129   }
3130 
3131   unsigned PhiReg = MRI.createVirtualRegister(VecRC);
3132 
3133   auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
3134                               Offset, UseGPRIdxMode);
3135   MachineBasicBlock *LoopBB = InsPt->getParent();
3136 
3137   if (UseGPRIdxMode) {
3138     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3139         .addReg(PhiReg, RegState::Undef, SubReg) // vdst
3140         .add(*Val)                               // src0
3141         .addReg(Dst, RegState::ImplicitDefine)
3142         .addReg(PhiReg, RegState::Implicit)
3143         .addReg(AMDGPU::M0, RegState::Implicit);
3144   } else {
3145     const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3146 
3147     BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
3148         .addReg(Dst, RegState::Define)
3149         .addReg(PhiReg)
3150         .add(*Val)
3151         .addImm(SubReg - AMDGPU::sub0);
3152   }
3153 
3154   MI.eraseFromParent();
3155 
3156   return LoopBB;
3157 }
3158 
3159 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3160   MachineInstr &MI, MachineBasicBlock *BB) const {
3161 
3162   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3163   MachineFunction *MF = BB->getParent();
3164   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3165 
3166   if (TII->isMIMG(MI)) {
3167     if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3168       report_fatal_error("missing mem operand from MIMG instruction");
3169     }
    // Add a memoperand for mimg instructions so that they aren't assumed to
    // be ordered memory instructions.
3172 
3173     return BB;
3174   }
3175 
3176   switch (MI.getOpcode()) {
3177   case AMDGPU::S_ADD_U64_PSEUDO:
3178   case AMDGPU::S_SUB_U64_PSEUDO: {
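    // Expand the 64-bit scalar add/sub into a pair of 32-bit operations on
    // the low and high halves, roughly (illustrative):
    //   s_add_u32  %dst_lo, %src0_lo, %src1_lo
    //   s_addc_u32 %dst_hi, %src0_hi, %src1_hi
    // (or s_sub_u32 / s_subb_u32), recombined with a REG_SEQUENCE.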
3179     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3180     const DebugLoc &DL = MI.getDebugLoc();
3181 
3182     MachineOperand &Dest = MI.getOperand(0);
3183     MachineOperand &Src0 = MI.getOperand(1);
3184     MachineOperand &Src1 = MI.getOperand(2);
3185 
3186     unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3187     unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3188 
3189     MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3190      Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3191      &AMDGPU::SReg_32_XM0RegClass);
3192     MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3193       Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3194       &AMDGPU::SReg_32_XM0RegClass);
3195 
3196     MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3197       Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3198       &AMDGPU::SReg_32_XM0RegClass);
3199     MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3200       Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3201       &AMDGPU::SReg_32_XM0RegClass);
3202 
3203     bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3204 
3205     unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3206     unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3207     BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3208       .add(Src0Sub0)
3209       .add(Src1Sub0);
3210     BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3211       .add(Src0Sub1)
3212       .add(Src1Sub1);
3213     BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3214       .addReg(DestSub0)
3215       .addImm(AMDGPU::sub0)
3216       .addReg(DestSub1)
3217       .addImm(AMDGPU::sub1);
3218     MI.eraseFromParent();
3219     return BB;
3220   }
3221   case AMDGPU::SI_INIT_M0: {
3222     BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
3223             TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3224         .add(MI.getOperand(0));
3225     MI.eraseFromParent();
3226     return BB;
3227   }
3228   case AMDGPU::SI_INIT_EXEC:
3229     // This should be before all vector instructions.
3230     BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3231             AMDGPU::EXEC)
3232         .addImm(MI.getOperand(0).getImm());
3233     MI.eraseFromParent();
3234     return BB;
3235 
3236   case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3237     // Extract the thread count from an SGPR input and set EXEC accordingly.
3238     // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3239     //
3240     // S_BFE_U32 count, input, {shift, 7}
3241     // S_BFM_B64 exec, count, 0
3242     // S_CMP_EQ_U32 count, 64
3243     // S_CMOV_B64 exec, -1
3244     MachineInstr *FirstMI = &*BB->begin();
3245     MachineRegisterInfo &MRI = MF->getRegInfo();
3246     unsigned InputReg = MI.getOperand(0).getReg();
3247     unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3248     bool Found = false;
3249 
3250     // Move the COPY of the input reg to the beginning, so that we can use it.
3251     for (auto I = BB->begin(); I != &MI; I++) {
3252       if (I->getOpcode() != TargetOpcode::COPY ||
3253           I->getOperand(0).getReg() != InputReg)
3254         continue;
3255 
3256       if (I == FirstMI) {
3257         FirstMI = &*++BB->begin();
3258       } else {
3259         I->removeFromParent();
3260         BB->insert(FirstMI, &*I);
3261       }
3262       Found = true;
3263       break;
3264     }
3265     assert(Found);
3266     (void)Found;
3267 
3268     // This should be before all vector instructions.
3269     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3270         .addReg(InputReg)
3271         .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
3272     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
3273             AMDGPU::EXEC)
3274         .addReg(CountReg)
3275         .addImm(0);
3276     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3277         .addReg(CountReg, RegState::Kill)
3278         .addImm(64);
3279     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
3280             AMDGPU::EXEC)
3281         .addImm(-1);
3282     MI.eraseFromParent();
3283     return BB;
3284   }
3285 
3286   case AMDGPU::GET_GROUPSTATICSIZE: {
3287     DebugLoc DL = MI.getDebugLoc();
3288     BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
3289         .add(MI.getOperand(0))
3290         .addImm(MFI->getLDSSize());
3291     MI.eraseFromParent();
3292     return BB;
3293   }
3294   case AMDGPU::SI_INDIRECT_SRC_V1:
3295   case AMDGPU::SI_INDIRECT_SRC_V2:
3296   case AMDGPU::SI_INDIRECT_SRC_V4:
3297   case AMDGPU::SI_INDIRECT_SRC_V8:
3298   case AMDGPU::SI_INDIRECT_SRC_V16:
3299     return emitIndirectSrc(MI, *BB, *getSubtarget());
3300   case AMDGPU::SI_INDIRECT_DST_V1:
3301   case AMDGPU::SI_INDIRECT_DST_V2:
3302   case AMDGPU::SI_INDIRECT_DST_V4:
3303   case AMDGPU::SI_INDIRECT_DST_V8:
3304   case AMDGPU::SI_INDIRECT_DST_V16:
3305     return emitIndirectDst(MI, *BB, *getSubtarget());
3306   case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3307   case AMDGPU::SI_KILL_I1_PSEUDO:
3308     return splitKillBlock(MI, BB);
3309   case AMDGPU::V_CNDMASK_B64_PSEUDO: {
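    // Expand the 64-bit select into two 32-bit v_cndmask operations on the
    // low and high halves, roughly (illustrative):
    //   v_cndmask_b32_e64 %dst_lo, %src0_lo, %src1_lo, %cond
    //   v_cndmask_b32_e64 %dst_hi, %src0_hi, %src1_hi, %cond
    // recombined with a REG_SEQUENCE.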
3310     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3311 
3312     unsigned Dst = MI.getOperand(0).getReg();
3313     unsigned Src0 = MI.getOperand(1).getReg();
3314     unsigned Src1 = MI.getOperand(2).getReg();
3315     const DebugLoc &DL = MI.getDebugLoc();
3316     unsigned SrcCond = MI.getOperand(3).getReg();
3317 
3318     unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3319     unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    unsigned SrcCondCopy =
      MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
3321 
3322     BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3323       .addReg(SrcCond);
3324     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3325       .addReg(Src0, 0, AMDGPU::sub0)
3326       .addReg(Src1, 0, AMDGPU::sub0)
3327       .addReg(SrcCondCopy);
3328     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3329       .addReg(Src0, 0, AMDGPU::sub1)
3330       .addReg(Src1, 0, AMDGPU::sub1)
3331       .addReg(SrcCondCopy);
3332 
3333     BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3334       .addReg(DstLo)
3335       .addImm(AMDGPU::sub0)
3336       .addReg(DstHi)
3337       .addImm(AMDGPU::sub1);
3338     MI.eraseFromParent();
3339     return BB;
3340   }
3341   case AMDGPU::SI_BR_UNDEF: {
3342     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3343     const DebugLoc &DL = MI.getDebugLoc();
3344     MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3345                            .add(MI.getOperand(0));
3346     Br->getOperand(1).setIsUndef(true); // read undef SCC
3347     MI.eraseFromParent();
3348     return BB;
3349   }
3350   case AMDGPU::ADJCALLSTACKUP:
3351   case AMDGPU::ADJCALLSTACKDOWN: {
3352     const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3353     MachineInstrBuilder MIB(*MF, &MI);
3354     MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
3355         .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit);
3356     return BB;
3357   }
3358   case AMDGPU::SI_CALL_ISEL:
3359   case AMDGPU::SI_TCRETURN_ISEL: {
3360     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3361     const DebugLoc &DL = MI.getDebugLoc();
3362     unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
3363 
3364     MachineRegisterInfo &MRI = MF->getRegInfo();
3365     unsigned GlobalAddrReg = MI.getOperand(0).getReg();
3366     MachineInstr *PCRel = MRI.getVRegDef(GlobalAddrReg);
3367     assert(PCRel->getOpcode() == AMDGPU::SI_PC_ADD_REL_OFFSET);
3368 
3369     const GlobalValue *G = PCRel->getOperand(1).getGlobal();
3370 
3371     MachineInstrBuilder MIB;
3372     if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
3373       MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg)
3374         .add(MI.getOperand(0))
3375         .addGlobalAddress(G);
3376     } else {
3377       MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_TCRETURN))
3378         .add(MI.getOperand(0))
3379         .addGlobalAddress(G);
3380 
3381       // There is an additional imm operand for tcreturn, but it should be in the
3382       // right place already.
3383     }
3384 
3385     for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I)
3386       MIB.add(MI.getOperand(I));
3387 
3388     MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
3389     MI.eraseFromParent();
3390     return BB;
3391   }
3392   default:
3393     return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
3394   }
3395 }
3396 
3397 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3398   return isTypeLegal(VT.getScalarType());
3399 }
3400 
3401 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3402   // This currently forces unfolding various combinations of fsub into fma with
3403   // free fneg'd operands. As long as we have fast FMA (controlled by
3404   // isFMAFasterThanFMulAndFAdd), we should perform these.
3405 
3406   // When fma is quarter rate, for f64 where add / sub are at best half rate,
3407   // most of these combines appear to be cycle neutral but save on instruction
3408   // count / code size.
3409   return true;
3410 }
3411 
3412 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3413                                          EVT VT) const {
3414   if (!VT.isVector()) {
3415     return MVT::i1;
3416   }
3417   return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
3418 }
3419 
3420 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3421   // TODO: Should i16 be used always if legal? For now it would force VALU
3422   // shifts.
3423   return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
3424 }
3425 
// Answering this is somewhat tricky and depends on the specific device, since
// different devices have different rates for fma and for f64 operations.
3428 //
3429 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3430 // regardless of which device (although the number of cycles differs between
3431 // devices), so it is always profitable for f64.
3432 //
3433 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3434 // only on full rate devices. Normally, we should prefer selecting v_mad_f32
3435 // which we can always do even without fused FP ops since it returns the same
3436 // result as the separate operations and since it is always full
3437 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
3438 // however does not support denormals, so we do report fma as faster if we have
3439 // a fast fma device and require denormals.
3440 //
3441 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3442   VT = VT.getScalarType();
3443 
3444   switch (VT.getSimpleVT().SimpleTy) {
3445   case MVT::f32:
    // This is as fast on some subtargets. However, we always have full rate
    // f32 mad available, which returns the same result as the separate
    // operations and which we should prefer over fma. We can't use mad if we
    // want to support denormals, so only report fma as faster in that case.
3450     return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32();
3451   case MVT::f64:
3452     return true;
3453   case MVT::f16:
3454     return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
3455   default:
3456     break;
3457   }
3458 
3459   return false;
3460 }
3461 
3462 //===----------------------------------------------------------------------===//
3463 // Custom DAG Lowering Operations
3464 //===----------------------------------------------------------------------===//
3465 
3466 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3467   switch (Op.getOpcode()) {
3468   default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
3469   case ISD::BRCOND: return LowerBRCOND(Op, DAG);
3470   case ISD::LOAD: {
3471     SDValue Result = LowerLOAD(Op, DAG);
3472     assert((!Result.getNode() ||
3473             Result.getNode()->getNumValues() == 2) &&
3474            "Load should return a value and a chain");
3475     return Result;
3476   }
3477 
3478   case ISD::FSIN:
3479   case ISD::FCOS:
3480     return LowerTrig(Op, DAG);
3481   case ISD::SELECT: return LowerSELECT(Op, DAG);
3482   case ISD::FDIV: return LowerFDIV(Op, DAG);
3483   case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
3484   case ISD::STORE: return LowerSTORE(Op, DAG);
3485   case ISD::GlobalAddress: {
3486     MachineFunction &MF = DAG.getMachineFunction();
3487     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3488     return LowerGlobalAddress(MFI, Op, DAG);
3489   }
3490   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3491   case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
3492   case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
3493   case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
3494   case ISD::INSERT_VECTOR_ELT:
3495     return lowerINSERT_VECTOR_ELT(Op, DAG);
3496   case ISD::EXTRACT_VECTOR_ELT:
3497     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3498   case ISD::FP_ROUND:
3499     return lowerFP_ROUND(Op, DAG);
3500   case ISD::TRAP:
3501   case ISD::DEBUGTRAP:
3502     return lowerTRAP(Op, DAG);
3503   }
3504   return SDValue();
3505 }
3506 
3507 static SDValue adjustLoadValueType(SDValue Result, EVT LoadVT, SDLoc DL,
3508                                    SelectionDAG &DAG, bool Unpacked) {
3509   if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
3510     // Truncate to v2i16/v4i16.
3511     EVT IntLoadVT = LoadVT.changeTypeToInteger();
3512     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, IntLoadVT, Result);
3513     // Bitcast to original type (v2f16/v4f16).
3514     return DAG.getNode(ISD::BITCAST, DL, LoadVT, Trunc);
3515   }
3516   // Cast back to the original packed type.
3517   return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
3518 }
3519 
3520 // This is to lower INTRINSIC_W_CHAIN with illegal result types.
3521 SDValue SITargetLowering::lowerIntrinsicWChain_IllegalReturnType(SDValue Op,
3522                                      SDValue &Chain, SelectionDAG &DAG) const {
3523   EVT LoadVT = Op.getValueType();
3524   // TODO: handle v3f16.
3525   if (LoadVT != MVT::v2f16 && LoadVT != MVT::v4f16)
3526     return SDValue();
3527 
3528   bool Unpacked = Subtarget->hasUnpackedD16VMem();
3529   EVT UnpackedLoadVT = (LoadVT == MVT::v2f16) ? MVT::v2i32 : MVT::v4i32;
3530   EVT EquivLoadVT = Unpacked ? UnpackedLoadVT :
3531                                getEquivalentMemType(*DAG.getContext(), LoadVT);
3532   // Change from v4f16/v2f16 to EquivLoadVT.
3533   SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
3534 
3535   SDValue Res;
3536   SDLoc DL(Op);
3537   MemSDNode *M = cast<MemSDNode>(Op);
3538   unsigned IID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
3539   switch (IID) {
3540   case Intrinsic::amdgcn_tbuffer_load: {
3541     SDValue Ops[] = {
3542         Op.getOperand(0),  // Chain
3543         Op.getOperand(2),  // rsrc
3544         Op.getOperand(3),  // vindex
3545         Op.getOperand(4),  // voffset
3546         Op.getOperand(5),  // soffset
3547         Op.getOperand(6),  // offset
3548         Op.getOperand(7),  // dfmt
3549         Op.getOperand(8),  // nfmt
3550         Op.getOperand(9),  // glc
3551         Op.getOperand(10)  // slc
3552     };
3553     Res = DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, DL,
3554                                   VTList, Ops, M->getMemoryVT(),
3555                                   M->getMemOperand());
3556     Chain = Res.getValue(1);
3557     return adjustLoadValueType(Res, LoadVT, DL, DAG, Unpacked);
3558   }
3559   case Intrinsic::amdgcn_buffer_load_format: {
3560       SDValue Ops[] = {
3561         Op.getOperand(0), // Chain
3562         Op.getOperand(2), // rsrc
3563         Op.getOperand(3), // vindex
3564         Op.getOperand(4), // offset
3565         Op.getOperand(5), // glc
3566         Op.getOperand(6)  // slc
3567       };
3568       Res = DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
3569                                      DL, VTList, Ops, M->getMemoryVT(),
3570                                      M->getMemOperand());
3571       Chain = Res.getValue(1);
3572       return adjustLoadValueType(Res, LoadVT, DL, DAG, Unpacked);
3573   }
3574   default:
3575     return SDValue();
3576   }
3577 }
3578 
3579 void SITargetLowering::ReplaceNodeResults(SDNode *N,
3580                                           SmallVectorImpl<SDValue> &Results,
3581                                           SelectionDAG &DAG) const {
3582   switch (N->getOpcode()) {
3583   case ISD::INSERT_VECTOR_ELT: {
3584     if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
3585       Results.push_back(Res);
3586     return;
3587   }
3588   case ISD::EXTRACT_VECTOR_ELT: {
3589     if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
3590       Results.push_back(Res);
3591     return;
3592   }
3593   case ISD::INTRINSIC_WO_CHAIN: {
3594     unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3595     if (IID == Intrinsic::amdgcn_cvt_pkrtz) {
3596       SDValue Src0 = N->getOperand(1);
3597       SDValue Src1 = N->getOperand(2);
3598       SDLoc SL(N);
3599       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
3600                                 Src0, Src1);
3601       Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
3602       return;
3603     }
3604     break;
3605   }
3606   case ISD::INTRINSIC_W_CHAIN: {
3607     SDValue Chain;
3608     if (SDValue Res = lowerIntrinsicWChain_IllegalReturnType(SDValue(N, 0),
3609                                                              Chain, DAG)) {
3610       Results.push_back(Res);
3611       Results.push_back(Chain);
3612       return;
3613     }
3614     break;
3615   }
3616   case ISD::SELECT: {
3617     SDLoc SL(N);
3618     EVT VT = N->getValueType(0);
3619     EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3620     SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
3621     SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
3622 
3623     EVT SelectVT = NewVT;
3624     if (NewVT.bitsLT(MVT::i32)) {
3625       LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
3626       RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
3627       SelectVT = MVT::i32;
3628     }
3629 
3630     SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
3631                                     N->getOperand(0), LHS, RHS);
3632 
3633     if (NewVT != SelectVT)
3634       NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
3635     Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
3636     return;
3637   }
3638   default:
3639     break;
3640   }
3641 }
3642 
3643 /// \brief Helper function for LowerBRCOND
3644 static SDNode *findUser(SDValue Value, unsigned Opcode) {
3645 
3646   SDNode *Parent = Value.getNode();
3647   for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
3648        I != E; ++I) {
3649 
3650     if (I.getUse().get() != Value)
3651       continue;
3652 
3653     if (I->getOpcode() == Opcode)
3654       return *I;
3655   }
3656   return nullptr;
3657 }
3658 
3659 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
3660   if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
3661     switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
3662     case Intrinsic::amdgcn_if:
3663       return AMDGPUISD::IF;
3664     case Intrinsic::amdgcn_else:
3665       return AMDGPUISD::ELSE;
3666     case Intrinsic::amdgcn_loop:
3667       return AMDGPUISD::LOOP;
3668     case Intrinsic::amdgcn_end_cf:
3669       llvm_unreachable("should not occur");
3670     default:
3671       return 0;
3672     }
3673   }
3674 
3675   // break, if_break, else_break are all only used as inputs to loop, not
3676   // directly as branch conditions.
3677   return 0;
3678 }
3679 
3680 void SITargetLowering::createDebuggerPrologueStackObjects(
3681     MachineFunction &MF) const {
3682   // Create stack objects that are used for emitting debugger prologue.
3683   //
3684   // Debugger prologue writes work group IDs and work item IDs to scratch memory
3685   // at fixed location in the following format:
3686   //   offset 0:  work group ID x
3687   //   offset 4:  work group ID y
3688   //   offset 8:  work group ID z
3689   //   offset 16: work item ID x
3690   //   offset 20: work item ID y
3691   //   offset 24: work item ID z
3692   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
3693   int ObjectIdx = 0;
3694 
3695   // For each dimension:
3696   for (unsigned i = 0; i < 3; ++i) {
3697     // Create fixed stack object for work group ID.
3698     ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true);
3699     Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx);
3700     // Create fixed stack object for work item ID.
3701     ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true);
3702     Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx);
3703   }
3704 }
3705 
3706 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
3707   const Triple &TT = getTargetMachine().getTargetTriple();
3708   return GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS &&
3709          AMDGPU::shouldEmitConstantsToTextSection(TT);
3710 }
3711 
3712 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
3713   return (GV->getType()->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
3714               GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) &&
3715          !shouldEmitFixup(GV) &&
3716          !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3717 }
3718 
3719 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
3720   return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
3721 }
3722 
/// This transforms the control flow intrinsics to get the branch destination
/// as their last parameter, and also switches the branch target with BR if
/// the need arises.
3725 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
3726                                       SelectionDAG &DAG) const {
3727   SDLoc DL(BRCOND);
3728 
3729   SDNode *Intr = BRCOND.getOperand(1).getNode();
3730   SDValue Target = BRCOND.getOperand(2);
3731   SDNode *BR = nullptr;
3732   SDNode *SetCC = nullptr;
3733 
3734   if (Intr->getOpcode() == ISD::SETCC) {
3735     // As long as we negate the condition everything is fine
3736     SetCC = Intr;
3737     Intr = SetCC->getOperand(0).getNode();
3738 
3739   } else {
3740     // Get the target from BR if we don't negate the condition
3741     BR = findUser(BRCOND, ISD::BR);
3742     Target = BR->getOperand(1);
3743   }
3744 
3745   // FIXME: This changes the types of the intrinsics instead of introducing new
3746   // nodes with the correct types.
3747   // e.g. llvm.amdgcn.loop
3748 
3749   // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
3750   // =>     t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
3751 
3752   unsigned CFNode = isCFIntrinsic(Intr);
3753   if (CFNode == 0) {
3754     // This is a uniform branch so we don't need to legalize.
3755     return BRCOND;
3756   }
3757 
3758   bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
3759                    Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
3760 
3761   assert(!SetCC ||
3762         (SetCC->getConstantOperandVal(1) == 1 &&
3763          cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
3764                                                              ISD::SETNE));
3765 
3766   // operands of the new intrinsic call
3767   SmallVector<SDValue, 4> Ops;
3768   if (HaveChain)
3769     Ops.push_back(BRCOND.getOperand(0));
3770 
  Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
3772   Ops.push_back(Target);
3773 
3774   ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
3775 
3776   // build the new intrinsic call
3777   SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
3778 
3779   if (!HaveChain) {
3780     SDValue Ops[] =  {
3781       SDValue(Result, 0),
3782       BRCOND.getOperand(0)
3783     };
3784 
3785     Result = DAG.getMergeValues(Ops, DL).getNode();
3786   }
3787 
3788   if (BR) {
3789     // Give the branch instruction our target
3790     SDValue Ops[] = {
3791       BR->getOperand(0),
3792       BRCOND.getOperand(2)
3793     };
3794     SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
3795     DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
3796     BR = NewBR.getNode();
3797   }
3798 
3799   SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
3800 
3801   // Copy the intrinsic results to registers
3802   for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
3803     SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
3804     if (!CopyToReg)
3805       continue;
3806 
3807     Chain = DAG.getCopyToReg(
3808       Chain, DL,
3809       CopyToReg->getOperand(1),
3810       SDValue(Result, i - 1),
3811       SDValue());
3812 
3813     DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
3814   }
3815 
3816   // Remove the old intrinsic from the chain
3817   DAG.ReplaceAllUsesOfValueWith(
3818     SDValue(Intr, Intr->getNumValues() - 1),
3819     Intr->getOperand(0));
3820 
3821   return Chain;
3822 }
3823 
3824 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
3825                                             SDValue Op,
3826                                             const SDLoc &DL,
3827                                             EVT VT) const {
  return Op.getValueType().bitsLE(VT) ?
      DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
      DAG.getNode(ISD::FP_ROUND, DL, VT, Op,
                  DAG.getTargetConstant(0, DL, MVT::i32));
3831 }
3832 
3833 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
3834   assert(Op.getValueType() == MVT::f16 &&
3835          "Do not know how to custom lower FP_ROUND for non-f16 type");
3836 
3837   SDValue Src = Op.getOperand(0);
3838   EVT SrcVT = Src.getValueType();
3839   if (SrcVT != MVT::f64)
3840     return Op;
3841 
3842   SDLoc DL(Op);
3843 
3844   SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
3845   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
3846   return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
3847 }
3848 
3849 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
3850   SDLoc SL(Op);
3851   MachineFunction &MF = DAG.getMachineFunction();
3852   SDValue Chain = Op.getOperand(0);
3853 
3854   unsigned TrapID = Op.getOpcode() == ISD::DEBUGTRAP ?
3855     SISubtarget::TrapIDLLVMDebugTrap : SISubtarget::TrapIDLLVMTrap;
3856 
3857   if (Subtarget->getTrapHandlerAbi() == SISubtarget::TrapHandlerAbiHsa &&
3858       Subtarget->isTrapHandlerEnabled()) {
3859     SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
3860     unsigned UserSGPR = Info->getQueuePtrUserSGPR();
3861     assert(UserSGPR != AMDGPU::NoRegister);
3862 
3863     SDValue QueuePtr = CreateLiveInRegister(
3864       DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
3865 
3866     SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
3867 
3868     SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
3869                                      QueuePtr, SDValue());
3870 
3871     SDValue Ops[] = {
3872       ToReg,
3873       DAG.getTargetConstant(TrapID, SL, MVT::i16),
3874       SGPR01,
3875       ToReg.getValue(1)
3876     };
3877 
3878     return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
3879   }
3880 
3881   switch (TrapID) {
3882   case SISubtarget::TrapIDLLVMTrap:
3883     return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
3884   case SISubtarget::TrapIDLLVMDebugTrap: {
3885     DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
3886                                      "debugtrap handler not supported",
3887                                      Op.getDebugLoc(),
3888                                      DS_Warning);
3889     LLVMContext &Ctx = MF.getFunction().getContext();
3890     Ctx.diagnose(NoTrap);
3891     return Chain;
3892   }
3893   default:
3894     llvm_unreachable("unsupported trap handler type!");
3895   }
3896 
3897   return Chain;
3898 }
3899 
3900 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
3901                                              SelectionDAG &DAG) const {
3902   // FIXME: Use inline constants (src_{shared, private}_base) instead.
3903   if (Subtarget->hasApertureRegs()) {
3904     unsigned Offset = AS == AMDGPUASI.LOCAL_ADDRESS ?
3905         AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
3906         AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
3907     unsigned WidthM1 = AS == AMDGPUASI.LOCAL_ADDRESS ?
3908         AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
3909         AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
3910     unsigned Encoding =
3911         AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
3912         Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
3913         WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
3914 
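    // The emitted sequence is roughly (illustrative; the destination SGPR is
    // chosen later by the register allocator):
    //   s_getreg_b32 s_ap, hwreg(ID_MEM_BASES, Offset, WidthM1 + 1)
    //   s_lshl_b32   s_ap, s_ap, WidthM1 + 1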
3915     SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
3916     SDValue ApertureReg = SDValue(
3917         DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
3918     SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
3919     return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
3920   }
3921 
3922   MachineFunction &MF = DAG.getMachineFunction();
3923   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
3924   unsigned UserSGPR = Info->getQueuePtrUserSGPR();
3925   assert(UserSGPR != AMDGPU::NoRegister);
3926 
3927   SDValue QueuePtr = CreateLiveInRegister(
3928     DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
3929 
3930   // Offset into amd_queue_t for group_segment_aperture_base_hi /
3931   // private_segment_aperture_base_hi.
3932   uint32_t StructOffset = (AS == AMDGPUASI.LOCAL_ADDRESS) ? 0x40 : 0x44;
3933 
3934   SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
3935 
3936   // TODO: Use custom target PseudoSourceValue.
  // TODO: We should use the value from the IR intrinsic call, but it might not
  // be available, and it is not obvious how to obtain it here.
3939   Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
3940                                               AMDGPUASI.CONSTANT_ADDRESS));
3941 
3942   MachinePointerInfo PtrInfo(V, StructOffset);
3943   return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
3944                      MinAlign(64, StructOffset),
3945                      MachineMemOperand::MODereferenceable |
3946                          MachineMemOperand::MOInvariant);
3947 }
3948 
3949 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
3950                                              SelectionDAG &DAG) const {
3951   SDLoc SL(Op);
3952   const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
3953 
3954   SDValue Src = ASC->getOperand(0);
3955   SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
3956 
3957   const AMDGPUTargetMachine &TM =
3958     static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
3959 
3960   // flat -> local/private
3961   if (ASC->getSrcAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
3962     unsigned DestAS = ASC->getDestAddressSpace();
3963 
3964     if (DestAS == AMDGPUASI.LOCAL_ADDRESS ||
3965         DestAS == AMDGPUASI.PRIVATE_ADDRESS) {
3966       unsigned NullVal = TM.getNullPointerValue(DestAS);
3967       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
3968       SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
3969       SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
3970 
3971       return DAG.getNode(ISD::SELECT, SL, MVT::i32,
3972                          NonNull, Ptr, SegmentNullPtr);
3973     }
3974   }
3975 
3976   // local/private -> flat
3977   if (ASC->getDestAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
3978     unsigned SrcAS = ASC->getSrcAddressSpace();
3979 
3980     if (SrcAS == AMDGPUASI.LOCAL_ADDRESS ||
3981         SrcAS == AMDGPUASI.PRIVATE_ADDRESS) {
3982       unsigned NullVal = TM.getNullPointerValue(SrcAS);
3983       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
3984 
3985       SDValue NonNull
3986         = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
3987 
3988       SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
3989       SDValue CvtPtr
3990         = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
3991 
3992       return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
3993                          DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
3994                          FlatNullPtr);
3995     }
3996   }
3997 
3998   // global <-> flat are no-ops and never emitted.
3999 
4000   const MachineFunction &MF = DAG.getMachineFunction();
4001   DiagnosticInfoUnsupported InvalidAddrSpaceCast(
4002     MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
4003   DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4004 
4005   return DAG.getUNDEF(ASC->getValueType(0));
4006 }
4007 
4008 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4009                                                  SelectionDAG &DAG) const {
4010   SDValue Idx = Op.getOperand(2);
4011   if (isa<ConstantSDNode>(Idx))
4012     return SDValue();
4013 
4014   // Avoid stack access for dynamic indexing.
4015   SDLoc SL(Op);
4016   SDValue Vec = Op.getOperand(0);
4017   SDValue Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Op.getOperand(1));
4018 
  // v_bfi_b32 (v_bfm_b32 16, (shl idx, 4)), val, vec
  SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Val);

  // Convert vector index to bit-index (multiply by the 16-bit element size,
  // i.e. shift left by 4).
  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx,
                                  DAG.getConstant(4, SL, MVT::i32));
4025 
4026   SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
4027 
4028   SDValue BFM = DAG.getNode(ISD::SHL, SL, MVT::i32,
4029                             DAG.getConstant(0xffff, SL, MVT::i32),
4030                             ScaledIdx);
4031 
4032   SDValue LHS = DAG.getNode(ISD::AND, SL, MVT::i32, BFM, ExtVal);
4033   SDValue RHS = DAG.getNode(ISD::AND, SL, MVT::i32,
4034                             DAG.getNOT(SL, BFM, MVT::i32), BCVec);
4035 
4036   SDValue BFI = DAG.getNode(ISD::OR, SL, MVT::i32, LHS, RHS);
4037   return DAG.getNode(ISD::BITCAST, SL, Op.getValueType(), BFI);
4038 }
4039 
4040 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4041                                                   SelectionDAG &DAG) const {
4042   SDLoc SL(Op);
4043 
4044   EVT ResultVT = Op.getValueType();
4045   SDValue Vec = Op.getOperand(0);
4046   SDValue Idx = Op.getOperand(1);
4047 
4048   DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
4049 
  // Make sure we do any optimizations that will make it easier to fold
  // source modifiers before obscuring the value with bit operations.
4052 
4053   // XXX - Why doesn't this get called when vector_shuffle is expanded?
4054   if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
4055     return Combined;
4056 
4057   if (const ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
4058     SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
4059 
4060     if (CIdx->getZExtValue() == 1) {
4061       Result = DAG.getNode(ISD::SRL, SL, MVT::i32, Result,
4062                            DAG.getConstant(16, SL, MVT::i32));
4063     } else {
4064       assert(CIdx->getZExtValue() == 0);
4065     }
4066 
4067     if (ResultVT.bitsLT(MVT::i32))
4068       Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);
4069     return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4070   }
4071 
  SDValue Four = DAG.getConstant(4, SL, MVT::i32);

  // Convert vector index to bit-index (multiply by the 16-bit element size,
  // i.e. shift left by 4).
  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, Four);
4076 
4077   SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
4078   SDValue Elt = DAG.getNode(ISD::SRL, SL, MVT::i32, BC, ScaledIdx);
4079 
4080   SDValue Result = Elt;
4081   if (ResultVT.bitsLT(MVT::i32))
4082     Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);
4083 
4084   return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4085 }
4086 
4087 bool
4088 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4089   // We can fold offsets for anything that doesn't require a GOT relocation.
4090   return (GA->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
4091               GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) &&
4092          !shouldEmitGOTReloc(GA->getGlobal());
4093 }
4094 
4095 static SDValue
4096 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
4097                         const SDLoc &DL, unsigned Offset, EVT PtrVT,
4098                         unsigned GAFlags = SIInstrInfo::MO_NONE) {
4099   // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
4100   // lowered to the following code sequence:
4101   //
4102   // For constant address space:
4103   //   s_getpc_b64 s[0:1]
4104   //   s_add_u32 s0, s0, $symbol
4105   //   s_addc_u32 s1, s1, 0
4106   //
4107   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
4108   //   a fixup or relocation is emitted to replace $symbol with a literal
4109   //   constant, which is a pc-relative offset from the encoding of the $symbol
4110   //   operand to the global variable.
4111   //
4112   // For global address space:
4113   //   s_getpc_b64 s[0:1]
4114   //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
4115   //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
4116   //
4117   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
4118   //   fixups or relocations are emitted to replace $symbol@*@lo and
4119   //   $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
4120   //   which is a 64-bit pc-relative offset from the encoding of the $symbol
4121   //   operand to the global variable.
4122   //
4123   // What we want here is an offset from the value returned by s_getpc
4124   // (which is the address of the s_add_u32 instruction) to the global
4125   // variable, but since the encoding of $symbol starts 4 bytes after the start
4126   // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
4127   // small. This requires us to add 4 to the global variable offset in order to
4128   // compute the correct address.
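  //
  // For example (sketch): if the global lives 0x100 bytes past the address P
  // returned by s_getpc_b64, the $symbol operand is encoded at P + 4, so the
  // relocation would naturally resolve to 0x100 - 4 = 0xfc. Adding 4 to the
  // global's offset makes the relocated literal come out to the desired 0x100
  // relative to P.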
4129   SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4130                                              GAFlags);
4131   SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4132                                              GAFlags == SIInstrInfo::MO_NONE ?
4133                                              GAFlags : GAFlags + 1);
4134   return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
4135 }
4136 
4137 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
4138                                              SDValue Op,
4139                                              SelectionDAG &DAG) const {
4140   GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
4141   const GlobalValue *GV = GSD->getGlobal();
4142 
4143   if (GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
4144       GSD->getAddressSpace() != AMDGPUASI.GLOBAL_ADDRESS &&
4145       // FIXME: It isn't correct to rely on the type of the pointer. This should
4146       // be removed when address space 0 is 64-bit.
4147       !GV->getType()->getElementType()->isFunctionTy())
4148     return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
4149 
4150   SDLoc DL(GSD);
4151   EVT PtrVT = Op.getValueType();
4152 
4153   if (shouldEmitFixup(GV))
4154     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
4155   else if (shouldEmitPCReloc(GV))
4156     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
4157                                    SIInstrInfo::MO_REL32);
4158 
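  // Otherwise go through the GOT: materialize the address of the GOT entry
  // pc-relatively, then load the global's address from it out of constant
  // address space (a sketch of what the code below emits).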
4159   SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
4160                                             SIInstrInfo::MO_GOTPCREL32);
4161 
4162   Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
4163   PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
4164   const DataLayout &DataLayout = DAG.getDataLayout();
4165   unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
4166   // FIXME: Use a PseudoSourceValue once those can be assigned an address space.
4167   MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
4168 
4169   return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
4170                      MachineMemOperand::MODereferenceable |
4171                          MachineMemOperand::MOInvariant);
4172 }
4173 
4174 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
4175                                    const SDLoc &DL, SDValue V) const {
4176   // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
4177   // the destination register.
4178   //
4179   // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
4180   // so we will end up with redundant moves to m0.
4181   //
4182   // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
4183 
4184   // A Null SDValue creates a glue result.
4185   SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
4186                                   V, Chain);
4187   return SDValue(M0, 0);
4188 }
4189 
4190 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
4191                                                  SDValue Op,
4192                                                  MVT VT,
4193                                                  unsigned Offset) const {
4194   SDLoc SL(Op);
4195   SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
4196                                            DAG.getEntryNode(), Offset, false);
  // The local size values will have the high 16 bits as zero.
4198   return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
4199                      DAG.getValueType(VT));
4200 }
4201 
4202 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4203                                         EVT VT) {
4204   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
4205                                       "non-hsa intrinsic with hsa target",
4206                                       DL.getDebugLoc());
4207   DAG.getContext()->diagnose(BadIntrin);
4208   return DAG.getUNDEF(VT);
4209 }
4210 
4211 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4212                                          EVT VT) {
4213   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
4214                                       "intrinsic not supported on subtarget",
4215                                       DL.getDebugLoc());
4216   DAG.getContext()->diagnose(BadIntrin);
4217   return DAG.getUNDEF(VT);
4218 }
4219 
4220 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4221                                                   SelectionDAG &DAG) const {
4222   MachineFunction &MF = DAG.getMachineFunction();
4223   auto MFI = MF.getInfo<SIMachineFunctionInfo>();
4224 
4225   EVT VT = Op.getValueType();
4226   SDLoc DL(Op);
4227   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4228 
4229   // TODO: Should this propagate fast-math-flags?
4230 
4231   switch (IntrinsicID) {
4232   case Intrinsic::amdgcn_implicit_buffer_ptr: {
4233     if (getSubtarget()->isAmdCodeObjectV2(MF))
4234       return emitNonHSAIntrinsicError(DAG, DL, VT);
4235     return getPreloadedValue(DAG, *MFI, VT,
4236                              AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
4237   }
4238   case Intrinsic::amdgcn_dispatch_ptr:
4239   case Intrinsic::amdgcn_queue_ptr: {
4240     if (!Subtarget->isAmdCodeObjectV2(MF)) {
4241       DiagnosticInfoUnsupported BadIntrin(
4242           MF.getFunction(), "unsupported hsa intrinsic without hsa target",
4243           DL.getDebugLoc());
4244       DAG.getContext()->diagnose(BadIntrin);
4245       return DAG.getUNDEF(VT);
4246     }
4247 
4248     auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
4249       AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
4250     return getPreloadedValue(DAG, *MFI, VT, RegID);
4251   }
4252   case Intrinsic::amdgcn_implicitarg_ptr: {
4253     if (MFI->isEntryFunction())
4254       return getImplicitArgPtr(DAG, DL);
4255     return getPreloadedValue(DAG, *MFI, VT,
4256                              AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
4257   }
4258   case Intrinsic::amdgcn_kernarg_segment_ptr: {
4259     return getPreloadedValue(DAG, *MFI, VT,
4260                              AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
4261   }
4262   case Intrinsic::amdgcn_dispatch_id: {
4263     return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
4264   }
4265   case Intrinsic::amdgcn_rcp:
4266     return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
4267   case Intrinsic::amdgcn_rsq:
4268     return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
4269   case Intrinsic::amdgcn_rsq_legacy:
4270     if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
4271       return emitRemovedIntrinsicError(DAG, DL, VT);
4272 
4273     return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
4274   case Intrinsic::amdgcn_rcp_legacy:
4275     if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
4276       return emitRemovedIntrinsicError(DAG, DL, VT);
4277     return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
4278   case Intrinsic::amdgcn_rsq_clamp: {
4279     if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
4280       return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
4281 
4282     Type *Type = VT.getTypeForEVT(*DAG.getContext());
4283     APFloat Max = APFloat::getLargest(Type->getFltSemantics());
4284     APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
4285 
4286     SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
4287     SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
4288                               DAG.getConstantFP(Max, DL, VT));
4289     return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
4290                        DAG.getConstantFP(Min, DL, VT));
4291   }
4292   case Intrinsic::r600_read_ngroups_x:
4293     if (Subtarget->isAmdHsaOS())
4294       return emitNonHSAIntrinsicError(DAG, DL, VT);
4295 
4296     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
4297                                     SI::KernelInputOffsets::NGROUPS_X, false);
4298   case Intrinsic::r600_read_ngroups_y:
4299     if (Subtarget->isAmdHsaOS())
4300       return emitNonHSAIntrinsicError(DAG, DL, VT);
4301 
4302     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
4303                                     SI::KernelInputOffsets::NGROUPS_Y, false);
4304   case Intrinsic::r600_read_ngroups_z:
4305     if (Subtarget->isAmdHsaOS())
4306       return emitNonHSAIntrinsicError(DAG, DL, VT);
4307 
4308     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
4309                                     SI::KernelInputOffsets::NGROUPS_Z, false);
4310   case Intrinsic::r600_read_global_size_x:
4311     if (Subtarget->isAmdHsaOS())
4312       return emitNonHSAIntrinsicError(DAG, DL, VT);
4313 
4314     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
4315                                     SI::KernelInputOffsets::GLOBAL_SIZE_X, false);
4316   case Intrinsic::r600_read_global_size_y:
4317     if (Subtarget->isAmdHsaOS())
4318       return emitNonHSAIntrinsicError(DAG, DL, VT);
4319 
4320     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
4321                                     SI::KernelInputOffsets::GLOBAL_SIZE_Y, false);
4322   case Intrinsic::r600_read_global_size_z:
4323     if (Subtarget->isAmdHsaOS())
4324       return emitNonHSAIntrinsicError(DAG, DL, VT);
4325 
4326     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
4327                                     SI::KernelInputOffsets::GLOBAL_SIZE_Z, false);
4328   case Intrinsic::r600_read_local_size_x:
4329     if (Subtarget->isAmdHsaOS())
4330       return emitNonHSAIntrinsicError(DAG, DL, VT);
4331 
4332     return lowerImplicitZextParam(DAG, Op, MVT::i16,
4333                                   SI::KernelInputOffsets::LOCAL_SIZE_X);
4334   case Intrinsic::r600_read_local_size_y:
4335     if (Subtarget->isAmdHsaOS())
4336       return emitNonHSAIntrinsicError(DAG, DL, VT);
4337 
4338     return lowerImplicitZextParam(DAG, Op, MVT::i16,
4339                                   SI::KernelInputOffsets::LOCAL_SIZE_Y);
4340   case Intrinsic::r600_read_local_size_z:
4341     if (Subtarget->isAmdHsaOS())
4342       return emitNonHSAIntrinsicError(DAG, DL, VT);
4343 
4344     return lowerImplicitZextParam(DAG, Op, MVT::i16,
4345                                   SI::KernelInputOffsets::LOCAL_SIZE_Z);
4346   case Intrinsic::amdgcn_workgroup_id_x:
4347   case Intrinsic::r600_read_tgid_x:
4348     return getPreloadedValue(DAG, *MFI, VT,
4349                              AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
4350   case Intrinsic::amdgcn_workgroup_id_y:
4351   case Intrinsic::r600_read_tgid_y:
4352     return getPreloadedValue(DAG, *MFI, VT,
4353                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
4354   case Intrinsic::amdgcn_workgroup_id_z:
4355   case Intrinsic::r600_read_tgid_z:
4356     return getPreloadedValue(DAG, *MFI, VT,
4357                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::r600_read_tidig_x:
    return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
                          SDLoc(DAG.getEntryNode()),
                          MFI->getArgInfo().WorkItemIDX);
4364   case Intrinsic::amdgcn_workitem_id_y:
4365   case Intrinsic::r600_read_tidig_y:
4366     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4367                           SDLoc(DAG.getEntryNode()),
4368                           MFI->getArgInfo().WorkItemIDY);
4369   case Intrinsic::amdgcn_workitem_id_z:
4370   case Intrinsic::r600_read_tidig_z:
4371     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4372                           SDLoc(DAG.getEntryNode()),
4373                           MFI->getArgInfo().WorkItemIDZ);
4374   case AMDGPUIntrinsic::SI_load_const: {
4375     SDValue Ops[] = {
4376       Op.getOperand(1),
4377       Op.getOperand(2)
4378     };
4379 
4380     MachineMemOperand *MMO = MF.getMachineMemOperand(
4381         MachinePointerInfo(),
4382         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
4383             MachineMemOperand::MOInvariant,
4384         VT.getStoreSize(), 4);
4385     return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
4386                                    Op->getVTList(), Ops, VT, MMO);
4387   }
4388   case Intrinsic::amdgcn_fdiv_fast:
4389     return lowerFDIV_FAST(Op, DAG);
4390   case Intrinsic::amdgcn_interp_mov: {
4391     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
4392     SDValue Glue = M0.getValue(1);
4393     return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
4394                        Op.getOperand(2), Op.getOperand(3), Glue);
4395   }
4396   case Intrinsic::amdgcn_interp_p1: {
4397     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
4398     SDValue Glue = M0.getValue(1);
4399     return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
4400                        Op.getOperand(2), Op.getOperand(3), Glue);
4401   }
4402   case Intrinsic::amdgcn_interp_p2: {
4403     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
4404     SDValue Glue = SDValue(M0.getNode(), 1);
4405     return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
4406                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
4407                        Glue);
4408   }
4409   case Intrinsic::amdgcn_sin:
4410     return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
4411 
4412   case Intrinsic::amdgcn_cos:
4413     return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
4414 
4415   case Intrinsic::amdgcn_log_clamp: {
4416     if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
4417       return SDValue();
4418 
4419     DiagnosticInfoUnsupported BadIntrin(
4420       MF.getFunction(), "intrinsic not supported on subtarget",
4421       DL.getDebugLoc());
    DAG.getContext()->diagnose(BadIntrin);
    return DAG.getUNDEF(VT);
4424   }
4425   case Intrinsic::amdgcn_ldexp:
4426     return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
4427                        Op.getOperand(1), Op.getOperand(2));
4428 
4429   case Intrinsic::amdgcn_fract:
4430     return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
4431 
4432   case Intrinsic::amdgcn_class:
4433     return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
4434                        Op.getOperand(1), Op.getOperand(2));
4435   case Intrinsic::amdgcn_div_fmas:
4436     return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
4437                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
4438                        Op.getOperand(4));
4439 
4440   case Intrinsic::amdgcn_div_fixup:
4441     return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
4442                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4443 
4444   case Intrinsic::amdgcn_trig_preop:
4445     return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
4446                        Op.getOperand(1), Op.getOperand(2));
4447   case Intrinsic::amdgcn_div_scale: {
4448     // 3rd parameter required to be a constant.
4449     const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4450     if (!Param)
4451       return DAG.getMergeValues({ DAG.getUNDEF(VT), DAG.getUNDEF(MVT::i1) }, DL);
4452 
    // Translate to the operands expected by the machine instruction. The
    // first operand must match either the numerator or the denominator; the
    // third intrinsic operand selects which one.
4455     SDValue Numerator = Op.getOperand(1);
4456     SDValue Denominator = Op.getOperand(2);
4457 
4458     // Note this order is opposite of the machine instruction's operations,
4459     // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
4460     // intrinsic has the numerator as the first operand to match a normal
4461     // division operation.
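    //
    // For example (sketch): llvm.amdgcn.div.scale(x, y, true) selects the
    // numerator x as src0, while a false third operand selects the
    // denominator y.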
4462 
4463     SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
4464 
4465     return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
4466                        Denominator, Numerator);
4467   }
4468   case Intrinsic::amdgcn_icmp: {
4469     const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4470     if (!CD)
4471       return DAG.getUNDEF(VT);
4472 
4473     int CondCode = CD->getSExtValue();
4474     if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
4475         CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
4476       return DAG.getUNDEF(VT);
4477 
4478     ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
4479     ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
4480     return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
4481                        Op.getOperand(2), DAG.getCondCode(CCOpcode));
4482   }
4483   case Intrinsic::amdgcn_fcmp: {
4484     const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4485     if (!CD)
4486       return DAG.getUNDEF(VT);
4487 
4488     int CondCode = CD->getSExtValue();
4489     if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
4490         CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE)
4491       return DAG.getUNDEF(VT);
4492 
4493     FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
4494     ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
4495     return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
4496                        Op.getOperand(2), DAG.getCondCode(CCOpcode));
4497   }
4498   case Intrinsic::amdgcn_fmed3:
4499     return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
4500                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4501   case Intrinsic::amdgcn_fmul_legacy:
4502     return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
4503                        Op.getOperand(1), Op.getOperand(2));
4504   case Intrinsic::amdgcn_sffbh:
4505     return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
4506   case Intrinsic::amdgcn_sbfe:
4507     return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
4508                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4509   case Intrinsic::amdgcn_ubfe:
4510     return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
4511                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4512   case Intrinsic::amdgcn_cvt_pkrtz: {
4513     // FIXME: Stop adding cast if v2f16 legal.
4514     EVT VT = Op.getValueType();
4515     SDValue Node = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, DL, MVT::i32,
4516                                Op.getOperand(1), Op.getOperand(2));
4517     return DAG.getNode(ISD::BITCAST, DL, VT, Node);
4518   }
4519   case Intrinsic::amdgcn_wqm: {
4520     SDValue Src = Op.getOperand(1);
4521     return SDValue(DAG.getMachineNode(AMDGPU::WQM, DL, Src.getValueType(), Src),
4522                    0);
4523   }
4524   case Intrinsic::amdgcn_wwm: {
4525     SDValue Src = Op.getOperand(1);
4526     return SDValue(DAG.getMachineNode(AMDGPU::WWM, DL, Src.getValueType(), Src),
4527                    0);
4528   }
4529   case Intrinsic::amdgcn_image_getlod:
4530   case Intrinsic::amdgcn_image_getresinfo: {
4531     unsigned Idx = (IntrinsicID == Intrinsic::amdgcn_image_getresinfo) ? 3 : 4;
4532 
    // If the dmask has every channel disabled, replace the result with undef.
4534     const ConstantSDNode *DMask = dyn_cast<ConstantSDNode>(Op.getOperand(Idx));
4535     if (!DMask || DMask->isNullValue())
4536       return DAG.getUNDEF(Op.getValueType());
4537     return SDValue();
4538   }
4539   default:
4540     return Op;
4541   }
4542 }
4543 
4544 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4545                                                  SelectionDAG &DAG) const {
4546   unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
4547   SDLoc DL(Op);
4548 
4549   switch (IntrID) {
4550   case Intrinsic::amdgcn_atomic_inc:
4551   case Intrinsic::amdgcn_atomic_dec: {
4552     MemSDNode *M = cast<MemSDNode>(Op);
4553     unsigned Opc = (IntrID == Intrinsic::amdgcn_atomic_inc) ?
4554       AMDGPUISD::ATOMIC_INC : AMDGPUISD::ATOMIC_DEC;
4555     SDValue Ops[] = {
4556       M->getOperand(0), // Chain
4557       M->getOperand(2), // Ptr
4558       M->getOperand(3)  // Value
4559     };
4560 
4561     return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
4562                                    M->getMemoryVT(), M->getMemOperand());
4563   }
4564   case Intrinsic::amdgcn_buffer_load:
4565   case Intrinsic::amdgcn_buffer_load_format: {
4566     SDValue Ops[] = {
4567       Op.getOperand(0), // Chain
4568       Op.getOperand(2), // rsrc
4569       Op.getOperand(3), // vindex
4570       Op.getOperand(4), // offset
4571       Op.getOperand(5), // glc
4572       Op.getOperand(6)  // slc
4573     };
4574 
4575     unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
4576         AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
4577     EVT VT = Op.getValueType();
4578     EVT IntVT = VT.changeTypeToInteger();
4579 
4580     auto *M = cast<MemSDNode>(Op);
4581     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
4582                                    M->getMemOperand());
4583   }
4584   case Intrinsic::amdgcn_tbuffer_load: {
4585     MemSDNode *M = cast<MemSDNode>(Op);
4586     SDValue Ops[] = {
4587       Op.getOperand(0),  // Chain
4588       Op.getOperand(2),  // rsrc
4589       Op.getOperand(3),  // vindex
4590       Op.getOperand(4),  // voffset
4591       Op.getOperand(5),  // soffset
4592       Op.getOperand(6),  // offset
4593       Op.getOperand(7),  // dfmt
4594       Op.getOperand(8),  // nfmt
4595       Op.getOperand(9),  // glc
4596       Op.getOperand(10)   // slc
4597     };
4598 
4599     EVT VT = Op.getValueType();
4600 
4601     return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
4602                                    Op->getVTList(), Ops, VT, M->getMemOperand());
4603   }
4604   case Intrinsic::amdgcn_buffer_atomic_swap:
4605   case Intrinsic::amdgcn_buffer_atomic_add:
4606   case Intrinsic::amdgcn_buffer_atomic_sub:
4607   case Intrinsic::amdgcn_buffer_atomic_smin:
4608   case Intrinsic::amdgcn_buffer_atomic_umin:
4609   case Intrinsic::amdgcn_buffer_atomic_smax:
4610   case Intrinsic::amdgcn_buffer_atomic_umax:
4611   case Intrinsic::amdgcn_buffer_atomic_and:
4612   case Intrinsic::amdgcn_buffer_atomic_or:
4613   case Intrinsic::amdgcn_buffer_atomic_xor: {
4614     SDValue Ops[] = {
4615       Op.getOperand(0), // Chain
4616       Op.getOperand(2), // vdata
4617       Op.getOperand(3), // rsrc
4618       Op.getOperand(4), // vindex
4619       Op.getOperand(5), // offset
4620       Op.getOperand(6)  // slc
4621     };
4622     EVT VT = Op.getValueType();
4623 
4624     auto *M = cast<MemSDNode>(Op);
4625     unsigned Opcode = 0;
4626 
4627     switch (IntrID) {
4628     case Intrinsic::amdgcn_buffer_atomic_swap:
4629       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
4630       break;
4631     case Intrinsic::amdgcn_buffer_atomic_add:
4632       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
4633       break;
4634     case Intrinsic::amdgcn_buffer_atomic_sub:
4635       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
4636       break;
4637     case Intrinsic::amdgcn_buffer_atomic_smin:
4638       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
4639       break;
4640     case Intrinsic::amdgcn_buffer_atomic_umin:
4641       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
4642       break;
4643     case Intrinsic::amdgcn_buffer_atomic_smax:
4644       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
4645       break;
4646     case Intrinsic::amdgcn_buffer_atomic_umax:
4647       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
4648       break;
4649     case Intrinsic::amdgcn_buffer_atomic_and:
4650       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
4651       break;
4652     case Intrinsic::amdgcn_buffer_atomic_or:
4653       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
4654       break;
4655     case Intrinsic::amdgcn_buffer_atomic_xor:
4656       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
4657       break;
4658     default:
4659       llvm_unreachable("unhandled atomic opcode");
4660     }
4661 
4662     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
4663                                    M->getMemOperand());
4664   }
4665 
4666   case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
4667     SDValue Ops[] = {
4668       Op.getOperand(0), // Chain
4669       Op.getOperand(2), // src
4670       Op.getOperand(3), // cmp
4671       Op.getOperand(4), // rsrc
4672       Op.getOperand(5), // vindex
4673       Op.getOperand(6), // offset
4674       Op.getOperand(7)  // slc
4675     };
4676     EVT VT = Op.getValueType();
4677     auto *M = cast<MemSDNode>(Op);
4678 
4679     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
4680                                    Op->getVTList(), Ops, VT, M->getMemOperand());
4681   }
4682 
4683   // Basic sample.
4684   case Intrinsic::amdgcn_image_sample:
4685   case Intrinsic::amdgcn_image_sample_cl:
4686   case Intrinsic::amdgcn_image_sample_d:
4687   case Intrinsic::amdgcn_image_sample_d_cl:
4688   case Intrinsic::amdgcn_image_sample_l:
4689   case Intrinsic::amdgcn_image_sample_b:
4690   case Intrinsic::amdgcn_image_sample_b_cl:
4691   case Intrinsic::amdgcn_image_sample_lz:
4692   case Intrinsic::amdgcn_image_sample_cd:
4693   case Intrinsic::amdgcn_image_sample_cd_cl:
4694 
4695   // Sample with comparison.
4696   case Intrinsic::amdgcn_image_sample_c:
4697   case Intrinsic::amdgcn_image_sample_c_cl:
4698   case Intrinsic::amdgcn_image_sample_c_d:
4699   case Intrinsic::amdgcn_image_sample_c_d_cl:
4700   case Intrinsic::amdgcn_image_sample_c_l:
4701   case Intrinsic::amdgcn_image_sample_c_b:
4702   case Intrinsic::amdgcn_image_sample_c_b_cl:
4703   case Intrinsic::amdgcn_image_sample_c_lz:
4704   case Intrinsic::amdgcn_image_sample_c_cd:
4705   case Intrinsic::amdgcn_image_sample_c_cd_cl:
4706 
4707   // Sample with offsets.
4708   case Intrinsic::amdgcn_image_sample_o:
4709   case Intrinsic::amdgcn_image_sample_cl_o:
4710   case Intrinsic::amdgcn_image_sample_d_o:
4711   case Intrinsic::amdgcn_image_sample_d_cl_o:
4712   case Intrinsic::amdgcn_image_sample_l_o:
4713   case Intrinsic::amdgcn_image_sample_b_o:
4714   case Intrinsic::amdgcn_image_sample_b_cl_o:
4715   case Intrinsic::amdgcn_image_sample_lz_o:
4716   case Intrinsic::amdgcn_image_sample_cd_o:
4717   case Intrinsic::amdgcn_image_sample_cd_cl_o:
4718 
4719   // Sample with comparison and offsets.
4720   case Intrinsic::amdgcn_image_sample_c_o:
4721   case Intrinsic::amdgcn_image_sample_c_cl_o:
4722   case Intrinsic::amdgcn_image_sample_c_d_o:
4723   case Intrinsic::amdgcn_image_sample_c_d_cl_o:
4724   case Intrinsic::amdgcn_image_sample_c_l_o:
4725   case Intrinsic::amdgcn_image_sample_c_b_o:
4726   case Intrinsic::amdgcn_image_sample_c_b_cl_o:
4727   case Intrinsic::amdgcn_image_sample_c_lz_o:
4728   case Intrinsic::amdgcn_image_sample_c_cd_o:
4729   case Intrinsic::amdgcn_image_sample_c_cd_cl_o: {
    // If the dmask has every channel disabled, replace the result with undef.
4731     const ConstantSDNode *DMask = dyn_cast<ConstantSDNode>(Op.getOperand(5));
4732     if (!DMask || DMask->isNullValue()) {
4733       SDValue Undef = DAG.getUNDEF(Op.getValueType());
4734       return DAG.getMergeValues({ Undef, Op.getOperand(0) }, SDLoc(Op));
4735     }
4736 
4737     return SDValue();
4738   }
4739   default:
4740     return SDValue();
4741   }
4742 }
4743 
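// Rough behavior of the d16 data handling below: on subtargets with packed
// d16 VMEM support, illegal vector types are only bitcast to an equivalent
// legal type; on subtargets with unpacked d16 VMEM, each 16-bit element is
// zero-extended into its own 32-bit lane (e.g. v2f16 -> v2i32).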
4744 SDValue SITargetLowering::handleD16VData(SDValue VData,
4745                                          SelectionDAG &DAG) const {
4746   EVT StoreVT = VData.getValueType();
4747   SDLoc DL(VData);
4748 
4749   if (StoreVT.isVector()) {
    assert(StoreVT.getVectorNumElements() != 3 && "Handle v3f16");
4751     if (!Subtarget->hasUnpackedD16VMem()) {
4752       if (!isTypeLegal(StoreVT)) {
        // If the target supports packed d16 VMEM, we just need to work around
        // the illegal type by casting to an equivalent one.
4755         EVT EquivStoreVT = getEquivalentMemType(*DAG.getContext(), StoreVT);
4756         return DAG.getNode(ISD::BITCAST, DL, EquivStoreVT, VData);
4757       }
4758     } else { // We need to unpack the packed data to store.
4759       EVT IntStoreVT = StoreVT.changeTypeToInteger();
4760       SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
4761       EVT EquivStoreVT = (StoreVT == MVT::v2f16) ? MVT::v2i32 : MVT::v4i32;
4762       return DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
4763     }
4764   }
4765   // No change for f16 and legal vector D16 types.
4766   return VData;
4767 }
4768 
4769 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4770                                               SelectionDAG &DAG) const {
4771   SDLoc DL(Op);
4772   SDValue Chain = Op.getOperand(0);
4773   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
4774   MachineFunction &MF = DAG.getMachineFunction();
4775 
4776   switch (IntrinsicID) {
4777   case Intrinsic::amdgcn_exp: {
4778     const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
4779     const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
4780     const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
4781     const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));
4782 
4783     const SDValue Ops[] = {
4784       Chain,
4785       DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
4786       DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
4787       Op.getOperand(4), // src0
4788       Op.getOperand(5), // src1
4789       Op.getOperand(6), // src2
4790       Op.getOperand(7), // src3
4791       DAG.getTargetConstant(0, DL, MVT::i1), // compr
4792       DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
4793     };
4794 
4795     unsigned Opc = Done->isNullValue() ?
4796       AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
4797     return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
4798   }
4799   case Intrinsic::amdgcn_exp_compr: {
4800     const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
4801     const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
4802     SDValue Src0 = Op.getOperand(4);
4803     SDValue Src1 = Op.getOperand(5);
4804     const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
4805     const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));
4806 
4807     SDValue Undef = DAG.getUNDEF(MVT::f32);
4808     const SDValue Ops[] = {
4809       Chain,
4810       DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
4811       DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
4812       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
4813       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
4814       Undef, // src2
4815       Undef, // src3
4816       DAG.getTargetConstant(1, DL, MVT::i1), // compr
4817       DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
4818     };
4819 
4820     unsigned Opc = Done->isNullValue() ?
4821       AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
4822     return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
4823   }
4824   case Intrinsic::amdgcn_s_sendmsg:
4825   case Intrinsic::amdgcn_s_sendmsghalt: {
4826     unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ?
4827       AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
4828     Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
4829     SDValue Glue = Chain.getValue(1);
4830     return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
4831                        Op.getOperand(2), Glue);
4832   }
4833   case Intrinsic::amdgcn_init_exec: {
4834     return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
4835                        Op.getOperand(2));
4836   }
4837   case Intrinsic::amdgcn_init_exec_from_input: {
4838     return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
4839                        Op.getOperand(2), Op.getOperand(3));
4840   }
4841   case AMDGPUIntrinsic::AMDGPU_kill: {
4842     SDValue Src = Op.getOperand(2);
4843     if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) {
4844       if (!K->isNegative())
4845         return Chain;
4846 
4847       SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32);
4848       return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne);
4849     }
4850 
4851     SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src);
4852     return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast);
4853   }
4854   case Intrinsic::amdgcn_s_barrier: {
4855     if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
4856       const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
4857       unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
4858       if (WGSize <= ST.getWavefrontSize())
4859         return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
4860                                           Op.getOperand(0)), 0);
4861     }
4862     return SDValue();
4863   };
4864   case AMDGPUIntrinsic::SI_tbuffer_store: {
4865 
4866     // Extract vindex and voffset from vaddr as appropriate
4867     const ConstantSDNode *OffEn = cast<ConstantSDNode>(Op.getOperand(10));
4868     const ConstantSDNode *IdxEn = cast<ConstantSDNode>(Op.getOperand(11));
4869     SDValue VAddr = Op.getOperand(5);
4870 
4871     SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
4872 
4873     assert(!(OffEn->isOne() && IdxEn->isOne()) &&
4874            "Legacy intrinsic doesn't support both offset and index - use new version");
4875 
4876     SDValue VIndex = IdxEn->isOne() ? VAddr : Zero;
4877     SDValue VOffset = OffEn->isOne() ? VAddr : Zero;
4878 
4879     // Deal with the vec-3 case
4880     const ConstantSDNode *NumChannels = cast<ConstantSDNode>(Op.getOperand(4));
4881     auto Opcode = NumChannels->getZExtValue() == 3 ?
4882       AMDGPUISD::TBUFFER_STORE_FORMAT_X3 : AMDGPUISD::TBUFFER_STORE_FORMAT;
4883 
4884     SDValue Ops[] = {
4885      Chain,
4886      Op.getOperand(3),  // vdata
4887      Op.getOperand(2),  // rsrc
4888      VIndex,
4889      VOffset,
4890      Op.getOperand(6),  // soffset
4891      Op.getOperand(7),  // inst_offset
4892      Op.getOperand(8),  // dfmt
4893      Op.getOperand(9),  // nfmt
4894      Op.getOperand(12), // glc
4895      Op.getOperand(13), // slc
4896     };
4897 
4898     assert((cast<ConstantSDNode>(Op.getOperand(14)))->getZExtValue() == 0 &&
4899            "Value of tfe other than zero is unsupported");
4900 
4901     EVT VT = Op.getOperand(3).getValueType();
4902     MachineMemOperand *MMO = MF.getMachineMemOperand(
4903       MachinePointerInfo(),
4904       MachineMemOperand::MOStore,
4905       VT.getStoreSize(), 4);
4906     return DAG.getMemIntrinsicNode(Opcode, DL,
4907                                    Op->getVTList(), Ops, VT, MMO);
4908   }
4909 
4910   case Intrinsic::amdgcn_tbuffer_store: {
4911     SDValue VData = Op.getOperand(2);
4912     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
4913     if (IsD16)
4914       VData = handleD16VData(VData, DAG);
4915     SDValue Ops[] = {
4916       Chain,
4917       VData,             // vdata
4918       Op.getOperand(3),  // rsrc
4919       Op.getOperand(4),  // vindex
4920       Op.getOperand(5),  // voffset
4921       Op.getOperand(6),  // soffset
4922       Op.getOperand(7),  // offset
4923       Op.getOperand(8),  // dfmt
4924       Op.getOperand(9),  // nfmt
4925       Op.getOperand(10), // glc
4926       Op.getOperand(11)  // slc
4927     };
4928     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
4929                            AMDGPUISD::TBUFFER_STORE_FORMAT;
4930     MemSDNode *M = cast<MemSDNode>(Op);
4931     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
4932                                    M->getMemoryVT(), M->getMemOperand());
4933   }
4934 
4935   case Intrinsic::amdgcn_buffer_store:
4936   case Intrinsic::amdgcn_buffer_store_format: {
4937     SDValue VData = Op.getOperand(2);
4938     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
4939     if (IsD16)
4940       VData = handleD16VData(VData, DAG);
4941     SDValue Ops[] = {
4942       Chain,
4943       VData,            // vdata
4944       Op.getOperand(3), // rsrc
4945       Op.getOperand(4), // vindex
4946       Op.getOperand(5), // offset
4947       Op.getOperand(6), // glc
4948       Op.getOperand(7)  // slc
4949     };
4950     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
4951                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
4952     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
4953     MemSDNode *M = cast<MemSDNode>(Op);
4954     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
4955                                    M->getMemoryVT(), M->getMemOperand());
4956   }
4957 
4958   default:
4959     return Op;
4960   }
4961 }
4962 
4963 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
4964   SDLoc DL(Op);
4965   LoadSDNode *Load = cast<LoadSDNode>(Op);
4966   ISD::LoadExtType ExtType = Load->getExtensionType();
4967   EVT MemVT = Load->getMemoryVT();
4968 
4969   if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
4970     if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
4971       return SDValue();
4972 
4973     // FIXME: Copied from PPC
4974     // First, load into 32 bits, then truncate to 1 bit.
4975 
4976     SDValue Chain = Load->getChain();
4977     SDValue BasePtr = Load->getBasePtr();
4978     MachineMemOperand *MMO = Load->getMemOperand();
4979 
4980     EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
4981 
4982     SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
4983                                    BasePtr, RealMemVT, MMO);
4984 
4985     SDValue Ops[] = {
4986       DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
4987       NewLD.getValue(1)
4988     };
4989 
4990     return DAG.getMergeValues(Ops, DL);
4991   }
4992 
4993   if (!MemVT.isVector())
4994     return SDValue();
4995 
4996   assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
4997          "Custom lowering for non-i32 vectors hasn't been implemented.");
4998 
4999   unsigned AS = Load->getAddressSpace();
5000   if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
5001                           AS, Load->getAlignment())) {
5002     SDValue Ops[2];
5003     std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
5004     return DAG.getMergeValues(Ops, DL);
5005   }
5006 
5007   MachineFunction &MF = DAG.getMachineFunction();
5008   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // If there is a possibility that flat instructions access scratch memory
  // then we need to use the same legalization rules we use for private.
5011   if (AS == AMDGPUASI.FLAT_ADDRESS)
5012     AS = MFI->hasFlatScratchInit() ?
5013          AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
5014 
5015   unsigned NumElements = MemVT.getVectorNumElements();
5016   if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
5017     if (isMemOpUniform(Load))
5018       return SDValue();
    // Non-uniform loads will be selected to MUBUF instructions, so they
    // have the same legalization requirements as global and private
    // loads.
5023   }
5024   if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS) {
5025     if (Subtarget->getScalarizeGlobalBehavior() && isMemOpUniform(Load) &&
5026         !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load))
5027       return SDValue();
    // Non-uniform loads will be selected to MUBUF instructions, so they
    // have the same legalization requirements as global and private
    // loads.
5032   }
5033   if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS ||
5034       AS == AMDGPUASI.FLAT_ADDRESS) {
5035     if (NumElements > 4)
5036       return SplitVectorLoad(Op, DAG);
5037     // v4 loads are supported for private and global memory.
5038     return SDValue();
5039   }
5040   if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
5041     // Depending on the setting of the private_element_size field in the
5042     // resource descriptor, we can only make private accesses up to a certain
5043     // size.
5044     switch (Subtarget->getMaxPrivateElementSize()) {
5045     case 4:
5046       return scalarizeVectorLoad(Load, DAG);
5047     case 8:
5048       if (NumElements > 2)
5049         return SplitVectorLoad(Op, DAG);
5050       return SDValue();
5051     case 16:
5052       // Same as global/flat
5053       if (NumElements > 4)
5054         return SplitVectorLoad(Op, DAG);
5055       return SDValue();
5056     default:
5057       llvm_unreachable("unsupported private_element_size");
5058     }
5059   } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
5060     if (NumElements > 2)
5061       return SplitVectorLoad(Op, DAG);
5062 
5063     if (NumElements == 2)
5064       return SDValue();
5065 
    // If properly aligned, splitting might still let us use ds_read_b64.
5067     return SplitVectorLoad(Op, DAG);
5068   }
5069   return SDValue();
5070 }
5071 
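// Custom lower i64 selects by splitting them into two i32 selects on the low
// and high halves and reassembling the result with build_vector + bitcast
// (a sketch of the expansion the code below builds).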
5072 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
5073   if (Op.getValueType() != MVT::i64)
5074     return SDValue();
5075 
5076   SDLoc DL(Op);
5077   SDValue Cond = Op.getOperand(0);
5078 
5079   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
5080   SDValue One = DAG.getConstant(1, DL, MVT::i32);
5081 
5082   SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
5083   SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
5084 
5085   SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
5086   SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
5087 
5088   SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
5089 
5090   SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
5091   SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
5092 
5093   SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
5094 
5095   SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
5096   return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res);
5097 }
5098 
5099 // Catch division cases where we can use shortcuts with rcp and rsq
5100 // instructions.
5101 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
5102                                               SelectionDAG &DAG) const {
5103   SDLoc SL(Op);
5104   SDValue LHS = Op.getOperand(0);
5105   SDValue RHS = Op.getOperand(1);
5106   EVT VT = Op.getValueType();
5107   const SDNodeFlags Flags = Op->getFlags();
5108   bool Unsafe = DAG.getTarget().Options.UnsafeFPMath ||
5109                 Flags.hasUnsafeAlgebra() || Flags.hasAllowReciprocal();
5110 
5111   if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
5112     return SDValue();
5113 
5114   if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
5115     if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
5116       if (CLHS->isExactlyValue(1.0)) {
        // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
        // the CI documentation they have a worst case error of 1 ulp.
5119         // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
5120         // use it as long as we aren't trying to use denormals.
5121         //
5122         // v_rcp_f16 and v_rsq_f16 DO support denormals.
5123 
5124         // 1.0 / sqrt(x) -> rsq(x)
5125 
5126         // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
5127         // error seems really high at 2^29 ULP.
5128         if (RHS.getOpcode() == ISD::FSQRT)
5129           return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
5130 
5131         // 1.0 / x -> rcp(x)
5132         return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
5133       }
5134 
5135       // Same as for 1.0, but expand the sign out of the constant.
5136       if (CLHS->isExactlyValue(-1.0)) {
5137         // -1.0 / x -> rcp (fneg x)
5138         SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
5139         return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
5140       }
5141     }
5142   }
5143 
5144   if (Unsafe) {
5145     // Turn into multiply by the reciprocal.
5146     // x / y -> x * (1.0 / y)
5147     SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
5148     return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
5149   }
5150 
5151   return SDValue();
5152 }
5153 
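// Helpers for the FDIV expansions below: emit either the plain FP node, or,
// when the incoming node carries chain and glue results (the glue comes from
// the denormal-mode SETREG in LowerFDIV32), the chained AMDGPUISD::*_W_CHAIN
// variant so the operation stays ordered with the mode switch.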
5154 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
5155                           EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
5156   if (GlueChain->getNumValues() <= 1) {
5157     return DAG.getNode(Opcode, SL, VT, A, B);
5158   }
5159 
5160   assert(GlueChain->getNumValues() == 3);
5161 
5162   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
5163   switch (Opcode) {
5164   default: llvm_unreachable("no chain equivalent for opcode");
5165   case ISD::FMUL:
5166     Opcode = AMDGPUISD::FMUL_W_CHAIN;
5167     break;
5168   }
5169 
5170   return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
5171                      GlueChain.getValue(2));
5172 }
5173 
5174 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
5175                            EVT VT, SDValue A, SDValue B, SDValue C,
5176                            SDValue GlueChain) {
5177   if (GlueChain->getNumValues() <= 1) {
5178     return DAG.getNode(Opcode, SL, VT, A, B, C);
5179   }
5180 
5181   assert(GlueChain->getNumValues() == 3);
5182 
5183   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
5184   switch (Opcode) {
5185   default: llvm_unreachable("no chain equivalent for opcode");
5186   case ISD::FMA:
5187     Opcode = AMDGPUISD::FMA_W_CHAIN;
5188     break;
5189   }
5190 
5191   return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
5192                      GlueChain.getValue(2));
5193 }
5194 
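// Roughly: f16 division is done by extending both operands to f32, forming
// src0 * rcp(src1), rounding the result back to f16, and then applying
// AMDGPUISD::DIV_FIXUP to handle the special cases.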
5195 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
5196   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
5197     return FastLowered;
5198 
5199   SDLoc SL(Op);
5200   SDValue Src0 = Op.getOperand(0);
5201   SDValue Src1 = Op.getOperand(1);
5202 
5203   SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
5204   SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
5205 
5206   SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
5207   SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
5208 
5209   SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
5210   SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
5211 
5212   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
5213 }
5214 
5215 // Faster 2.5 ULP division that does not support denormals.
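// Roughly: if |rhs| is larger than 2^96 (0x6f800000), pre-scale it by 2^-32
// (0x2f800000) so that rcp stays in range, and multiply the final quotient by
// the same scale factor to compensate.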
5216 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
5217   SDLoc SL(Op);
5218   SDValue LHS = Op.getOperand(1);
5219   SDValue RHS = Op.getOperand(2);
5220 
5221   SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
5222 
5223   const APFloat K0Val(BitsToFloat(0x6f800000));
5224   const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
5225 
5226   const APFloat K1Val(BitsToFloat(0x2f800000));
5227   const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
5228 
5229   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
5230 
5231   EVT SetCCVT =
5232     getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
5233 
5234   SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
5235 
5236   SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
5237 
5238   // TODO: Should this propagate fast-math-flags?
5239   r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
5240 
5241   // rcp does not support denormals.
5242   SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
5243 
5244   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
5245 
5246   return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
5247 }
5248 
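// Roughly: scale the operands with div_scale, take an initial rcp of the
// scaled denominator, refine it with a chain of FMAs (Newton-Raphson style),
// and finish with div_fmas + div_fixup. If FP32 denormals are disabled, they
// are temporarily enabled around the FMA sequence via SETREG on the MODE
// register.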
5249 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
5250   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
5251     return FastLowered;
5252 
5253   SDLoc SL(Op);
5254   SDValue LHS = Op.getOperand(0);
5255   SDValue RHS = Op.getOperand(1);
5256 
5257   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
5258 
5259   SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
5260 
5261   SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
5262                                           RHS, RHS, LHS);
5263   SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
5264                                         LHS, RHS, LHS);
5265 
5266   // Denominator is scaled to not be denormal, so using rcp is ok.
5267   SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
5268                                   DenominatorScaled);
5269   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
5270                                      DenominatorScaled);
5271 
5272   const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
5273                                (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
5274                                (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
5275 
5276   const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);
5277 
5278   if (!Subtarget->hasFP32Denormals()) {
5279     SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
5280     const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
5281                                                       SL, MVT::i32);
5282     SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
5283                                        DAG.getEntryNode(),
5284                                        EnableDenormValue, BitField);
5285     SDValue Ops[3] = {
5286       NegDivScale0,
5287       EnableDenorm.getValue(0),
5288       EnableDenorm.getValue(1)
5289     };
5290 
5291     NegDivScale0 = DAG.getMergeValues(Ops, SL);
5292   }
5293 
5294   SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
5295                              ApproxRcp, One, NegDivScale0);
5296 
5297   SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
5298                              ApproxRcp, Fma0);
5299 
5300   SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
5301                            Fma1, Fma1);
5302 
5303   SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
5304                              NumeratorScaled, Mul);
5305 
  SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul,
                             Fma2);
5307 
5308   SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
5309                              NumeratorScaled, Fma3);
5310 
5311   if (!Subtarget->hasFP32Denormals()) {
5312     const SDValue DisableDenormValue =
5313         DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
5314     SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
5315                                         Fma4.getValue(1),
5316                                         DisableDenormValue,
5317                                         BitField,
5318                                         Fma4.getValue(2));
5319 
5320     SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
5321                                       DisableDenorm, DAG.getRoot());
5322     DAG.setRoot(OutputChain);
5323   }
5324 
5325   SDValue Scale = NumeratorScaled.getValue(1);
5326   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
5327                              Fma4, Fma1, Fma3, Scale);
5328 
5329   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
5330 }
5331 
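// Roughly the same div_scale / rcp / FMA-refinement / div_fmas / div_fixup
// sequence as the f32 path, but in f64 and without the denormal-mode toggle.
// On SI the div_scale condition output is unreliable, so the scale bit for
// div_fmas is recomputed manually (see below).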
5332 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
5333   if (DAG.getTarget().Options.UnsafeFPMath)
5334     return lowerFastUnsafeFDIV(Op, DAG);
5335 
5336   SDLoc SL(Op);
5337   SDValue X = Op.getOperand(0);
5338   SDValue Y = Op.getOperand(1);
5339 
5340   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
5341 
5342   SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
5343 
5344   SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
5345 
5346   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
5347 
5348   SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
5349 
5350   SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
5351 
5352   SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
5353 
5354   SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
5355 
5356   SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
5357 
5358   SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
5359   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
5360 
5361   SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
5362                              NegDivScale0, Mul, DivScale1);
5363 
5364   SDValue Scale;
5365 
5366   if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
    // Work around a hardware bug on SI where the condition output from
    // div_scale is not usable.
5369 
5370     const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
5371 
    // Figure out which scale to use for div_fmas.
5373     SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
5374     SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
5375     SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
5376     SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
5377 
5378     SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
5379     SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
5380 
5381     SDValue Scale0Hi
5382       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
5383     SDValue Scale1Hi
5384       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
5385 
5386     SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
5387     SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
5388     Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
5389   } else {
5390     Scale = DivScale1.getValue(1);
5391   }
5392 
5393   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
5394                              Fma4, Fma3, Mul, Scale);
5395 
5396   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
5397 }
5398 
5399 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
5400   EVT VT = Op.getValueType();
5401 
5402   if (VT == MVT::f32)
5403     return LowerFDIV32(Op, DAG);
5404 
5405   if (VT == MVT::f64)
5406     return LowerFDIV64(Op, DAG);
5407 
5408   if (VT == MVT::f16)
5409     return LowerFDIV16(Op, DAG);
5410 
5411   llvm_unreachable("Unexpected type for fdiv");
5412 }
5413 
5414 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
5415   SDLoc DL(Op);
5416   StoreSDNode *Store = cast<StoreSDNode>(Op);
5417   EVT VT = Store->getMemoryVT();
5418 
5419   if (VT == MVT::i1) {
5420     return DAG.getTruncStore(Store->getChain(), DL,
5421        DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
5422        Store->getBasePtr(), MVT::i1, Store->getMemOperand());
5423   }
5424 
5425   assert(VT.isVector() &&
5426          Store->getValue().getValueType().getScalarType() == MVT::i32);
5427 
5428   unsigned AS = Store->getAddressSpace();
5429   if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
5430                           AS, Store->getAlignment())) {
5431     return expandUnalignedStore(Store, DAG);
5432   }
5433 
5434   MachineFunction &MF = DAG.getMachineFunction();
5435   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
5436   // If there is a possibility that flat instructions access scratch memory,
5437   // then we need to use the same legalization rules we use for private.
5438   if (AS == AMDGPUASI.FLAT_ADDRESS)
5439     AS = MFI->hasFlatScratchInit() ?
5440          AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
5441 
5442   unsigned NumElements = VT.getVectorNumElements();
5443   if (AS == AMDGPUASI.GLOBAL_ADDRESS ||
5444       AS == AMDGPUASI.FLAT_ADDRESS) {
5445     if (NumElements > 4)
5446       return SplitVectorStore(Op, DAG);
5447     return SDValue();
5448   } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
5449     switch (Subtarget->getMaxPrivateElementSize()) {
5450     case 4:
5451       return scalarizeVectorStore(Store, DAG);
5452     case 8:
5453       if (NumElements > 2)
5454         return SplitVectorStore(Op, DAG);
5455       return SDValue();
5456     case 16:
5457       if (NumElements > 4)
5458         return SplitVectorStore(Op, DAG);
5459       return SDValue();
5460     default:
5461       llvm_unreachable("unsupported private_element_size");
5462     }
5463   } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
5464     if (NumElements > 2)
5465       return SplitVectorStore(Op, DAG);
5466 
5467     if (NumElements == 2)
5468       return Op;
5469 
5470     // If properly aligned, splitting might let us use ds_write_b64.
5471     return SplitVectorStore(Op, DAG);
5472   } else {
5473     llvm_unreachable("unhandled address space");
5474   }
5475 }
5476 
5477 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
5478   SDLoc DL(Op);
5479   EVT VT = Op.getValueType();
5480   SDValue Arg = Op.getOperand(0);
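       // The hardware sin/cos instructions take an input scaled by 1/(2*pi);
       // multiply by 0.5/pi and take the fractional part to reduce the range.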
5481   // TODO: Should this propagate fast-math-flags?
5482   SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
5483                                   DAG.getNode(ISD::FMUL, DL, VT, Arg,
5484                                               DAG.getConstantFP(0.5/M_PI, DL,
5485                                                                 VT)));
5486 
5487   switch (Op.getOpcode()) {
5488   case ISD::FCOS:
5489     return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart);
5490   case ISD::FSIN:
5491     return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart);
5492   default:
5493     llvm_unreachable("Wrong trig opcode");
5494   }
5495 }
5496 
5497 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
5498   AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
5499   assert(AtomicNode->isCompareAndSwap());
5500   unsigned AS = AtomicNode->getAddressSpace();
5501 
5502   // No custom lowering required for local address space
5503   if (!isFlatGlobalAddrSpace(AS, AMDGPUASI))
5504     return Op;
5505 
5506   // Non-local address spaces require custom lowering for atomic compare and
5507   // swap; the compare and swap values are packed into a v2i32 (v2i64 for _X2).
5508   SDLoc DL(Op);
5509   SDValue ChainIn = Op.getOperand(0);
5510   SDValue Addr = Op.getOperand(1);
5511   SDValue Old = Op.getOperand(2);
5512   SDValue New = Op.getOperand(3);
5513   EVT VT = Op.getValueType();
5514   MVT SimpleVT = VT.getSimpleVT();
5515   MVT VecType = MVT::getVectorVT(SimpleVT, 2);
5516 
5517   SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
5518   SDValue Ops[] = { ChainIn, Addr, NewOld };
5519 
5520   return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
5521                                  Ops, VT, AtomicNode->getMemOperand());
5522 }
5523 
5524 //===----------------------------------------------------------------------===//
5525 // Custom DAG optimizations
5526 //===----------------------------------------------------------------------===//
5527 
5528 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
5529                                                      DAGCombinerInfo &DCI) const {
5530   EVT VT = N->getValueType(0);
5531   EVT ScalarVT = VT.getScalarType();
5532   if (ScalarVT != MVT::f32)
5533     return SDValue();
5534 
5535   SelectionDAG &DAG = DCI.DAG;
5536   SDLoc DL(N);
5537 
5538   SDValue Src = N->getOperand(0);
5539   EVT SrcVT = Src.getValueType();
5540 
5541   // TODO: We could try to match extracting the higher bytes, which would be
5542   // easier if i8 vectors weren't promoted to i32 vectors, particularly after
5543   // types are legalized. v4i8 -> v4f32 is probably the only case to worry
5544   // about in practice.
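       // For example, (f32 uint_to_fp (and i32:x, 0xff)) can be lowered to
       // cvt_f32_ubyte0 since only the low byte of the source can be nonzero.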
5545   if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) {
5546     if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
5547       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
5548       DCI.AddToWorklist(Cvt.getNode());
5549       return Cvt;
5550     }
5551   }
5552 
5553   return SDValue();
5554 }
5555 
5556 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
5557 
5558 // This is a variant of
5559 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
5560 //
5561 // The normal DAG combiner will do this, but only if the add has one use since
5562 // that would increase the number of instructions.
5563 //
5564 // This prevents us from seeing a constant offset that can be folded into a
5565 // memory instruction's addressing mode. If we know the resulting add offset of
5566 // a pointer can be folded into an addressing offset, we can replace the pointer
5567 // operand with the add of the new constant offset. This eliminates one of
5568 // the uses, and may allow the remaining use to also be simplified.
5569 //
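     // For example, when (add x, 16) has multiple uses, (shl (add x, 16), 2) is
     // rewritten to (add (shl x, 2), 64) so the 64 can be folded into the memory
     // instruction's immediate offset.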
5570 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
5571                                                unsigned AddrSpace,
5572                                                EVT MemVT,
5573                                                DAGCombinerInfo &DCI) const {
5574   SDValue N0 = N->getOperand(0);
5575   SDValue N1 = N->getOperand(1);
5576 
5577   // We only do this for the case where the add has multiple uses, since that
5578   // is when it is profitable; for a single use, defer to the standard combine.
5579   if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
5580       N0->hasOneUse())
5581     return SDValue();
5582 
5583   const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
5584   if (!CN1)
5585     return SDValue();
5586 
5587   const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
5588   if (!CAdd)
5589     return SDValue();
5590 
5591   // If the resulting offset is too large, we can't fold it into the addressing
5592   // mode offset.
5593   APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
5594   Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());
5595 
5596   AddrMode AM;
5597   AM.HasBaseReg = true;
5598   AM.BaseOffs = Offset.getSExtValue();
5599   if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
5600     return SDValue();
5601 
5602   SelectionDAG &DAG = DCI.DAG;
5603   SDLoc SL(N);
5604   EVT VT = N->getValueType(0);
5605 
5606   SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
5607   SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
5608 
5609   SDNodeFlags Flags;
5610   Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
5611                           (N0.getOpcode() == ISD::OR ||
5612                            N0->getFlags().hasNoUnsignedWrap()));
5613 
5614   return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
5615 }
5616 
5617 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
5618                                                   DAGCombinerInfo &DCI) const {
5619   SDValue Ptr = N->getBasePtr();
5620   SelectionDAG &DAG = DCI.DAG;
5621   SDLoc SL(N);
5622 
5623   // TODO: We could also do this for multiplies.
5624   if (Ptr.getOpcode() == ISD::SHL) {
5625     SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(),  N->getAddressSpace(),
5626                                           N->getMemoryVT(), DCI);
5627     if (NewPtr) {
5628       SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
5629 
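           // The base pointer is operand 1 of a load (chain, ptr, ...) and
           // operand 2 of a store (chain, value, ptr, ...).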
5630       NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
5631       return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
5632     }
5633   }
5634 
5635   return SDValue();
5636 }
5637 
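     // Returns true if applying the 32-bit bitwise op with this constant either
     // leaves the other operand unchanged or yields a constant, so that half of
     // a split 64-bit operation folds away.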
5638 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
5639   return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
5640          (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
5641          (Opc == ISD::XOR && Val == 0);
5642 }
5643 
5644 // Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This
5645 // will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit
5646 // integer combine opportunities since most 64-bit operations are decomposed
5647 // this way.  TODO: We won't want this for SALU especially if it is an inline
5648 // immediate.
5649 SDValue SITargetLowering::splitBinaryBitConstantOp(
5650   DAGCombinerInfo &DCI,
5651   const SDLoc &SL,
5652   unsigned Opc, SDValue LHS,
5653   const ConstantSDNode *CRHS) const {
5654   uint64_t Val = CRHS->getZExtValue();
5655   uint32_t ValLo = Lo_32(Val);
5656   uint32_t ValHi = Hi_32(Val);
5657   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
5658 
5659   if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
5660        bitOpWithConstantIsReducible(Opc, ValHi)) ||
5661       (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
5662     // If we need to materialize a 64-bit immediate, it will be split up later
5663     // anyway. Avoid creating the harder to understand 64-bit immediate
5664     // materialization.
5665     return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
5666   }
5667 
5668   return SDValue();
5669 }
5670 
5671 // Returns true if the argument is a boolean value which is not serialized into
5672 // memory or an argument and does not require v_cndmask_b32 to be deserialized.
5673 static bool isBoolSGPR(SDValue V) {
5674   if (V.getValueType() != MVT::i1)
5675     return false;
5676   switch (V.getOpcode()) {
5677   default: break;
5678   case ISD::SETCC:
5679   case ISD::AND:
5680   case ISD::OR:
5681   case ISD::XOR:
5682   case AMDGPUISD::FP_CLASS:
5683     return true;
5684   }
5685   return false;
5686 }
5687 
5688 SDValue SITargetLowering::performAndCombine(SDNode *N,
5689                                             DAGCombinerInfo &DCI) const {
5690   if (DCI.isBeforeLegalize())
5691     return SDValue();
5692 
5693   SelectionDAG &DAG = DCI.DAG;
5694   EVT VT = N->getValueType(0);
5695   SDValue LHS = N->getOperand(0);
5696   SDValue RHS = N->getOperand(1);
5697 
5698 
5699   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
5700   if (VT == MVT::i64 && CRHS) {
5701     if (SDValue Split
5702         = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
5703       return Split;
5704   }
5705 
5706   if (CRHS && VT == MVT::i32) {
5707     // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
5708     // nb = number of trailing zeroes in mask
5709     // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
5710     // given that we are selecting 8 or 16 bit fields starting at byte boundary.
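         // For example, (and (srl x, 8), 0xff00) becomes (shl (bfe x, 16, 8), 8).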
5711     uint64_t Mask = CRHS->getZExtValue();
5712     unsigned Bits = countPopulation(Mask);
5713     if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
5714         (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
5715       if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
5716         unsigned Shift = CShift->getZExtValue();
5717         unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
5718         unsigned Offset = NB + Shift;
5719         if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
5720           SDLoc SL(N);
5721           SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
5722                                     LHS->getOperand(0),
5723                                     DAG.getConstant(Offset, SL, MVT::i32),
5724                                     DAG.getConstant(Bits, SL, MVT::i32));
5725           EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
5726           SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
5727                                     DAG.getValueType(NarrowVT));
5728           SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
5729                                     DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
5730           return Shl;
5731         }
5732       }
5733     }
5734   }
5735 
5736   // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
5737   // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
5738   if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
5739     ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5740     ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
5741 
5742     SDValue X = LHS.getOperand(0);
5743     SDValue Y = RHS.getOperand(0);
5744     if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
5745       return SDValue();
5746 
5747     if (LCC == ISD::SETO) {
5748       if (X != LHS.getOperand(1))
5749         return SDValue();
5750 
5751       if (RCC == ISD::SETUNE) {
5752         const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
5753         if (!C1 || !C1->isInfinity() || C1->isNegative())
5754           return SDValue();
5755 
5756         const uint32_t Mask = SIInstrFlags::N_NORMAL |
5757                               SIInstrFlags::N_SUBNORMAL |
5758                               SIInstrFlags::N_ZERO |
5759                               SIInstrFlags::P_ZERO |
5760                               SIInstrFlags::P_SUBNORMAL |
5761                               SIInstrFlags::P_NORMAL;
5762 
5763         static_assert(((~(SIInstrFlags::S_NAN |
5764                           SIInstrFlags::Q_NAN |
5765                           SIInstrFlags::N_INFINITY |
5766                           SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
5767                       "mask not equal");
5768 
5769         SDLoc DL(N);
5770         return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
5771                            X, DAG.getConstant(Mask, DL, MVT::i32));
5772       }
5773     }
5774   }
5775 
5776   if (VT == MVT::i32 &&
5777       (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
5778     // and x, (sext cc from i1) => select cc, x, 0
5779     if (RHS.getOpcode() != ISD::SIGN_EXTEND)
5780       std::swap(LHS, RHS);
5781     if (isBoolSGPR(RHS.getOperand(0)))
5782       return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
5783                            LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
5784   }
5785 
5786   return SDValue();
5787 }
5788 
5789 SDValue SITargetLowering::performOrCombine(SDNode *N,
5790                                            DAGCombinerInfo &DCI) const {
5791   SelectionDAG &DAG = DCI.DAG;
5792   SDValue LHS = N->getOperand(0);
5793   SDValue RHS = N->getOperand(1);
5794 
5795   EVT VT = N->getValueType(0);
5796   if (VT == MVT::i1) {
5797     // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
5798     if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
5799         RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
5800       SDValue Src = LHS.getOperand(0);
5801       if (Src != RHS.getOperand(0))
5802         return SDValue();
5803 
5804       const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
5805       const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
5806       if (!CLHS || !CRHS)
5807         return SDValue();
5808 
5809       // Only 10 bits are used.
5810       static const uint32_t MaxMask = 0x3ff;
5811 
5812       uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
5813       SDLoc DL(N);
5814       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
5815                          Src, DAG.getConstant(NewMask, DL, MVT::i32));
5816     }
5817 
5818     return SDValue();
5819   }
5820 
5821   if (VT != MVT::i64)
5822     return SDValue();
5823 
5824   // TODO: This could be a generic combine with a predicate for extracting the
5825   // high half of an integer being free.
5826 
5827   // (or i64:x, (zero_extend i32:y)) ->
5828   //   i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
5829   if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
5830       RHS.getOpcode() != ISD::ZERO_EXTEND)
5831     std::swap(LHS, RHS);
5832 
5833   if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
5834     SDValue ExtSrc = RHS.getOperand(0);
5835     EVT SrcVT = ExtSrc.getValueType();
5836     if (SrcVT == MVT::i32) {
5837       SDLoc SL(N);
5838       SDValue LowLHS, HiBits;
5839       std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
5840       SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
5841 
5842       DCI.AddToWorklist(LowOr.getNode());
5843       DCI.AddToWorklist(HiBits.getNode());
5844 
5845       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
5846                                 LowOr, HiBits);
5847       return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
5848     }
5849   }
5850 
5851   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
5852   if (CRHS) {
5853     if (SDValue Split
5854           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
5855       return Split;
5856   }
5857 
5858   return SDValue();
5859 }
5860 
5861 SDValue SITargetLowering::performXorCombine(SDNode *N,
5862                                             DAGCombinerInfo &DCI) const {
5863   EVT VT = N->getValueType(0);
5864   if (VT != MVT::i64)
5865     return SDValue();
5866 
5867   SDValue LHS = N->getOperand(0);
5868   SDValue RHS = N->getOperand(1);
5869 
5870   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
5871   if (CRHS) {
5872     if (SDValue Split
5873           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
5874       return Split;
5875   }
5876 
5877   return SDValue();
5878 }
5879 
5880 // Instructions that will be lowered with a final instruction that zeros the
5881 // high result bits.
5882 // XXX - probably only need to list legal operations.
5883 static bool fp16SrcZerosHighBits(unsigned Opc) {
5884   switch (Opc) {
5885   case ISD::FADD:
5886   case ISD::FSUB:
5887   case ISD::FMUL:
5888   case ISD::FDIV:
5889   case ISD::FREM:
5890   case ISD::FMA:
5891   case ISD::FMAD:
5892   case ISD::FCANONICALIZE:
5893   case ISD::FP_ROUND:
5894   case ISD::UINT_TO_FP:
5895   case ISD::SINT_TO_FP:
5896   case ISD::FABS:
5897     // Fabs is lowered to a bit operation, but it's an and which will clear the
5898     // high bits anyway.
5899   case ISD::FSQRT:
5900   case ISD::FSIN:
5901   case ISD::FCOS:
5902   case ISD::FPOWI:
5903   case ISD::FPOW:
5904   case ISD::FLOG:
5905   case ISD::FLOG2:
5906   case ISD::FLOG10:
5907   case ISD::FEXP:
5908   case ISD::FEXP2:
5909   case ISD::FCEIL:
5910   case ISD::FTRUNC:
5911   case ISD::FRINT:
5912   case ISD::FNEARBYINT:
5913   case ISD::FROUND:
5914   case ISD::FFLOOR:
5915   case ISD::FMINNUM:
5916   case ISD::FMAXNUM:
5917   case AMDGPUISD::FRACT:
5918   case AMDGPUISD::CLAMP:
5919   case AMDGPUISD::COS_HW:
5920   case AMDGPUISD::SIN_HW:
5921   case AMDGPUISD::FMIN3:
5922   case AMDGPUISD::FMAX3:
5923   case AMDGPUISD::FMED3:
5924   case AMDGPUISD::FMAD_FTZ:
5925   case AMDGPUISD::RCP:
5926   case AMDGPUISD::RSQ:
5927   case AMDGPUISD::LDEXP:
5928     return true;
5929   default:
5930     // fcopysign, select and others may be lowered to 32-bit bit operations
5931     // which don't zero the high bits.
5932     return false;
5933   }
5934 }
5935 
5936 SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
5937                                                    DAGCombinerInfo &DCI) const {
5938   if (!Subtarget->has16BitInsts() ||
5939       DCI.getDAGCombineLevel() < AfterLegalizeDAG)
5940     return SDValue();
5941 
5942   EVT VT = N->getValueType(0);
5943   if (VT != MVT::i32)
5944     return SDValue();
5945 
5946   SDValue Src = N->getOperand(0);
5947   if (Src.getValueType() != MVT::i16)
5948     return SDValue();
5949 
5950   // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
5951   // FIXME: It is not universally true that the high bits are zeroed on gfx9.
5952   if (Src.getOpcode() == ISD::BITCAST) {
5953     SDValue BCSrc = Src.getOperand(0);
5954     if (BCSrc.getValueType() == MVT::f16 &&
5955         fp16SrcZerosHighBits(BCSrc.getOpcode()))
5956       return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
5957   }
5958 
5959   return SDValue();
5960 }
5961 
5962 SDValue SITargetLowering::performClassCombine(SDNode *N,
5963                                               DAGCombinerInfo &DCI) const {
5964   SelectionDAG &DAG = DCI.DAG;
5965   SDValue Mask = N->getOperand(1);
5966 
5967   // fp_class x, 0 -> false
5968   if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
5969     if (CMask->isNullValue())
5970       return DAG.getConstant(0, SDLoc(N), MVT::i1);
5971   }
5972 
5973   if (N->getOperand(0).isUndef())
5974     return DAG.getUNDEF(MVT::i1);
5975 
5976   return SDValue();
5977 }
5978 
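     // The quiet/signaling NaN distinction only matters when floating point
     // exceptions are enabled; otherwise any value can be treated as never
     // being a signaling NaN.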
5979 static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
5980   if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions())
5981     return true;
5982 
5983   return DAG.isKnownNeverNaN(Op);
5984 }
5985 
5986 static bool isCanonicalized(SelectionDAG &DAG, SDValue Op,
5987                             const SISubtarget *ST, unsigned MaxDepth=5) {
5988   // If source is a result of another standard FP operation it is already in
5989   // canonical form.
5990 
5991   switch (Op.getOpcode()) {
5992   default:
5993     break;
5994 
5995   // These will flush denorms if required.
5996   case ISD::FADD:
5997   case ISD::FSUB:
5998   case ISD::FMUL:
5999   case ISD::FSQRT:
6000   case ISD::FCEIL:
6001   case ISD::FFLOOR:
6002   case ISD::FMA:
6003   case ISD::FMAD:
6004 
6005   case ISD::FCANONICALIZE:
6006     return true;
6007 
6008   case ISD::FP_ROUND:
6009     return Op.getValueType().getScalarType() != MVT::f16 ||
6010            ST->hasFP16Denormals();
6011 
6012   case ISD::FP_EXTEND:
6013     return Op.getOperand(0).getValueType().getScalarType() != MVT::f16 ||
6014            ST->hasFP16Denormals();
6015 
6016   case ISD::FP16_TO_FP:
6017   case ISD::FP_TO_FP16:
6018     return ST->hasFP16Denormals();
6019 
6020   // These can/will be lowered or combined as bit operations.
6021   // We need to check their inputs recursively to handle them.
6022   case ISD::FNEG:
6023   case ISD::FABS:
6024     return (MaxDepth > 0) &&
6025            isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1);
6026 
6027   case ISD::FSIN:
6028   case ISD::FCOS:
6029   case ISD::FSINCOS:
6030     return Op.getValueType().getScalarType() != MVT::f16;
6031 
6032   // On pre-GFX9 targets V_MIN_F32 and others do not flush denorms.
6033   // For such targets we need to check their inputs recursively.
6034   case ISD::FMINNUM:
6035   case ISD::FMAXNUM:
6036   case ISD::FMINNAN:
6037   case ISD::FMAXNAN:
6038 
6039     if (ST->supportsMinMaxDenormModes() &&
6040         DAG.isKnownNeverNaN(Op.getOperand(0)) &&
6041         DAG.isKnownNeverNaN(Op.getOperand(1)))
6042       return true;
6043 
6044     return (MaxDepth > 0) &&
6045            isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1) &&
6046            isCanonicalized(DAG, Op.getOperand(1), ST, MaxDepth - 1);
6047 
6048   case ISD::ConstantFP: {
6049     auto F = cast<ConstantFPSDNode>(Op)->getValueAPF();
6050     return !F.isDenormal() && !(F.isNaN() && F.isSignaling());
6051   }
6052   }
6053   return false;
6054 }
6055 
6056 // Constant fold canonicalize.
6057 SDValue SITargetLowering::performFCanonicalizeCombine(
6058   SDNode *N,
6059   DAGCombinerInfo &DCI) const {
6060   SelectionDAG &DAG = DCI.DAG;
6061   ConstantFPSDNode *CFP = isConstOrConstSplatFP(N->getOperand(0));
6062 
6063   if (!CFP) {
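         // The canonicalize can be dropped if the non-constant source is
         // already known to be in canonical form; see the checks below.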
6064     SDValue N0 = N->getOperand(0);
6065     EVT VT = N0.getValueType().getScalarType();
6066     auto ST = getSubtarget();
6067 
6068     if (((VT == MVT::f32 && ST->hasFP32Denormals()) ||
6069          (VT == MVT::f64 && ST->hasFP64Denormals()) ||
6070          (VT == MVT::f16 && ST->hasFP16Denormals())) &&
6071         DAG.isKnownNeverNaN(N0))
6072       return N0;
6073 
6074     bool IsIEEEMode = Subtarget->enableIEEEBit(DAG.getMachineFunction());
6075 
6076     if ((IsIEEEMode || isKnownNeverSNan(DAG, N0)) &&
6077         isCanonicalized(DAG, N0, ST))
6078       return N0;
6079 
6080     return SDValue();
6081   }
6082 
6083   const APFloat &C = CFP->getValueAPF();
6084 
6085   // Flush denormals to 0 if not enabled.
6086   if (C.isDenormal()) {
6087     EVT VT = N->getValueType(0);
6088     EVT SVT = VT.getScalarType();
6089     if (SVT == MVT::f32 && !Subtarget->hasFP32Denormals())
6090       return DAG.getConstantFP(0.0, SDLoc(N), VT);
6091 
6092     if (SVT == MVT::f64 && !Subtarget->hasFP64Denormals())
6093       return DAG.getConstantFP(0.0, SDLoc(N), VT);
6094 
6095     if (SVT == MVT::f16 && !Subtarget->hasFP16Denormals())
6096       return DAG.getConstantFP(0.0, SDLoc(N), VT);
6097   }
6098 
6099   if (C.isNaN()) {
6100     EVT VT = N->getValueType(0);
6101     APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
6102     if (C.isSignaling()) {
6103       // Quiet a signaling NaN.
6104       return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
6105     }
6106 
6107     // Make sure it is the canonical NaN bitpattern.
6108     //
6109     // TODO: Can we use -1 as the canonical NaN value since it's an inline
6110     // immediate?
6111     if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
6112       return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
6113   }
6114 
6115   return N->getOperand(0);
6116 }
6117 
6118 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
6119   switch (Opc) {
6120   case ISD::FMAXNUM:
6121     return AMDGPUISD::FMAX3;
6122   case ISD::SMAX:
6123     return AMDGPUISD::SMAX3;
6124   case ISD::UMAX:
6125     return AMDGPUISD::UMAX3;
6126   case ISD::FMINNUM:
6127     return AMDGPUISD::FMIN3;
6128   case ISD::SMIN:
6129     return AMDGPUISD::SMIN3;
6130   case ISD::UMIN:
6131     return AMDGPUISD::UMIN3;
6132   default:
6133     llvm_unreachable("Not a min/max opcode");
6134   }
6135 }
6136 
6137 SDValue SITargetLowering::performIntMed3ImmCombine(
6138   SelectionDAG &DAG, const SDLoc &SL,
6139   SDValue Op0, SDValue Op1, bool Signed) const {
6140   ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
6141   if (!K1)
6142     return SDValue();
6143 
6144   ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
6145   if (!K0)
6146     return SDValue();
6147 
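       // min(max(x, K0), K1) only forms a med3 when K0 < K1; otherwise give up.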
6148   if (Signed) {
6149     if (K0->getAPIntValue().sge(K1->getAPIntValue()))
6150       return SDValue();
6151   } else {
6152     if (K0->getAPIntValue().uge(K1->getAPIntValue()))
6153       return SDValue();
6154   }
6155 
6156   EVT VT = K0->getValueType(0);
6157   unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
6158   if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
6159     return DAG.getNode(Med3Opc, SL, VT,
6160                        Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
6161   }
6162 
6163   // If there isn't a 16-bit med3 operation, convert to 32-bit.
6164   MVT NVT = MVT::i32;
6165   unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6166 
6167   SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
6168   SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
6169   SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
6170 
6171   SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
6172   return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
6173 }
6174 
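     // Returns the ConstantFP node if Op is either a scalar FP constant or a
     // splat build_vector of one, otherwise null.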
6175 static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
6176   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
6177     return C;
6178 
6179   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
6180     if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
6181       return C;
6182   }
6183 
6184   return nullptr;
6185 }
6186 
6187 SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
6188                                                   const SDLoc &SL,
6189                                                   SDValue Op0,
6190                                                   SDValue Op1) const {
6191   ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
6192   if (!K1)
6193     return SDValue();
6194 
6195   ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
6196   if (!K0)
6197     return SDValue();
6198 
6199   // Ordered >= (although NaN inputs should have folded away by now).
6200   APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
6201   if (Cmp == APFloat::cmpGreaterThan)
6202     return SDValue();
6203 
6204   // TODO: Check IEEE bit enabled?
6205   EVT VT = Op0.getValueType();
6206   if (Subtarget->enableDX10Clamp()) {
6207     // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
6208     // hardware fmed3 behavior converting to a min.
6209     // FIXME: Should this be allowing -0.0?
6210     if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
6211       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
6212   }
6213 
6214   // med3 for f16 is only available on gfx9+, and not available for v2f16.
6215   if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
6216     // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
6217     // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
6218     // then give the other result, which is different from med3 with a NaN
6219     // input.
6220     SDValue Var = Op0.getOperand(0);
6221     if (!isKnownNeverSNan(DAG, Var))
6222       return SDValue();
6223 
6224     return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
6225                        Var, SDValue(K0, 0), SDValue(K1, 0));
6226   }
6227 
6228   return SDValue();
6229 }
6230 
6231 SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
6232                                                DAGCombinerInfo &DCI) const {
6233   SelectionDAG &DAG = DCI.DAG;
6234 
6235   EVT VT = N->getValueType(0);
6236   unsigned Opc = N->getOpcode();
6237   SDValue Op0 = N->getOperand(0);
6238   SDValue Op1 = N->getOperand(1);
6239 
6240   // Only do this if the inner op has one use since this will just increase
6241   // register pressure for no benefit.
6242 
6243 
6244   if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
6245       VT != MVT::f64 &&
6246       ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) {
6247     // max(max(a, b), c) -> max3(a, b, c)
6248     // min(min(a, b), c) -> min3(a, b, c)
6249     if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
6250       SDLoc DL(N);
6251       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
6252                          DL,
6253                          N->getValueType(0),
6254                          Op0.getOperand(0),
6255                          Op0.getOperand(1),
6256                          Op1);
6257     }
6258 
6259     // Try commuted.
6260     // max(a, max(b, c)) -> max3(a, b, c)
6261     // min(a, min(b, c)) -> min3(a, b, c)
6262     if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
6263       SDLoc DL(N);
6264       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
6265                          DL,
6266                          N->getValueType(0),
6267                          Op0,
6268                          Op1.getOperand(0),
6269                          Op1.getOperand(1));
6270     }
6271   }
6272 
6273   // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
6274   if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
6275     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
6276       return Med3;
6277   }
6278 
6279   if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
6280     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
6281       return Med3;
6282   }
6283 
6284   // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
6285   if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
6286        (Opc == AMDGPUISD::FMIN_LEGACY &&
6287         Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
6288       (VT == MVT::f32 || VT == MVT::f64 ||
6289        (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
6290        (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
6291       Op0.hasOneUse()) {
6292     if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
6293       return Res;
6294   }
6295 
6296   return SDValue();
6297 }
6298 
6299 static bool isClampZeroToOne(SDValue A, SDValue B) {
6300   if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
6301     if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
6302       // FIXME: Should this be allowing -0.0?
6303       return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
6304              (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
6305     }
6306   }
6307 
6308   return false;
6309 }
6310 
6311 // FIXME: Should only worry about snans for version with chain.
6312 SDValue SITargetLowering::performFMed3Combine(SDNode *N,
6313                                               DAGCombinerInfo &DCI) const {
6314   EVT VT = N->getValueType(0);
6315   // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
6316   // NaNs. With a NaN input, the order of the operands may change the result.
6317 
6318   SelectionDAG &DAG = DCI.DAG;
6319   SDLoc SL(N);
6320 
6321   SDValue Src0 = N->getOperand(0);
6322   SDValue Src1 = N->getOperand(1);
6323   SDValue Src2 = N->getOperand(2);
6324 
6325   if (isClampZeroToOne(Src0, Src1)) {
6326     // const_a, const_b, x -> clamp is safe in all cases including signaling
6327     // nans.
6328     // FIXME: Should this be allowing -0.0?
6329     return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
6330   }
6331 
6332   // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
6333   // handling no dx10-clamp?
6334   if (Subtarget->enableDX10Clamp()) {
6335     // If NaNs are clamped to 0, we are free to reorder the inputs.
6336 
6337     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
6338       std::swap(Src0, Src1);
6339 
6340     if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
6341       std::swap(Src1, Src2);
6342 
6343     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
6344       std::swap(Src0, Src1);
6345 
6346     if (isClampZeroToOne(Src1, Src2))
6347       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
6348   }
6349 
6350   return SDValue();
6351 }
6352 
6353 SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
6354                                                  DAGCombinerInfo &DCI) const {
6355   SDValue Src0 = N->getOperand(0);
6356   SDValue Src1 = N->getOperand(1);
6357   if (Src0.isUndef() && Src1.isUndef())
6358     return DCI.DAG.getUNDEF(N->getValueType(0));
6359   return SDValue();
6360 }
6361 
6362 SDValue SITargetLowering::performExtractVectorEltCombine(
6363   SDNode *N, DAGCombinerInfo &DCI) const {
6364   SDValue Vec = N->getOperand(0);
6365 
6366   SelectionDAG &DAG = DCI.DAG;
6367   if (Vec.getOpcode() == ISD::FNEG && allUsesHaveSourceMods(N)) {
6368     SDLoc SL(N);
6369     EVT EltVT = N->getValueType(0);
6370     SDValue Idx = N->getOperand(1);
6371     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
6372                               Vec.getOperand(0), Idx);
6373     return DAG.getNode(ISD::FNEG, SL, EltVT, Elt);
6374   }
6375 
6376   return SDValue();
6377 }
6378 
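     // If Hi is a bitcast from f16 and Lo is a constant or undef, rewrite both
     // elements as f16 so the build_vector can be done in v2f16 and bitcast back.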
6379 static bool convertBuildVectorCastElt(SelectionDAG &DAG,
6380                                       SDValue &Lo, SDValue &Hi) {
6381   if (Hi.getOpcode() == ISD::BITCAST &&
6382       Hi.getOperand(0).getValueType() == MVT::f16 &&
6383       (isa<ConstantSDNode>(Lo) || Lo.isUndef())) {
6384     Lo = DAG.getNode(ISD::BITCAST, SDLoc(Lo), MVT::f16, Lo);
6385     Hi = Hi.getOperand(0);
6386     return true;
6387   }
6388 
6389   return false;
6390 }
6391 
6392 SDValue SITargetLowering::performBuildVectorCombine(
6393   SDNode *N, DAGCombinerInfo &DCI) const {
6394   SDLoc SL(N);
6395 
6396   if (!isTypeLegal(MVT::v2i16))
6397     return SDValue();
6398   SelectionDAG &DAG = DCI.DAG;
6399   EVT VT = N->getValueType(0);
6400 
6401   if (VT == MVT::v2i16) {
6402     SDValue Lo = N->getOperand(0);
6403     SDValue Hi = N->getOperand(1);
6404 
6405     // v2i16 build_vector (const|undef), (bitcast f16:$x)
6406     // -> bitcast (v2f16 build_vector (const|undef), $x)
6407     if (convertBuildVectorCastElt(DAG, Lo, Hi)) {
6408       SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Lo, Hi  });
6409       return DAG.getNode(ISD::BITCAST, SL, VT, NewVec);
6410     }
6411 
6412     if (convertBuildVectorCastElt(DAG, Hi, Lo)) {
6413       SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Hi, Lo  });
6414       return DAG.getNode(ISD::BITCAST, SL, VT, NewVec);
6415     }
6416   }
6417 
6418   return SDValue();
6419 }
6420 
6421 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
6422                                           const SDNode *N0,
6423                                           const SDNode *N1) const {
6424   EVT VT = N0->getValueType(0);
6425 
6426   // Only do this if we are not trying to support denormals. v_mad_f32 does not
6427   // support denormals ever.
6428   if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
6429       (VT == MVT::f16 && !Subtarget->hasFP16Denormals()))
6430     return ISD::FMAD;
6431 
6432   const TargetOptions &Options = DAG.getTarget().Options;
6433   if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
6434        (N0->getFlags().hasUnsafeAlgebra() &&
6435         N1->getFlags().hasUnsafeAlgebra())) &&
6436       isFMAFasterThanFMulAndFAdd(VT)) {
6437     return ISD::FMA;
6438   }
6439 
6440   return 0;
6441 }
6442 
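     // Builds a MAD_I64_I32 / MAD_U64_U32 node, which produces a 64-bit result
     // plus a carry-out, and truncates the result to the requested type.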
6443 static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
6444                            EVT VT,
6445                            SDValue N0, SDValue N1, SDValue N2,
6446                            bool Signed) {
6447   unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
6448   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
6449   SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
6450   return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
6451 }
6452 
6453 SDValue SITargetLowering::performAddCombine(SDNode *N,
6454                                             DAGCombinerInfo &DCI) const {
6455   SelectionDAG &DAG = DCI.DAG;
6456   EVT VT = N->getValueType(0);
6457   SDLoc SL(N);
6458   SDValue LHS = N->getOperand(0);
6459   SDValue RHS = N->getOperand(1);
6460 
6461   if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
6462       && Subtarget->hasMad64_32() &&
6463       !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
6464       VT.getScalarSizeInBits() <= 64) {
6465     if (LHS.getOpcode() != ISD::MUL)
6466       std::swap(LHS, RHS);
6467 
6468     SDValue MulLHS = LHS.getOperand(0);
6469     SDValue MulRHS = LHS.getOperand(1);
6470     SDValue AddRHS = RHS;
6471 
6472     // TODO: Maybe restrict if SGPR inputs.
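         // Use the unsigned 32x32 mad when both factors are known to fit in 32
         // unsigned bits, and the signed form below when they fit as signed
         // values.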
6473     if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
6474         numBitsUnsigned(MulRHS, DAG) <= 32) {
6475       MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
6476       MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
6477       AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
6478       return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
6479     }
6480 
6481     if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
6482       MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
6483       MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
6484       AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
6485       return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
6486     }
6487 
6488     return SDValue();
6489   }
6490 
6491   if (VT != MVT::i32)
6492     return SDValue();
6493 
6494   // add x, zext (setcc) => addcarry x, 0, setcc
6495   // add x, sext (setcc) => subcarry x, 0, setcc
6496   unsigned Opc = LHS.getOpcode();
6497   if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
6498       Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
6499     std::swap(RHS, LHS);
6500 
6501   Opc = RHS.getOpcode();
6502   switch (Opc) {
6503   default: break;
6504   case ISD::ZERO_EXTEND:
6505   case ISD::SIGN_EXTEND:
6506   case ISD::ANY_EXTEND: {
6507     auto Cond = RHS.getOperand(0);
6508     if (!isBoolSGPR(Cond))
6509       break;
6510     SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
6511     SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
6512     Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
6513     return DAG.getNode(Opc, SL, VTList, Args);
6514   }
6515   case ISD::ADDCARRY: {
6516     // add x, (addcarry y, 0, cc) => addcarry x, y, cc
6517     auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
6518     if (!C || C->getZExtValue() != 0) break;
6519     SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
6520     return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
6521   }
6522   }
6523   return SDValue();
6524 }
6525 
6526 SDValue SITargetLowering::performSubCombine(SDNode *N,
6527                                             DAGCombinerInfo &DCI) const {
6528   SelectionDAG &DAG = DCI.DAG;
6529   EVT VT = N->getValueType(0);
6530 
6531   if (VT != MVT::i32)
6532     return SDValue();
6533 
6534   SDLoc SL(N);
6535   SDValue LHS = N->getOperand(0);
6536   SDValue RHS = N->getOperand(1);
6537 
6538   unsigned Opc = LHS.getOpcode();
6539   if (Opc != ISD::SUBCARRY)
6540     std::swap(RHS, LHS);
6541 
6542   if (LHS.getOpcode() == ISD::SUBCARRY) {
6543     // sub (subcarry x, 0, cc), y => subcarry x, y, cc
6544     auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
6545     if (!C || C->getZExtValue() != 0)
6546       return SDValue();
6547     SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
6548     return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
6549   }
6550   return SDValue();
6551 }
6552 
6553 SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
6554   DAGCombinerInfo &DCI) const {
6555 
6556   if (N->getValueType(0) != MVT::i32)
6557     return SDValue();
6558 
6559   auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
6560   if (!C || C->getZExtValue() != 0)
6561     return SDValue();
6562 
6563   SelectionDAG &DAG = DCI.DAG;
6564   SDValue LHS = N->getOperand(0);
6565 
6566   // addcarry (add x, y), 0, cc => addcarry x, y, cc
6567   // subcarry (sub x, y), 0, cc => subcarry x, y, cc
6568   unsigned LHSOpc = LHS.getOpcode();
6569   unsigned Opc = N->getOpcode();
6570   if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
6571       (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
6572     SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
6573     return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
6574   }
6575   return SDValue();
6576 }
6577 
6578 SDValue SITargetLowering::performFAddCombine(SDNode *N,
6579                                              DAGCombinerInfo &DCI) const {
6580   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
6581     return SDValue();
6582 
6583   SelectionDAG &DAG = DCI.DAG;
6584   EVT VT = N->getValueType(0);
6585 
6586   SDLoc SL(N);
6587   SDValue LHS = N->getOperand(0);
6588   SDValue RHS = N->getOperand(1);
6589 
6590   // These should really be instruction patterns, but writing patterns with
6591   // source modifiers is a pain.
6592 
6593   // fadd (fadd (a, a), b) -> mad 2.0, a, b
6594   if (LHS.getOpcode() == ISD::FADD) {
6595     SDValue A = LHS.getOperand(0);
6596     if (A == LHS.getOperand(1)) {
6597       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
6598       if (FusedOp != 0) {
6599         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
6600         return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
6601       }
6602     }
6603   }
6604 
6605   // fadd (b, fadd (a, a)) -> mad 2.0, a, b
6606   if (RHS.getOpcode() == ISD::FADD) {
6607     SDValue A = RHS.getOperand(0);
6608     if (A == RHS.getOperand(1)) {
6609       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
6610       if (FusedOp != 0) {
6611         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
6612         return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
6613       }
6614     }
6615   }
6616 
6617   return SDValue();
6618 }
6619 
6620 SDValue SITargetLowering::performFSubCombine(SDNode *N,
6621                                              DAGCombinerInfo &DCI) const {
6622   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
6623     return SDValue();
6624 
6625   SelectionDAG &DAG = DCI.DAG;
6626   SDLoc SL(N);
6627   EVT VT = N->getValueType(0);
6628   assert(!VT.isVector());
6629 
6630   // Try to get the fneg to fold into the source modifier. This undoes generic
6631   // DAG combines and folds them into the mad.
6632   //
6633   // Only do this if we are not trying to support denormals. v_mad_f32 does
6634   // not support denormals ever.
6635   SDValue LHS = N->getOperand(0);
6636   SDValue RHS = N->getOperand(1);
6637   if (LHS.getOpcode() == ISD::FADD) {
6638     // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
6639     SDValue A = LHS.getOperand(0);
6640     if (A == LHS.getOperand(1)) {
6641       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
6642       if (FusedOp != 0){
6643         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
6644         SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
6645 
6646         return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
6647       }
6648     }
6649   }
6650 
6651   if (RHS.getOpcode() == ISD::FADD) {
6652     // (fsub c, (fadd a, a)) -> mad -2.0, a, c
6653 
6654     SDValue A = RHS.getOperand(0);
6655     if (A == RHS.getOperand(1)) {
6656       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
6657       if (FusedOp != 0){
6658         const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
6659         return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
6660       }
6661     }
6662   }
6663 
6664   return SDValue();
6665 }
6666 
6667 SDValue SITargetLowering::performSetCCCombine(SDNode *N,
6668                                               DAGCombinerInfo &DCI) const {
6669   SelectionDAG &DAG = DCI.DAG;
6670   SDLoc SL(N);
6671 
6672   SDValue LHS = N->getOperand(0);
6673   SDValue RHS = N->getOperand(1);
6674   EVT VT = LHS.getValueType();
6675   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
6676 
6677   auto CRHS = dyn_cast<ConstantSDNode>(RHS);
6678   if (!CRHS) {
6679     CRHS = dyn_cast<ConstantSDNode>(LHS);
6680     if (CRHS) {
6681       std::swap(LHS, RHS);
6682       CC = getSetCCSwappedOperands(CC);
6683     }
6684   }
6685 
6686   if (CRHS && VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
6687       isBoolSGPR(LHS.getOperand(0))) {
6688     // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
6689     // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
6690     // setcc (sext from i1 cc),  0, eq|sge|ule) => not cc => xor cc, -1
6691     // setcc (sext from i1 cc),  0, ne|ugt|slt) => cc
6692     if ((CRHS->isAllOnesValue() &&
6693          (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
6694         (CRHS->isNullValue() &&
6695          (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
6696       return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
6697                          DAG.getConstant(-1, SL, MVT::i1));
6698     if ((CRHS->isAllOnesValue() &&
6699          (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
6700         (CRHS->isNullValue() &&
6701          (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
6702       return LHS.getOperand(0);
6703   }
6704 
6705   if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
6706                                            VT != MVT::f16))
6707     return SDValue();
6708 
6709   // Match isinf pattern
6710   // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
6711   if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) {
6712     const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
6713     if (!CRHS)
6714       return SDValue();
6715 
6716     const APFloat &APF = CRHS->getValueAPF();
6717     if (APF.isInfinity() && !APF.isNegative()) {
6718       unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
6719       return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
6720                          DAG.getConstant(Mask, SL, MVT::i32));
6721     }
6722   }
6723 
6724   return SDValue();
6725 }
6726 
6727 SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
6728                                                      DAGCombinerInfo &DCI) const {
6729   SelectionDAG &DAG = DCI.DAG;
6730   SDLoc SL(N);
6731   unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
6732 
6733   SDValue Src = N->getOperand(0);
6734   SDValue Srl = N->getOperand(0);
6735   if (Srl.getOpcode() == ISD::ZERO_EXTEND)
6736     Srl = Srl.getOperand(0);
6737 
6738   // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
6739   if (Srl.getOpcode() == ISD::SRL) {
6740     // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
6741     // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
6742     // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
6743 
6744     if (const ConstantSDNode *C =
6745         dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
6746       Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
6747                                EVT(MVT::i32));
6748 
6749       unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
6750       if (SrcOffset < 32 && SrcOffset % 8 == 0) {
6751         return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
6752                            MVT::f32, Srl);
6753       }
6754     }
6755   }
6756 
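       // Only the byte selected by the CVT_F32_UBYTEn opcode is demanded from
       // the source, so try to simplify it based on that demanded-bits mask.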
6757   APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
6758 
6759   KnownBits Known;
6760   TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
6761                                         !DCI.isBeforeLegalizeOps());
6762   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6763   if (TLI.ShrinkDemandedConstant(Src, Demanded, TLO) ||
6764       TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
6765     DCI.CommitTargetLoweringOpt(TLO);
6766   }
6767 
6768   return SDValue();
6769 }
6770 
6771 SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
6772                                             DAGCombinerInfo &DCI) const {
6773   switch (N->getOpcode()) {
6774   default:
6775     return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
6776   case ISD::ADD:
6777     return performAddCombine(N, DCI);
6778   case ISD::SUB:
6779     return performSubCombine(N, DCI);
6780   case ISD::ADDCARRY:
6781   case ISD::SUBCARRY:
6782     return performAddCarrySubCarryCombine(N, DCI);
6783   case ISD::FADD:
6784     return performFAddCombine(N, DCI);
6785   case ISD::FSUB:
6786     return performFSubCombine(N, DCI);
6787   case ISD::SETCC:
6788     return performSetCCCombine(N, DCI);
6789   case ISD::FMAXNUM:
6790   case ISD::FMINNUM:
6791   case ISD::SMAX:
6792   case ISD::SMIN:
6793   case ISD::UMAX:
6794   case ISD::UMIN:
6795   case AMDGPUISD::FMIN_LEGACY:
6796   case AMDGPUISD::FMAX_LEGACY: {
6797     if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
6798         getTargetMachine().getOptLevel() > CodeGenOpt::None)
6799       return performMinMaxCombine(N, DCI);
6800     break;
6801   }
6802   case ISD::LOAD:
6803   case ISD::STORE:
6804   case ISD::ATOMIC_LOAD:
6805   case ISD::ATOMIC_STORE:
6806   case ISD::ATOMIC_CMP_SWAP:
6807   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
6808   case ISD::ATOMIC_SWAP:
6809   case ISD::ATOMIC_LOAD_ADD:
6810   case ISD::ATOMIC_LOAD_SUB:
6811   case ISD::ATOMIC_LOAD_AND:
6812   case ISD::ATOMIC_LOAD_OR:
6813   case ISD::ATOMIC_LOAD_XOR:
6814   case ISD::ATOMIC_LOAD_NAND:
6815   case ISD::ATOMIC_LOAD_MIN:
6816   case ISD::ATOMIC_LOAD_MAX:
6817   case ISD::ATOMIC_LOAD_UMIN:
6818   case ISD::ATOMIC_LOAD_UMAX:
6819   case AMDGPUISD::ATOMIC_INC:
6820   case AMDGPUISD::ATOMIC_DEC: // TODO: Target mem intrinsics.
6821     if (DCI.isBeforeLegalize())
6822       break;
6823     return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
6824   case ISD::AND:
6825     return performAndCombine(N, DCI);
6826   case ISD::OR:
6827     return performOrCombine(N, DCI);
6828   case ISD::XOR:
6829     return performXorCombine(N, DCI);
6830   case ISD::ZERO_EXTEND:
6831     return performZeroExtendCombine(N, DCI);
6832   case AMDGPUISD::FP_CLASS:
6833     return performClassCombine(N, DCI);
6834   case ISD::FCANONICALIZE:
6835     return performFCanonicalizeCombine(N, DCI);
6836   case AMDGPUISD::FRACT:
6837   case AMDGPUISD::RCP:
6838   case AMDGPUISD::RSQ:
6839   case AMDGPUISD::RCP_LEGACY:
6840   case AMDGPUISD::RSQ_LEGACY:
6841   case AMDGPUISD::RSQ_CLAMP:
6842   case AMDGPUISD::LDEXP: {
6843     SDValue Src = N->getOperand(0);
6844     if (Src.isUndef())
6845       return Src;
6846     break;
6847   }
6848   case ISD::SINT_TO_FP:
6849   case ISD::UINT_TO_FP:
6850     return performUCharToFloatCombine(N, DCI);
6851   case AMDGPUISD::CVT_F32_UBYTE0:
6852   case AMDGPUISD::CVT_F32_UBYTE1:
6853   case AMDGPUISD::CVT_F32_UBYTE2:
6854   case AMDGPUISD::CVT_F32_UBYTE3:
6855     return performCvtF32UByteNCombine(N, DCI);
6856   case AMDGPUISD::FMED3:
6857     return performFMed3Combine(N, DCI);
6858   case AMDGPUISD::CVT_PKRTZ_F16_F32:
6859     return performCvtPkRTZCombine(N, DCI);
6860   case ISD::SCALAR_TO_VECTOR: {
6861     SelectionDAG &DAG = DCI.DAG;
6862     EVT VT = N->getValueType(0);
6863 
6864     // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
6865     if (VT == MVT::v2i16 || VT == MVT::v2f16) {
6866       SDLoc SL(N);
6867       SDValue Src = N->getOperand(0);
6868       EVT EltVT = Src.getValueType();
6869       if (EltVT == MVT::f16)
6870         Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
6871 
6872       SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
6873       return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
6874     }
6875 
6876     break;
6877   }
6878   case ISD::EXTRACT_VECTOR_ELT:
6879     return performExtractVectorEltCombine(N, DCI);
6880   case ISD::BUILD_VECTOR:
6881     return performBuildVectorCombine(N, DCI);
6882   }
6883   return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
6884 }
6885 
6886 /// \brief Helper function for adjustWritemask
6887 static unsigned SubIdx2Lane(unsigned Idx) {
6888   switch (Idx) {
6889   default: return 0;
6890   case AMDGPU::sub0: return 0;
6891   case AMDGPU::sub1: return 1;
6892   case AMDGPU::sub2: return 2;
6893   case AMDGPU::sub3: return 3;
6894   }
6895 }
6896 
6897 /// \brief Adjust the writemask of MIMG instructions
6898 SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
6899                                           SelectionDAG &DAG) const {
6900   SDNode *Users[4] = { nullptr };
6901   unsigned Lane = 0;
6902   unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 2 : 3;
6903   unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
6904   unsigned NewDmask = 0;
6905   bool HasChain = Node->getNumValues() > 1;
6906 
6907   if (OldDmask == 0) {
6908     // These are folded out, but on the off chance it happens, don't assert.
6909     return Node;
6910   }
6911 
6912   // Try to figure out the used register components
6913   for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
6914        I != E; ++I) {
6915 
6916     // Don't look at users of the chain.
6917     if (I.getUse().getResNo() != 0)
6918       continue;
6919 
6920     // Abort if we can't understand the usage
6921     if (!I->isMachineOpcode() ||
6922         I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
6923       return Node;
6924 
6925     // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
6926     // Note that subregs are packed, i.e. Lane==0 is the first bit set
6927     // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
6928     // set, etc.
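    // For example, with OldDmask == 0b1010 (Y and W enabled), Lane == 0
    // corresponds to Y and Lane == 1 to W.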
6929     Lane = SubIdx2Lane(I->getConstantOperandVal(1));
6930 
6931     // Set which texture component corresponds to the lane.
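    // E.g. for OldDmask == 0b1010 and Lane == 1, the loop below ends with
    // Comp == 3 (the W component).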
6932     unsigned Comp;
6933     for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
6934       Comp = countTrailingZeros(Dmask);
6935       Dmask &= ~(1 << Comp);
6936     }
6937 
6938     // Abort if we have more than one user per component
6939     if (Users[Lane])
6940       return Node;
6941 
6942     Users[Lane] = *I;
6943     NewDmask |= 1 << Comp;
6944   }
6945 
6946   // Abort if there's no change
6947   if (NewDmask == OldDmask)
6948     return Node;
6949 
6950   unsigned BitsSet = countPopulation(NewDmask);
6951 
6952   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
6953   int NewOpcode = AMDGPU::getMaskedMIMGOp(*TII,
6954                                           Node->getMachineOpcode(), BitsSet);
6955   assert(NewOpcode != -1 &&
6956          NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
6957          "failed to find equivalent MIMG op");
6958 
6959   // Adjust the writemask in the node
6960   SmallVector<SDValue, 12> Ops;
6961   Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
6962   Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
6963   Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
6964 
6965   MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
6966 
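  // A single used component gives a scalar result; a three-component result
  // is widened to four elements, since there is no three-component variant.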
6967   MVT ResultVT = BitsSet == 1 ?
6968     SVT : MVT::getVectorVT(SVT, BitsSet == 3 ? 4 : BitsSet);
6969   SDVTList NewVTList = HasChain ?
6970     DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);
6971 
6973   MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
6974                                               NewVTList, Ops);
6975 
6976   if (HasChain) {
6977     // Update chain.
6978     NewNode->setMemRefs(Node->memoperands_begin(), Node->memoperands_end());
6979     DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
6980   }
6981 
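  // With only one component used the result is scalar, so replace the single
  // extract_subreg user with a plain COPY of the new value.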
6982   if (BitsSet == 1) {
6983     assert(Node->hasNUsesOfValue(1, 0));
6984     SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
6985                                       SDLoc(Node), Users[Lane]->getValueType(0),
6986                                       SDValue(NewNode, 0));
6987     DAG.ReplaceAllUsesWith(Users[Lane], Copy);
6988     return nullptr;
6989   }
6990 
6991   // Update the users of the node with the new indices
6992   for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
6993     SDNode *User = Users[i];
6994     if (!User)
6995       continue;
6996 
6997     SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
6998     DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
6999 
7000     switch (Idx) {
7001     default: break;
7002     case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
7003     case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
7004     case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
7005     }
7006   }
7007 
7008   DAG.RemoveDeadNode(Node);
7009   return nullptr;
7010 }
7011 
7012 static bool isFrameIndexOp(SDValue Op) {
7013   if (Op.getOpcode() == ISD::AssertZext)
7014     Op = Op.getOperand(0);
7015 
7016   return isa<FrameIndexSDNode>(Op);
7017 }
7018 
7019 /// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
7020 /// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers.
7022 SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
7023                                                         SelectionDAG &DAG) const {
7024   if (Node->getOpcode() == ISD::CopyToReg) {
7025     RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
7026     SDValue SrcVal = Node->getOperand(2);
7027 
    // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
    // to deal with copies to physical registers.
7030     if (SrcVal.getValueType() == MVT::i1 &&
7031         TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
7032       SDLoc SL(Node);
7033       MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
7034       SDValue VReg = DAG.getRegister(
7035         MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
7036 
7037       SDNode *Glued = Node->getGluedNode();
7038       SDValue ToVReg
7039         = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
7040                          SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
7041       SDValue ToResultReg
7042         = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
7043                            VReg, ToVReg.getValue(1));
7044       DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
7045       DAG.RemoveDeadNode(Node);
7046       return ToResultReg.getNode();
7047     }
7048   }
7049 
7050   SmallVector<SDValue, 8> Ops;
7051   for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
7052     if (!isFrameIndexOp(Node->getOperand(i))) {
7053       Ops.push_back(Node->getOperand(i));
7054       continue;
7055     }
7056 
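    // The operand is a frame index; materialize it with S_MOV_B32 so the
    // instruction sees a register operand instead.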
7057     SDLoc DL(Node);
7058     Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
7059                                      Node->getOperand(i).getValueType(),
7060                                      Node->getOperand(i)), 0));
7061   }
7062 
7063   return DAG.UpdateNodeOperands(Node, Ops);
7064 }
7065 
7066 /// \brief Fold the instructions after selecting them.
7067 /// Returns null if users were already updated.
7068 SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
7069                                           SelectionDAG &DAG) const {
7070   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7071   unsigned Opcode = Node->getMachineOpcode();
7072 
7073   if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
7074       !TII->isGather4(Opcode)) {
7075     return adjustWritemask(Node, DAG);
7076   }
7077 
7078   if (Opcode == AMDGPU::INSERT_SUBREG ||
7079       Opcode == AMDGPU::REG_SEQUENCE) {
7080     legalizeTargetIndependentNode(Node, DAG);
7081     return Node;
7082   }
7083 
7084   switch (Opcode) {
7085   case AMDGPU::V_DIV_SCALE_F32:
7086   case AMDGPU::V_DIV_SCALE_F64: {
7087     // Satisfy the operand register constraint when one of the inputs is
7088     // undefined. Ordinarily each undef value will have its own implicit_def of
7089     // a vreg, so force these to use a single register.
7090     SDValue Src0 = Node->getOperand(0);
7091     SDValue Src1 = Node->getOperand(1);
7092     SDValue Src2 = Node->getOperand(2);
7093 
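    // If src0 is a defined (non-implicit_def) value that already matches src1
    // or src2, the register constraint is already satisfied.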
7094     if ((Src0.isMachineOpcode() &&
7095          Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
7096         (Src0 == Src1 || Src0 == Src2))
7097       break;
7098 
7099     MVT VT = Src0.getValueType().getSimpleVT();
7100     const TargetRegisterClass *RC = getRegClassFor(VT);
7101 
7102     MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
7103     SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
7104 
7105     SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
7106                                       UndefReg, Src0, SDValue());
7107 
7108     // src0 must be the same register as src1 or src2, even if the value is
7109     // undefined, so make sure we don't violate this constraint.
7110     if (Src0.isMachineOpcode() &&
7111         Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
7112       if (Src1.isMachineOpcode() &&
7113           Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
7114         Src0 = Src1;
7115       else if (Src2.isMachineOpcode() &&
7116                Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
7117         Src0 = Src2;
7118       else {
7119         assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
7120         Src0 = UndefReg;
7121         Src1 = UndefReg;
7122       }
7123     } else
7124       break;
7125 
7126     SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
7127     for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
7128       Ops.push_back(Node->getOperand(I));
7129 
7130     Ops.push_back(ImpDef.getValue(1));
7131     return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
7132   }
7133   default:
7134     break;
7135   }
7136 
7137   return Node;
7138 }
7139 
/// \brief Fix up operands after instruction selection: make sure VOP3
/// instructions respect the constant bus restriction, and replace atomics
/// whose results are unused with their no-return variants.
7142 void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
7143                                                      SDNode *Node) const {
7144   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7145 
7146   MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
7147 
7148   if (TII->isVOP3(MI.getOpcode())) {
7149     // Make sure constant bus requirements are respected.
7150     TII->legalizeOperandsVOP3(MRI, MI);
7151     return;
7152   }
7153 
7154   // Replace unused atomics with the no return version.
7155   int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
7156   if (NoRetAtomicOp != -1) {
7157     if (!Node->hasAnyUseOfValue(0)) {
7158       MI.setDesc(TII->get(NoRetAtomicOp));
7159       MI.RemoveOperand(0);
7160       return;
7161     }
7162 
7163     // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
7164     // instruction, because the return type of these instructions is a vec2 of
7165     // the memory type, so it can be tied to the input operand.
7166     // This means these instructions always have a use, so we need to add a
7167     // special case to check if the atomic has only one extract_subreg use,
7168     // which itself has no uses.
7169     if ((Node->hasNUsesOfValue(1, 0) &&
7170          Node->use_begin()->isMachineOpcode() &&
7171          Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
7172          !Node->use_begin()->hasAnyUseOfValue(0))) {
7173       unsigned Def = MI.getOperand(0).getReg();
7174 
7175       // Change this into a noret atomic.
7176       MI.setDesc(TII->get(NoRetAtomicOp));
7177       MI.RemoveOperand(0);
7178 
7179       // If we only remove the def operand from the atomic instruction, the
7180       // extract_subreg will be left with a use of a vreg without a def.
7181       // So we need to insert an implicit_def to avoid machine verifier
7182       // errors.
7183       BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
7184               TII->get(AMDGPU::IMPLICIT_DEF), Def);
7185     }
7186     return;
7187   }
7188 }
7189 
7190 static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
7191                               uint64_t Val) {
7192   SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
7193   return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
7194 }
7195 
7196 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
7197                                                 const SDLoc &DL,
7198                                                 SDValue Ptr) const {
7199   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7200 
7201   // Build the half of the subregister with the constants before building the
7202   // full 128-bit register. If we are building multiple resource descriptors,
7203   // this will allow CSEing of the 2-component register.
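  // The resulting descriptor holds the 64-bit pointer in dwords 0-1, zero in
  // dword 2 and the high half of the default resource data format in dword 3.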
7204   const SDValue Ops0[] = {
7205     DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
7206     buildSMovImm32(DAG, DL, 0),
7207     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
7208     buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
7209     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
7210   };
7211 
7212   SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
7213                                                 MVT::v2i32, Ops0), 0);
7214 
7215   // Combine the constants and the pointer.
7216   const SDValue Ops1[] = {
7217     DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
7218     Ptr,
7219     DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
7220     SubRegHi,
7221     DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
7222   };
7223 
7224   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
7225 }
7226 
7227 /// \brief Return a resource descriptor with the 'Add TID' bit enabled
7228 ///        The TID (Thread ID) is multiplied by the stride value (bits [61:48]
7229 ///        of the resource descriptor) to create an offset, which is added to
7230 ///        the resource pointer.
7231 MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
7232                                            SDValue Ptr, uint32_t RsrcDword1,
7233                                            uint64_t RsrcDword2And3) const {
7234   SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
7235   SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
7236   if (RsrcDword1) {
7237     PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
7238                                      DAG.getConstant(RsrcDword1, DL, MVT::i32)),
7239                     0);
7240   }
7241 
7242   SDValue DataLo = buildSMovImm32(DAG, DL,
7243                                   RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
7244   SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
7245 
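  // Assemble the 128-bit descriptor: { PtrLo, PtrHi (with RsrcDword1 ORed in
  // if nonzero), DataLo, DataHi }.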
7246   const SDValue Ops[] = {
7247     DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
7248     PtrLo,
7249     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
7250     PtrHi,
7251     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
7252     DataLo,
7253     DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
7254     DataHi,
7255     DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
7256   };
7257 
7258   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
7259 }
7260 
7261 //===----------------------------------------------------------------------===//
7262 //                         SI Inline Assembly Support
7263 //===----------------------------------------------------------------------===//
7264 
7265 std::pair<unsigned, const TargetRegisterClass *>
7266 SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
7267                                                StringRef Constraint,
7268                                                MVT VT) const {
7269   if (!isTypeLegal(VT))
7270     return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
7271 
7272   if (Constraint.size() == 1) {
7273     switch (Constraint[0]) {
7274     case 's':
7275     case 'r':
7276       switch (VT.getSizeInBits()) {
7277       default:
7278         return std::make_pair(0U, nullptr);
7279       case 32:
7280       case 16:
7281         return std::make_pair(0U, &AMDGPU::SReg_32_XM0RegClass);
7282       case 64:
7283         return std::make_pair(0U, &AMDGPU::SGPR_64RegClass);
7284       case 128:
7285         return std::make_pair(0U, &AMDGPU::SReg_128RegClass);
7286       case 256:
7287         return std::make_pair(0U, &AMDGPU::SReg_256RegClass);
7288       case 512:
7289         return std::make_pair(0U, &AMDGPU::SReg_512RegClass);
7290       }
7291 
7292     case 'v':
7293       switch (VT.getSizeInBits()) {
7294       default:
7295         return std::make_pair(0U, nullptr);
7296       case 32:
7297       case 16:
7298         return std::make_pair(0U, &AMDGPU::VGPR_32RegClass);
7299       case 64:
7300         return std::make_pair(0U, &AMDGPU::VReg_64RegClass);
7301       case 96:
7302         return std::make_pair(0U, &AMDGPU::VReg_96RegClass);
7303       case 128:
7304         return std::make_pair(0U, &AMDGPU::VReg_128RegClass);
7305       case 256:
7306         return std::make_pair(0U, &AMDGPU::VReg_256RegClass);
7307       case 512:
7308         return std::make_pair(0U, &AMDGPU::VReg_512RegClass);
7309       }
7310     }
7311   }
7312 
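  // For longer constraints, check whether a specific VGPR or SGPR is being
  // requested by index.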
7313   if (Constraint.size() > 1) {
7314     const TargetRegisterClass *RC = nullptr;
7315     if (Constraint[1] == 'v') {
7316       RC = &AMDGPU::VGPR_32RegClass;
7317     } else if (Constraint[1] == 's') {
7318       RC = &AMDGPU::SGPR_32RegClass;
7319     }
7320 
7321     if (RC) {
7322       uint32_t Idx;
7323       bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
7324       if (!Failed && Idx < RC->getNumRegs())
7325         return std::make_pair(RC->getRegister(Idx), RC);
7326     }
7327   }
7328   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
7329 }
7330 
7331 SITargetLowering::ConstraintType
7332 SITargetLowering::getConstraintType(StringRef Constraint) const {
7333   if (Constraint.size() == 1) {
7334     switch (Constraint[0]) {
7335     default: break;
7336     case 's':
7337     case 'v':
7338       return C_RegisterClass;
7339     }
7340   }
7341   return TargetLowering::getConstraintType(Constraint);
7342 }
7343 
7344 // Figure out which registers should be reserved for stack access. Only after
7345 // the function is legalized do we know all of the non-spill stack objects or if
7346 // calls are present.
7347 void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
7348   MachineRegisterInfo &MRI = MF.getRegInfo();
7349   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
7350   const MachineFrameInfo &MFI = MF.getFrameInfo();
7351   const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
7352   const SIRegisterInfo *TRI = ST.getRegisterInfo();
7353 
7354   if (Info->isEntryFunction()) {
7355     // Callable functions have fixed registers used for stack access.
7356     reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
7357   }
7358 
  // During lowering we had to assume the SP may be needed in case the
  // function contains calls, since calls are only detected once the function
  // is lowered. We're about to reserve registers, so don't bother setting up
  // the SP if we aren't really going to use it.
7363   bool NeedSP = !Info->isEntryFunction() ||
7364     MFI.hasVarSizedObjects() ||
7365     MFI.hasCalls();
7366 
7367   if (NeedSP) {
7368     unsigned ReservedStackPtrOffsetReg = TRI->reservedStackPtrOffsetReg(MF);
7369     Info->setStackPtrOffsetReg(ReservedStackPtrOffsetReg);
7370 
7371     assert(Info->getStackPtrOffsetReg() != Info->getFrameOffsetReg());
7372     assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
7373                                Info->getStackPtrOffsetReg()));
7374     MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
7375   }
7376 
7377   MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
7378   MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
7379   MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
7380                      Info->getScratchWaveOffsetReg());
7381 
7382   TargetLoweringBase::finalizeLowering(MF);
7383 }
7384 
7385 void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
7386                                                      KnownBits &Known,
7387                                                      const APInt &DemandedElts,
7388                                                      const SelectionDAG &DAG,
7389                                                      unsigned Depth) const {
7390   TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
7391                                                 DAG, Depth);
7392 
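  // With a huge private buffer, no high bits of the frame index can be
  // assumed zero.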
7393   if (getSubtarget()->enableHugePrivateBuffer())
7394     return;
7395 
7396   // Technically it may be possible to have a dispatch with a single workitem
7397   // that uses the full private memory size, but that's not really useful. We
7398   // can't use vaddr in MUBUF instructions if we don't know the address
7399   // calculation won't overflow, so assume the sign bit is never set.
7400   Known.Zero.setHighBits(AssumeFrameIndexHighZeroBits);
7401 }
7402