1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// \brief Custom DAG lowering for SI
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifdef _MSC_VER
16 // Provide M_PI.
17 #define _USE_MATH_DEFINES
18 #endif
19 
20 #include "SIISelLowering.h"
21 #include "AMDGPU.h"
22 #include "AMDGPUIntrinsicInfo.h"
23 #include "AMDGPUSubtarget.h"
24 #include "AMDGPUTargetMachine.h"
25 #include "SIDefines.h"
26 #include "SIInstrInfo.h"
27 #include "SIMachineFunctionInfo.h"
28 #include "SIRegisterInfo.h"
29 #include "Utils/AMDGPUBaseInfo.h"
30 #include "llvm/ADT/APFloat.h"
31 #include "llvm/ADT/APInt.h"
32 #include "llvm/ADT/ArrayRef.h"
33 #include "llvm/ADT/BitVector.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/StringRef.h"
37 #include "llvm/ADT/StringSwitch.h"
38 #include "llvm/ADT/Twine.h"
39 #include "llvm/CodeGen/Analysis.h"
40 #include "llvm/CodeGen/CallingConvLower.h"
41 #include "llvm/CodeGen/DAGCombine.h"
42 #include "llvm/CodeGen/ISDOpcodes.h"
43 #include "llvm/CodeGen/MachineBasicBlock.h"
44 #include "llvm/CodeGen/MachineFrameInfo.h"
45 #include "llvm/CodeGen/MachineFunction.h"
46 #include "llvm/CodeGen/MachineInstr.h"
47 #include "llvm/CodeGen/MachineInstrBuilder.h"
48 #include "llvm/CodeGen/MachineMemOperand.h"
49 #include "llvm/CodeGen/MachineModuleInfo.h"
50 #include "llvm/CodeGen/MachineOperand.h"
51 #include "llvm/CodeGen/MachineRegisterInfo.h"
52 #include "llvm/CodeGen/MachineValueType.h"
53 #include "llvm/CodeGen/SelectionDAG.h"
54 #include "llvm/CodeGen/SelectionDAGNodes.h"
55 #include "llvm/CodeGen/TargetCallingConv.h"
56 #include "llvm/CodeGen/TargetRegisterInfo.h"
57 #include "llvm/CodeGen/ValueTypes.h"
58 #include "llvm/IR/Constants.h"
59 #include "llvm/IR/DataLayout.h"
60 #include "llvm/IR/DebugLoc.h"
61 #include "llvm/IR/DerivedTypes.h"
62 #include "llvm/IR/DiagnosticInfo.h"
63 #include "llvm/IR/Function.h"
64 #include "llvm/IR/GlobalValue.h"
65 #include "llvm/IR/InstrTypes.h"
66 #include "llvm/IR/Instruction.h"
67 #include "llvm/IR/Instructions.h"
68 #include "llvm/IR/IntrinsicInst.h"
69 #include "llvm/IR/Type.h"
70 #include "llvm/Support/Casting.h"
71 #include "llvm/Support/CodeGen.h"
72 #include "llvm/Support/CommandLine.h"
73 #include "llvm/Support/Compiler.h"
74 #include "llvm/Support/ErrorHandling.h"
75 #include "llvm/Support/KnownBits.h"
76 #include "llvm/Support/MathExtras.h"
77 #include "llvm/Target/TargetOptions.h"
78 #include <cassert>
79 #include <cmath>
80 #include <cstdint>
81 #include <iterator>
82 #include <tuple>
83 #include <utility>
84 #include <vector>
85 
86 using namespace llvm;
87 
88 #define DEBUG_TYPE "si-lower"
89 
90 STATISTIC(NumTailCalls, "Number of tail calls");
91 
92 static cl::opt<bool> EnableVGPRIndexMode(
93   "amdgpu-vgpr-index-mode",
94   cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
95   cl::init(false));
96 
97 static cl::opt<unsigned> AssumeFrameIndexHighZeroBits(
98   "amdgpu-frame-index-zero-bits",
99   cl::desc("High bits of frame index assumed to be zero"),
100   cl::init(5),
101   cl::ReallyHidden);
102 
103 static unsigned findFirstFreeSGPR(CCState &CCInfo) {
104   unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
105   for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
106     if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
107       return AMDGPU::SGPR0 + Reg;
108     }
109   }
110   llvm_unreachable("Cannot allocate sgpr");
111 }
112 
113 SITargetLowering::SITargetLowering(const TargetMachine &TM,
114                                    const SISubtarget &STI)
115     : AMDGPUTargetLowering(TM, STI) {
116   addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
117   addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
118 
119   addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
120   addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);
121 
122   addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
123   addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
124   addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
125 
126   addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
127   addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);
128 
129   addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
130   addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
131 
132   addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
133   addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
134 
135   addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
136   addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
137 
138   if (Subtarget->has16BitInsts()) {
139     addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
140     addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
141   }
142 
143   if (Subtarget->hasVOP3PInsts()) {
144     addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
145     addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
146   }
147 
148   computeRegisterProperties(STI.getRegisterInfo());
149 
150   // We need to custom lower vector stores from local memory
151   setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
152   setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
153   setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
154   setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
155   setOperationAction(ISD::LOAD, MVT::i1, Custom);
156 
157   setOperationAction(ISD::STORE, MVT::v2i32, Custom);
158   setOperationAction(ISD::STORE, MVT::v4i32, Custom);
159   setOperationAction(ISD::STORE, MVT::v8i32, Custom);
160   setOperationAction(ISD::STORE, MVT::v16i32, Custom);
161   setOperationAction(ISD::STORE, MVT::i1, Custom);
162 
163   setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
164   setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
165   setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
166   setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
167   setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
168   setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
169   setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
170   setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
171   setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
172   setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);
173 
174   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
175   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
176   setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);
177 
178   setOperationAction(ISD::SELECT, MVT::i1, Promote);
179   setOperationAction(ISD::SELECT, MVT::i64, Custom);
180   setOperationAction(ISD::SELECT, MVT::f64, Promote);
181   AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);
182 
183   setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
184   setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
185   setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
186   setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
187   setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
188 
189   setOperationAction(ISD::SETCC, MVT::i1, Promote);
190   setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
191   setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
192   AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
193 
194   setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
195   setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
196 
197   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
198   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
199   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
200   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
201   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
202   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
203   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);
204 
205   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
206   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
207   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
208   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);
209 
210   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
211 
212   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
213   setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
214   setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
215 
216   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
217   setOperationAction(ISD::BR_CC, MVT::i1, Expand);
218   setOperationAction(ISD::BR_CC, MVT::i32, Expand);
219   setOperationAction(ISD::BR_CC, MVT::i64, Expand);
220   setOperationAction(ISD::BR_CC, MVT::f32, Expand);
221   setOperationAction(ISD::BR_CC, MVT::f64, Expand);
222 
223   setOperationAction(ISD::UADDO, MVT::i32, Legal);
224   setOperationAction(ISD::USUBO, MVT::i32, Legal);
225 
226   setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
227   setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);
228 
229   // We only support LOAD/STORE and vector manipulation ops for vectors
230   // with > 4 elements.
231   for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
232         MVT::v2i64, MVT::v2f64}) {
233     for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
234       switch (Op) {
235       case ISD::LOAD:
236       case ISD::STORE:
237       case ISD::BUILD_VECTOR:
238       case ISD::BITCAST:
239       case ISD::EXTRACT_VECTOR_ELT:
240       case ISD::INSERT_VECTOR_ELT:
241       case ISD::INSERT_SUBVECTOR:
242       case ISD::EXTRACT_SUBVECTOR:
243       case ISD::SCALAR_TO_VECTOR:
244         break;
245       case ISD::CONCAT_VECTORS:
246         setOperationAction(Op, VT, Custom);
247         break;
248       default:
249         setOperationAction(Op, VT, Expand);
250         break;
251       }
252     }
253   }
254 
255   // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
256   // is expanded to avoid having two separate loops in case the index is a VGPR.
257 
258   // Most operations are naturally 32-bit vector operations. We only support
259   // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
260   for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
261     setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
262     AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);
263 
264     setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
265     AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);
266 
267     setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
268     AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);
269 
270     setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
271     AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
272   }
273 
274   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
275   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
276   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
277   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);
278 
279   // Avoid stack access for these.
280   // TODO: Generalize to more vector types.
281   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
282   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
283   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
284   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
285 
286   // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
287   // and output demarshalling
288   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
289   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
290 
291   // We can't return success/failure, only the old value,
292   // let LLVM add the comparison
293   setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
294   setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);
295 
296   if (getSubtarget()->hasFlatAddressSpace()) {
297     setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
298     setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
299   }
300 
301   setOperationAction(ISD::BSWAP, MVT::i32, Legal);
302   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
303 
304   // On SI this is s_memtime and s_memrealtime on VI.
305   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
306   setOperationAction(ISD::TRAP, MVT::Other, Custom);
307   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);
308 
309   setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
310   setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
311 
312   if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) {
313     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
314     setOperationAction(ISD::FCEIL, MVT::f64, Legal);
315     setOperationAction(ISD::FRINT, MVT::f64, Legal);
316   }
317 
318   setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
319 
320   setOperationAction(ISD::FSIN, MVT::f32, Custom);
321   setOperationAction(ISD::FCOS, MVT::f32, Custom);
322   setOperationAction(ISD::FDIV, MVT::f32, Custom);
323   setOperationAction(ISD::FDIV, MVT::f64, Custom);
324 
325   if (Subtarget->has16BitInsts()) {
326     setOperationAction(ISD::Constant, MVT::i16, Legal);
327 
328     setOperationAction(ISD::SMIN, MVT::i16, Legal);
329     setOperationAction(ISD::SMAX, MVT::i16, Legal);
330 
331     setOperationAction(ISD::UMIN, MVT::i16, Legal);
332     setOperationAction(ISD::UMAX, MVT::i16, Legal);
333 
334     setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
335     AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);
336 
337     setOperationAction(ISD::ROTR, MVT::i16, Promote);
338     setOperationAction(ISD::ROTL, MVT::i16, Promote);
339 
340     setOperationAction(ISD::SDIV, MVT::i16, Promote);
341     setOperationAction(ISD::UDIV, MVT::i16, Promote);
342     setOperationAction(ISD::SREM, MVT::i16, Promote);
343     setOperationAction(ISD::UREM, MVT::i16, Promote);
344 
345     setOperationAction(ISD::BSWAP, MVT::i16, Promote);
346     setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);
347 
348     setOperationAction(ISD::CTTZ, MVT::i16, Promote);
349     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
350     setOperationAction(ISD::CTLZ, MVT::i16, Promote);
351     setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
352 
353     setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
354 
355     setOperationAction(ISD::BR_CC, MVT::i16, Expand);
356 
357     setOperationAction(ISD::LOAD, MVT::i16, Custom);
358 
359     setTruncStoreAction(MVT::i64, MVT::i16, Expand);
360 
361     setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
362     AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
363     setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
364     AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);
365 
366     setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
367     setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
368     setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
369     setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
370 
371     // F16 - Constant Actions.
372     setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
373 
374     // F16 - Load/Store Actions.
375     setOperationAction(ISD::LOAD, MVT::f16, Promote);
376     AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
377     setOperationAction(ISD::STORE, MVT::f16, Promote);
378     AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);
379 
380     // F16 - VOP1 Actions.
381     setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
382     setOperationAction(ISD::FCOS, MVT::f16, Promote);
383     setOperationAction(ISD::FSIN, MVT::f16, Promote);
384     setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
385     setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
386     setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
387     setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
388     setOperationAction(ISD::FROUND, MVT::f16, Custom);
389 
390     // F16 - VOP2 Actions.
391     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
392     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
393     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
394     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
395     setOperationAction(ISD::FDIV, MVT::f16, Custom);
396 
397     // F16 - VOP3 Actions.
398     setOperationAction(ISD::FMA, MVT::f16, Legal);
399     if (!Subtarget->hasFP16Denormals())
400       setOperationAction(ISD::FMAD, MVT::f16, Legal);
401   }
402 
403   if (Subtarget->hasVOP3PInsts()) {
404     for (MVT VT : {MVT::v2i16, MVT::v2f16}) {
405       for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
406         switch (Op) {
407         case ISD::LOAD:
408         case ISD::STORE:
409         case ISD::BUILD_VECTOR:
410         case ISD::BITCAST:
411         case ISD::EXTRACT_VECTOR_ELT:
412         case ISD::INSERT_VECTOR_ELT:
413         case ISD::INSERT_SUBVECTOR:
414         case ISD::EXTRACT_SUBVECTOR:
415         case ISD::SCALAR_TO_VECTOR:
416           break;
417         case ISD::CONCAT_VECTORS:
418           setOperationAction(Op, VT, Custom);
419           break;
420         default:
421           setOperationAction(Op, VT, Expand);
422           break;
423         }
424       }
425     }
426 
427     // XXX - Do these do anything? Vector constants turn into build_vector.
428     setOperationAction(ISD::Constant, MVT::v2i16, Legal);
429     setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);
430 
431     setOperationAction(ISD::STORE, MVT::v2i16, Promote);
432     AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
433     setOperationAction(ISD::STORE, MVT::v2f16, Promote);
434     AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);
435 
436     setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
437     AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
438     setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
439     AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);
440 
441     setOperationAction(ISD::AND, MVT::v2i16, Promote);
442     AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
443     setOperationAction(ISD::OR, MVT::v2i16, Promote);
444     AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
445     setOperationAction(ISD::XOR, MVT::v2i16, Promote);
446     AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
447     setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
448     AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
449     setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
450     AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
451 
452     setOperationAction(ISD::ADD, MVT::v2i16, Legal);
453     setOperationAction(ISD::SUB, MVT::v2i16, Legal);
454     setOperationAction(ISD::MUL, MVT::v2i16, Legal);
455     setOperationAction(ISD::SHL, MVT::v2i16, Legal);
456     setOperationAction(ISD::SRL, MVT::v2i16, Legal);
457     setOperationAction(ISD::SRA, MVT::v2i16, Legal);
458     setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
459     setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
460     setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
461     setOperationAction(ISD::UMAX, MVT::v2i16, Legal);
462 
463     setOperationAction(ISD::FADD, MVT::v2f16, Legal);
464     setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
465     setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
466     setOperationAction(ISD::FMA, MVT::v2f16, Legal);
467     setOperationAction(ISD::FMINNUM, MVT::v2f16, Legal);
468     setOperationAction(ISD::FMAXNUM, MVT::v2f16, Legal);
469 
470     // This isn't really legal, but this avoids the legalizer unrolling it (and
471     // allows matching fneg (fabs x) patterns)
472     setOperationAction(ISD::FABS, MVT::v2f16, Legal);
473 
474     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
475     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
476 
477     setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
478     setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
479     setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
480     setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
481   } else {
482     setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
483     setOperationAction(ISD::SELECT, MVT::v2f16, Custom);
484   }
485 
486   for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
487     setOperationAction(ISD::SELECT, VT, Custom);
488   }
489 
490   setTargetDAGCombine(ISD::ADD);
491   setTargetDAGCombine(ISD::ADDCARRY);
492   setTargetDAGCombine(ISD::SUB);
493   setTargetDAGCombine(ISD::SUBCARRY);
494   setTargetDAGCombine(ISD::FADD);
495   setTargetDAGCombine(ISD::FSUB);
496   setTargetDAGCombine(ISD::FMINNUM);
497   setTargetDAGCombine(ISD::FMAXNUM);
498   setTargetDAGCombine(ISD::SMIN);
499   setTargetDAGCombine(ISD::SMAX);
500   setTargetDAGCombine(ISD::UMIN);
501   setTargetDAGCombine(ISD::UMAX);
502   setTargetDAGCombine(ISD::SETCC);
503   setTargetDAGCombine(ISD::AND);
504   setTargetDAGCombine(ISD::OR);
505   setTargetDAGCombine(ISD::XOR);
506   setTargetDAGCombine(ISD::SINT_TO_FP);
507   setTargetDAGCombine(ISD::UINT_TO_FP);
508   setTargetDAGCombine(ISD::FCANONICALIZE);
509   setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
510   setTargetDAGCombine(ISD::ZERO_EXTEND);
511   setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
512   setTargetDAGCombine(ISD::BUILD_VECTOR);
513 
514   // All memory operations. Some folding on the pointer operand is done to help
515   // matching the constant offsets in the addressing modes.
516   setTargetDAGCombine(ISD::LOAD);
517   setTargetDAGCombine(ISD::STORE);
518   setTargetDAGCombine(ISD::ATOMIC_LOAD);
519   setTargetDAGCombine(ISD::ATOMIC_STORE);
520   setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
521   setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
522   setTargetDAGCombine(ISD::ATOMIC_SWAP);
523   setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
524   setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
525   setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
526   setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
527   setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
528   setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
529   setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
530   setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
531   setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
532   setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
533 
534   setSchedulingPreference(Sched::RegPressure);
535 }
536 
537 const SISubtarget *SITargetLowering::getSubtarget() const {
538   return static_cast<const SISubtarget *>(Subtarget);
539 }
540 
541 //===----------------------------------------------------------------------===//
542 // TargetLowering queries
543 //===----------------------------------------------------------------------===//
544 
545 bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
546   // SI has some legal vector types, but no legal vector operations. Say no
547   // shuffles are legal in order to prefer scalarizing some vector operations.
548   return false;
549 }
550 
551 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
552                                           const CallInst &CI,
553                                           unsigned IntrID) const {
554   switch (IntrID) {
555   case Intrinsic::amdgcn_atomic_inc:
556   case Intrinsic::amdgcn_atomic_dec: {
557     Info.opc = ISD::INTRINSIC_W_CHAIN;
558     Info.memVT = MVT::getVT(CI.getType());
559     Info.ptrVal = CI.getOperand(0);
560     Info.align = 0;
561 
562     const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
563     Info.vol = !Vol || !Vol->isZero();
564     Info.readMem = true;
565     Info.writeMem = true;
566     return true;
567   }
568   default:
569     return false;
570   }
571 }
572 
573 bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
574                                             SmallVectorImpl<Value*> &Ops,
575                                             Type *&AccessTy) const {
576   switch (II->getIntrinsicID()) {
577   case Intrinsic::amdgcn_atomic_inc:
578   case Intrinsic::amdgcn_atomic_dec: {
579     Value *Ptr = II->getArgOperand(0);
580     AccessTy = II->getType();
581     Ops.push_back(Ptr);
582     return true;
583   }
584   default:
585     return false;
586   }
587 }
588 
589 bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
590   if (!Subtarget->hasFlatInstOffsets()) {
591     // Flat instructions do not have offsets, and only have the register
592     // address.
593     return AM.BaseOffs == 0 && AM.Scale == 0;
594   }
595 
596   // GFX9 added a 13-bit signed offset. When using regular flat instructions,
597   // the sign bit is ignored and is treated as a 12-bit unsigned offset.
598 
599   // Just r + i
600   return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
601 }
602 
603 bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
604   if (Subtarget->hasFlatGlobalInsts())
605     return isInt<13>(AM.BaseOffs) && AM.Scale == 0;
606 
607   if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
608       // Assume the we will use FLAT for all global memory accesses
609       // on VI.
610       // FIXME: This assumption is currently wrong.  On VI we still use
611       // MUBUF instructions for the r + i addressing mode.  As currently
612       // implemented, the MUBUF instructions only work on buffer < 4GB.
613       // It may be possible to support > 4GB buffers with MUBUF instructions,
614       // by setting the stride value in the resource descriptor which would
615       // increase the size limit to (stride * 4GB).  However, this is risky,
616       // because it has never been validated.
617     return isLegalFlatAddressingMode(AM);
618   }
619 
620   return isLegalMUBUFAddressingMode(AM);
621 }
622 
623 bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
624   // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
625   // additionally can do r + r + i with addr64. 32-bit has more addressing
626   // mode options. Depending on the resource constant, it can also do
627   // (i64 r0) + (i32 r1) * (i14 i).
628   //
629   // Private arrays end up using a scratch buffer most of the time, so also
630   // assume those use MUBUF instructions. Scratch loads / stores are currently
631   // implemented as mubuf instructions with offen bit set, so slightly
632   // different than the normal addr64.
633   if (!isUInt<12>(AM.BaseOffs))
634     return false;
635 
636   // FIXME: Since we can split immediate into soffset and immediate offset,
637   // would it make sense to allow any immediate?
638 
639   switch (AM.Scale) {
640   case 0: // r + i or just i, depending on HasBaseReg.
641     return true;
642   case 1:
643     return true; // We have r + r or r + i.
644   case 2:
645     if (AM.HasBaseReg) {
646       // Reject 2 * r + r.
647       return false;
648     }
649 
650     // Allow 2 * r as r + r
651     // Or  2 * r + i is allowed as r + r + i.
652     return true;
653   default: // Don't allow n * r
654     return false;
655   }
656 }
657 
658 bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
659                                              const AddrMode &AM, Type *Ty,
660                                              unsigned AS, Instruction *I) const {
661   // No global is ever allowed as a base.
662   if (AM.BaseGV)
663     return false;
664 
665   if (AS == AMDGPUASI.GLOBAL_ADDRESS)
666     return isLegalGlobalAddressingMode(AM);
667 
668   if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
669     // If the offset isn't a multiple of 4, it probably isn't going to be
670     // correctly aligned.
671     // FIXME: Can we get the real alignment here?
672     if (AM.BaseOffs % 4 != 0)
673       return isLegalMUBUFAddressingMode(AM);
674 
675     // There are no SMRD extloads, so if we have to do a small type access we
676     // will use a MUBUF load.
677     // FIXME?: We also need to do this if unaligned, but we don't know the
678     // alignment here.
679     if (DL.getTypeStoreSize(Ty) < 4)
680       return isLegalGlobalAddressingMode(AM);
681 
682     if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
683       // SMRD instructions have an 8-bit, dword offset on SI.
684       if (!isUInt<8>(AM.BaseOffs / 4))
685         return false;
686     } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) {
687       // On CI+, this can also be a 32-bit literal constant offset. If it fits
688       // in 8-bits, it can use a smaller encoding.
689       if (!isUInt<32>(AM.BaseOffs / 4))
690         return false;
691     } else if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
692       // On VI, these use the SMEM format and the offset is 20-bit in bytes.
693       if (!isUInt<20>(AM.BaseOffs))
694         return false;
695     } else
696       llvm_unreachable("unhandled generation");
697 
698     if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
699       return true;
700 
701     if (AM.Scale == 1 && AM.HasBaseReg)
702       return true;
703 
704     return false;
705 
706   } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
707     return isLegalMUBUFAddressingMode(AM);
708   } else if (AS == AMDGPUASI.LOCAL_ADDRESS ||
709              AS == AMDGPUASI.REGION_ADDRESS) {
710     // Basic, single offset DS instructions allow a 16-bit unsigned immediate
711     // field.
712     // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
713     // an 8-bit dword offset but we don't know the alignment here.
714     if (!isUInt<16>(AM.BaseOffs))
715       return false;
716 
717     if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
718       return true;
719 
720     if (AM.Scale == 1 && AM.HasBaseReg)
721       return true;
722 
723     return false;
724   } else if (AS == AMDGPUASI.FLAT_ADDRESS ||
725              AS == AMDGPUASI.UNKNOWN_ADDRESS_SPACE) {
726     // For an unknown address space, this usually means that this is for some
727     // reason being used for pure arithmetic, and not based on some addressing
728     // computation. We don't have instructions that compute pointers with any
729     // addressing modes, so treat them as having no offset like flat
730     // instructions.
731     return isLegalFlatAddressingMode(AM);
732   } else {
733     llvm_unreachable("unhandled address space");
734   }
735 }
736 
737 bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
738                                         const SelectionDAG &DAG) const {
739   if (AS == AMDGPUASI.GLOBAL_ADDRESS || AS == AMDGPUASI.FLAT_ADDRESS) {
740     return (MemVT.getSizeInBits() <= 4 * 32);
741   } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
742     unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
743     return (MemVT.getSizeInBits() <= MaxPrivateBits);
744   } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
745     return (MemVT.getSizeInBits() <= 2 * 32);
746   }
747   return true;
748 }
749 
750 bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
751                                                       unsigned AddrSpace,
752                                                       unsigned Align,
753                                                       bool *IsFast) const {
754   if (IsFast)
755     *IsFast = false;
756 
757   // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
758   // which isn't a simple VT.
759   // Until MVT is extended to handle this, simply check for the size and
760   // rely on the condition below: allow accesses if the size is a multiple of 4.
761   if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
762                            VT.getStoreSize() > 16)) {
763     return false;
764   }
765 
766   if (AddrSpace == AMDGPUASI.LOCAL_ADDRESS ||
767       AddrSpace == AMDGPUASI.REGION_ADDRESS) {
768     // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
769     // aligned, 8 byte access in a single operation using ds_read2/write2_b32
770     // with adjacent offsets.
771     bool AlignedBy4 = (Align % 4 == 0);
772     if (IsFast)
773       *IsFast = AlignedBy4;
774 
775     return AlignedBy4;
776   }
777 
778   // FIXME: We have to be conservative here and assume that flat operations
779   // will access scratch.  If we had access to the IR function, then we
780   // could determine if any private memory was used in the function.
781   if (!Subtarget->hasUnalignedScratchAccess() &&
782       (AddrSpace == AMDGPUASI.PRIVATE_ADDRESS ||
783        AddrSpace == AMDGPUASI.FLAT_ADDRESS)) {
784     return false;
785   }
786 
787   if (Subtarget->hasUnalignedBufferAccess()) {
788     // If we have an uniform constant load, it still requires using a slow
789     // buffer instruction if unaligned.
790     if (IsFast) {
791       *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS) ?
792         (Align % 4 == 0) : true;
793     }
794 
795     return true;
796   }
797 
798   // Smaller than dword value must be aligned.
799   if (VT.bitsLT(MVT::i32))
800     return false;
801 
802   // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
803   // byte-address are ignored, thus forcing Dword alignment.
804   // This applies to private, global, and constant memory.
805   if (IsFast)
806     *IsFast = true;
807 
808   return VT.bitsGT(MVT::i32) && Align % 4 == 0;
809 }
810 
811 EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
812                                           unsigned SrcAlign, bool IsMemset,
813                                           bool ZeroMemset,
814                                           bool MemcpyStrSrc,
815                                           MachineFunction &MF) const {
816   // FIXME: Should account for address space here.
817 
818   // The default fallback uses the private pointer size as a guess for a type to
819   // use. Make sure we switch these to 64-bit accesses.
820 
821   if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
822     return MVT::v4i32;
823 
824   if (Size >= 8 && DstAlign >= 4)
825     return MVT::v2i32;
826 
827   // Use the default.
828   return MVT::Other;
829 }
830 
831 static bool isFlatGlobalAddrSpace(unsigned AS, AMDGPUAS AMDGPUASI) {
832   return AS == AMDGPUASI.GLOBAL_ADDRESS ||
833          AS == AMDGPUASI.FLAT_ADDRESS ||
834          AS == AMDGPUASI.CONSTANT_ADDRESS;
835 }
836 
837 bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
838                                            unsigned DestAS) const {
839   return isFlatGlobalAddrSpace(SrcAS, AMDGPUASI) &&
840          isFlatGlobalAddrSpace(DestAS, AMDGPUASI);
841 }
842 
843 bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
844   const MemSDNode *MemNode = cast<MemSDNode>(N);
845   const Value *Ptr = MemNode->getMemOperand()->getValue();
846   const Instruction *I = dyn_cast<Instruction>(Ptr);
847   return I && I->getMetadata("amdgpu.noclobber");
848 }
849 
850 bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
851                                             unsigned DestAS) const {
852   // Flat -> private/local is a simple truncate.
853   // Flat -> global is no-op
854   if (SrcAS == AMDGPUASI.FLAT_ADDRESS)
855     return true;
856 
857   return isNoopAddrSpaceCast(SrcAS, DestAS);
858 }
859 
860 bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
861   const MemSDNode *MemNode = cast<MemSDNode>(N);
862 
863   return AMDGPU::isUniformMMO(MemNode->getMemOperand());
864 }
865 
866 TargetLoweringBase::LegalizeTypeAction
867 SITargetLowering::getPreferredVectorAction(EVT VT) const {
868   if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
869     return TypeSplitVector;
870 
871   return TargetLoweringBase::getPreferredVectorAction(VT);
872 }
873 
874 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
875                                                          Type *Ty) const {
876   // FIXME: Could be smarter if called for vector constants.
877   return true;
878 }
879 
880 bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
881   if (Subtarget->has16BitInsts() && VT == MVT::i16) {
882     switch (Op) {
883     case ISD::LOAD:
884     case ISD::STORE:
885 
886     // These operations are done with 32-bit instructions anyway.
887     case ISD::AND:
888     case ISD::OR:
889     case ISD::XOR:
890     case ISD::SELECT:
891       // TODO: Extensions?
892       return true;
893     default:
894       return false;
895     }
896   }
897 
898   // SimplifySetCC uses this function to determine whether or not it should
899   // create setcc with i1 operands.  We don't have instructions for i1 setcc.
900   if (VT == MVT::i1 && Op == ISD::SETCC)
901     return false;
902 
903   return TargetLowering::isTypeDesirableForOp(Op, VT);
904 }
905 
906 SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
907                                                    const SDLoc &SL,
908                                                    SDValue Chain,
909                                                    uint64_t Offset) const {
910   const DataLayout &DL = DAG.getDataLayout();
911   MachineFunction &MF = DAG.getMachineFunction();
912   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
913 
914   const ArgDescriptor *InputPtrReg;
915   const TargetRegisterClass *RC;
916 
917   std::tie(InputPtrReg, RC)
918     = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
919 
920   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
921   MVT PtrVT = getPointerTy(DL, AMDGPUASI.CONSTANT_ADDRESS);
922   SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
923     MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
924 
925   return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
926                      DAG.getConstant(Offset, SL, PtrVT));
927 }
928 
929 SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
930                                             const SDLoc &SL) const {
931   auto MFI = DAG.getMachineFunction().getInfo<SIMachineFunctionInfo>();
932   uint64_t Offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
933   return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
934 }
935 
936 SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
937                                          const SDLoc &SL, SDValue Val,
938                                          bool Signed,
939                                          const ISD::InputArg *Arg) const {
940   if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
941       VT.bitsLT(MemVT)) {
942     unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
943     Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
944   }
945 
946   if (MemVT.isFloatingPoint())
947     Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
948   else if (Signed)
949     Val = DAG.getSExtOrTrunc(Val, SL, VT);
950   else
951     Val = DAG.getZExtOrTrunc(Val, SL, VT);
952 
953   return Val;
954 }
955 
956 SDValue SITargetLowering::lowerKernargMemParameter(
957   SelectionDAG &DAG, EVT VT, EVT MemVT,
958   const SDLoc &SL, SDValue Chain,
959   uint64_t Offset, bool Signed,
960   const ISD::InputArg *Arg) const {
961   const DataLayout &DL = DAG.getDataLayout();
962   Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
963   PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
964   MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
965 
966   unsigned Align = DL.getABITypeAlignment(Ty);
967 
968   SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
969   SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
970                              MachineMemOperand::MONonTemporal |
971                              MachineMemOperand::MODereferenceable |
972                              MachineMemOperand::MOInvariant);
973 
974   SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
975   return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
976 }
977 
978 SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
979                                               const SDLoc &SL, SDValue Chain,
980                                               const ISD::InputArg &Arg) const {
981   MachineFunction &MF = DAG.getMachineFunction();
982   MachineFrameInfo &MFI = MF.getFrameInfo();
983 
984   if (Arg.Flags.isByVal()) {
985     unsigned Size = Arg.Flags.getByValSize();
986     int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
987     return DAG.getFrameIndex(FrameIdx, MVT::i32);
988   }
989 
990   unsigned ArgOffset = VA.getLocMemOffset();
991   unsigned ArgSize = VA.getValVT().getStoreSize();
992 
993   int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
994 
995   // Create load nodes to retrieve arguments from the stack.
996   SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
997   SDValue ArgValue;
998 
999   // For NON_EXTLOAD, generic code in getLoad assert(ValVT == MemVT)
1000   ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
1001   MVT MemVT = VA.getValVT();
1002 
1003   switch (VA.getLocInfo()) {
1004   default:
1005     break;
1006   case CCValAssign::BCvt:
1007     MemVT = VA.getLocVT();
1008     break;
1009   case CCValAssign::SExt:
1010     ExtType = ISD::SEXTLOAD;
1011     break;
1012   case CCValAssign::ZExt:
1013     ExtType = ISD::ZEXTLOAD;
1014     break;
1015   case CCValAssign::AExt:
1016     ExtType = ISD::EXTLOAD;
1017     break;
1018   }
1019 
1020   ArgValue = DAG.getExtLoad(
1021     ExtType, SL, VA.getLocVT(), Chain, FIN,
1022     MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
1023     MemVT);
1024   return ArgValue;
1025 }
1026 
1027 SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1028   const SIMachineFunctionInfo &MFI,
1029   EVT VT,
1030   AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
1031   const ArgDescriptor *Reg;
1032   const TargetRegisterClass *RC;
1033 
1034   std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1035   return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1036 }
1037 
1038 static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
1039                                    CallingConv::ID CallConv,
1040                                    ArrayRef<ISD::InputArg> Ins,
1041                                    BitVector &Skipped,
1042                                    FunctionType *FType,
1043                                    SIMachineFunctionInfo *Info) {
1044   for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
1045     const ISD::InputArg &Arg = Ins[I];
1046 
1047     // First check if it's a PS input addr.
1048     if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
1049         !Arg.Flags.isByVal() && PSInputNum <= 15) {
1050 
1051       if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
1052         // We can safely skip PS inputs.
1053         Skipped.set(I);
1054         ++PSInputNum;
1055         continue;
1056       }
1057 
1058       Info->markPSInputAllocated(PSInputNum);
1059       if (Arg.Used)
1060         Info->markPSInputEnabled(PSInputNum);
1061 
1062       ++PSInputNum;
1063     }
1064 
1065     // Second split vertices into their elements.
1066     if (Arg.VT.isVector()) {
1067       ISD::InputArg NewArg = Arg;
1068       NewArg.Flags.setSplit();
1069       NewArg.VT = Arg.VT.getVectorElementType();
1070 
1071       // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
1072       // three or five element vertex only needs three or five registers,
1073       // NOT four or eight.
1074       Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
1075       unsigned NumElements = ParamType->getVectorNumElements();
1076 
1077       for (unsigned J = 0; J != NumElements; ++J) {
1078         Splits.push_back(NewArg);
1079         NewArg.PartOffset += NewArg.VT.getStoreSize();
1080       }
1081     } else {
1082       Splits.push_back(Arg);
1083     }
1084   }
1085 }
1086 
1087 // Allocate special inputs passed in VGPRs.
1088 static void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1089                                            MachineFunction &MF,
1090                                            const SIRegisterInfo &TRI,
1091                                            SIMachineFunctionInfo &Info) {
1092   if (Info.hasWorkItemIDX()) {
1093     unsigned Reg = AMDGPU::VGPR0;
1094     MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1095 
1096     CCInfo.AllocateReg(Reg);
1097     Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
1098   }
1099 
1100   if (Info.hasWorkItemIDY()) {
1101     unsigned Reg = AMDGPU::VGPR1;
1102     MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1103 
1104     CCInfo.AllocateReg(Reg);
1105     Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
1106   }
1107 
1108   if (Info.hasWorkItemIDZ()) {
1109     unsigned Reg = AMDGPU::VGPR2;
1110     MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1111 
1112     CCInfo.AllocateReg(Reg);
1113     Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
1114   }
1115 }
1116 
1117 // Try to allocate a VGPR at the end of the argument list, or if no argument
1118 // VGPRs are left allocating a stack slot.
1119 static ArgDescriptor allocateVGPR32Input(CCState &CCInfo) {
1120   ArrayRef<MCPhysReg> ArgVGPRs
1121     = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1122   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1123   if (RegIdx == ArgVGPRs.size()) {
1124     // Spill to stack required.
1125     int64_t Offset = CCInfo.AllocateStack(4, 4);
1126 
1127     return ArgDescriptor::createStack(Offset);
1128   }
1129 
1130   unsigned Reg = ArgVGPRs[RegIdx];
1131   Reg = CCInfo.AllocateReg(Reg);
1132   assert(Reg != AMDGPU::NoRegister);
1133 
1134   MachineFunction &MF = CCInfo.getMachineFunction();
1135   MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1136   return ArgDescriptor::createRegister(Reg);
1137 }
1138 
1139 static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
1140                                              const TargetRegisterClass *RC,
1141                                              unsigned NumArgRegs) {
1142   ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
1143   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1144   if (RegIdx == ArgSGPRs.size())
1145     report_fatal_error("ran out of SGPRs for arguments");
1146 
1147   unsigned Reg = ArgSGPRs[RegIdx];
1148   Reg = CCInfo.AllocateReg(Reg);
1149   assert(Reg != AMDGPU::NoRegister);
1150 
1151   MachineFunction &MF = CCInfo.getMachineFunction();
1152   MF.addLiveIn(Reg, RC);
1153   return ArgDescriptor::createRegister(Reg);
1154 }
1155 
1156 static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
1157   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1158 }
1159 
1160 static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
1161   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1162 }
1163 
1164 static void allocateSpecialInputVGPRs(CCState &CCInfo,
1165                                       MachineFunction &MF,
1166                                       const SIRegisterInfo &TRI,
1167                                       SIMachineFunctionInfo &Info) {
1168   if (Info.hasWorkItemIDX())
1169     Info.setWorkItemIDX(allocateVGPR32Input(CCInfo));
1170 
1171   if (Info.hasWorkItemIDY())
1172     Info.setWorkItemIDY(allocateVGPR32Input(CCInfo));
1173 
1174   if (Info.hasWorkItemIDZ())
1175     Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo));
1176 }
1177 
1178 static void allocateSpecialInputSGPRs(CCState &CCInfo,
1179                                       MachineFunction &MF,
1180                                       const SIRegisterInfo &TRI,
1181                                       SIMachineFunctionInfo &Info) {
1182   auto &ArgInfo = Info.getArgInfo();
1183 
1184   // TODO: Unify handling with private memory pointers.
1185 
1186   if (Info.hasDispatchPtr())
1187     ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1188 
1189   if (Info.hasQueuePtr())
1190     ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1191 
1192   if (Info.hasKernargSegmentPtr())
1193     ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);
1194 
1195   if (Info.hasDispatchID())
1196     ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1197 
1198   // flat_scratch_init is not applicable for non-kernel functions.
1199 
1200   if (Info.hasWorkGroupIDX())
1201     ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1202 
1203   if (Info.hasWorkGroupIDY())
1204     ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1205 
1206   if (Info.hasWorkGroupIDZ())
1207     ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
1208 
1209   if (Info.hasImplicitArgPtr())
1210     ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
1211 }
1212 
1213 // Allocate special inputs passed in user SGPRs.
1214 static void allocateHSAUserSGPRs(CCState &CCInfo,
1215                                  MachineFunction &MF,
1216                                  const SIRegisterInfo &TRI,
1217                                  SIMachineFunctionInfo &Info) {
1218   if (Info.hasImplicitBufferPtr()) {
1219     unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1220     MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1221     CCInfo.AllocateReg(ImplicitBufferPtrReg);
1222   }
1223 
1224   // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1225   if (Info.hasPrivateSegmentBuffer()) {
1226     unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1227     MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1228     CCInfo.AllocateReg(PrivateSegmentBufferReg);
1229   }
1230 
1231   if (Info.hasDispatchPtr()) {
1232     unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1233     MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1234     CCInfo.AllocateReg(DispatchPtrReg);
1235   }
1236 
1237   if (Info.hasQueuePtr()) {
1238     unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1239     MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1240     CCInfo.AllocateReg(QueuePtrReg);
1241   }
1242 
1243   if (Info.hasKernargSegmentPtr()) {
1244     unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
1245     MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1246     CCInfo.AllocateReg(InputPtrReg);
1247   }
1248 
1249   if (Info.hasDispatchID()) {
1250     unsigned DispatchIDReg = Info.addDispatchID(TRI);
1251     MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1252     CCInfo.AllocateReg(DispatchIDReg);
1253   }
1254 
1255   if (Info.hasFlatScratchInit()) {
1256     unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1257     MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1258     CCInfo.AllocateReg(FlatScratchInitReg);
1259   }
1260 
1261   // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1262   // these from the dispatch pointer.
1263 }
1264 
1265 // Allocate special input registers that are initialized per-wave.
1266 static void allocateSystemSGPRs(CCState &CCInfo,
1267                                 MachineFunction &MF,
1268                                 SIMachineFunctionInfo &Info,
1269                                 CallingConv::ID CallConv,
1270                                 bool IsShader) {
1271   if (Info.hasWorkGroupIDX()) {
1272     unsigned Reg = Info.addWorkGroupIDX();
1273     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1274     CCInfo.AllocateReg(Reg);
1275   }
1276 
1277   if (Info.hasWorkGroupIDY()) {
1278     unsigned Reg = Info.addWorkGroupIDY();
1279     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1280     CCInfo.AllocateReg(Reg);
1281   }
1282 
1283   if (Info.hasWorkGroupIDZ()) {
1284     unsigned Reg = Info.addWorkGroupIDZ();
1285     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1286     CCInfo.AllocateReg(Reg);
1287   }
1288 
1289   if (Info.hasWorkGroupInfo()) {
1290     unsigned Reg = Info.addWorkGroupInfo();
1291     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1292     CCInfo.AllocateReg(Reg);
1293   }
1294 
1295   if (Info.hasPrivateSegmentWaveByteOffset()) {
1296     // Scratch wave offset passed in system SGPR.
1297     unsigned PrivateSegmentWaveByteOffsetReg;
1298 
1299     if (IsShader) {
1300       PrivateSegmentWaveByteOffsetReg =
1301         Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
1302 
1303       // This is true if the scratch wave byte offset doesn't have a fixed
1304       // location.
1305       if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1306         PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1307         Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1308       }
1309     } else
1310       PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1311 
1312     MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1313     CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1314   }
1315 }
1316 
1317 static void reservePrivateMemoryRegs(const TargetMachine &TM,
1318                                      MachineFunction &MF,
1319                                      const SIRegisterInfo &TRI,
1320                                      SIMachineFunctionInfo &Info) {
1321   // Now that we've figured out where the scratch register inputs are, see if
1322   // should reserve the arguments and use them directly.
1323   MachineFrameInfo &MFI = MF.getFrameInfo();
1324   bool HasStackObjects = MFI.hasStackObjects();
1325 
1326   // Record that we know we have non-spill stack objects so we don't need to
1327   // check all stack objects later.
1328   if (HasStackObjects)
1329     Info.setHasNonSpillStackObjects(true);
1330 
1331   // Everything live out of a block is spilled with fast regalloc, so it's
1332   // almost certain that spilling will be required.
1333   if (TM.getOptLevel() == CodeGenOpt::None)
1334     HasStackObjects = true;
1335 
1336   // For now assume stack access is needed in any callee functions, so we need
1337   // the scratch registers to pass in.
1338   bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1339 
1340   const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
1341   if (ST.isAmdCodeObjectV2(MF)) {
1342     if (RequiresStackAccess) {
1343       // If we have stack objects, we unquestionably need the private buffer
1344       // resource. For the Code Object V2 ABI, this will be the first 4 user
1345       // SGPR inputs. We can reserve those and use them directly.
1346 
1347       unsigned PrivateSegmentBufferReg = Info.getPreloadedReg(
1348         AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
1349       Info.setScratchRSrcReg(PrivateSegmentBufferReg);
1350 
1351       if (MFI.hasCalls()) {
1352         // If we have calls, we need to keep the frame register in a register
1353         // that won't be clobbered by a call, so ensure it is copied somewhere.
1354 
1355         // This is not a problem for the scratch wave offset, because the same
1356         // registers are reserved in all functions.
1357 
1358         // FIXME: Nothing is really ensuring this is a call preserved register,
1359         // it's just selected from the end so it happens to be.
1360         unsigned ReservedOffsetReg
1361           = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1362         Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1363       } else {
1364         unsigned PrivateSegmentWaveByteOffsetReg = Info.getPreloadedReg(
1365           AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
1366         Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
1367       }
1368     } else {
1369       unsigned ReservedBufferReg
1370         = TRI.reservedPrivateSegmentBufferReg(MF);
1371       unsigned ReservedOffsetReg
1372         = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1373 
1374       // We tentatively reserve the last registers (skipping the last two
1375       // which may contain VCC). After register allocation, we'll replace
1376       // these with the ones immediately after those which were really
1377       // allocated. In the prologue copies will be inserted from the argument
1378       // to these reserved registers.
1379       Info.setScratchRSrcReg(ReservedBufferReg);
1380       Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1381     }
1382   } else {
1383     unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
1384 
1385     // Without HSA, relocations are used for the scratch pointer and the
1386     // buffer resource setup is always inserted in the prologue. Scratch wave
1387     // offset is still in an input SGPR.
1388     Info.setScratchRSrcReg(ReservedBufferReg);
1389 
1390     if (HasStackObjects && !MFI.hasCalls()) {
1391       unsigned ScratchWaveOffsetReg = Info.getPreloadedReg(
1392         AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
1393       Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg);
1394     } else {
1395       unsigned ReservedOffsetReg
1396         = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1397       Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1398     }
1399   }
1400 }
1401 
1402 bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
1403   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1404   return !Info->isEntryFunction();
1405 }
1406 
1407 void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
1408 
1409 }
1410 
1411 void SITargetLowering::insertCopiesSplitCSR(
1412   MachineBasicBlock *Entry,
1413   const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1414   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1415 
1416   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1417   if (!IStart)
1418     return;
1419 
1420   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1421   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1422   MachineBasicBlock::iterator MBBI = Entry->begin();
1423   for (const MCPhysReg *I = IStart; *I; ++I) {
1424     const TargetRegisterClass *RC = nullptr;
1425     if (AMDGPU::SReg_64RegClass.contains(*I))
1426       RC = &AMDGPU::SGPR_64RegClass;
1427     else if (AMDGPU::SReg_32RegClass.contains(*I))
1428       RC = &AMDGPU::SGPR_32RegClass;
1429     else
1430       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
1431 
1432     unsigned NewVR = MRI->createVirtualRegister(RC);
1433     // Create copy from CSR to a virtual register.
1434     Entry->addLiveIn(*I);
1435     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
1436       .addReg(*I);
1437 
1438     // Insert the copy-back instructions right before the terminator.
1439     for (auto *Exit : Exits)
1440       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
1441               TII->get(TargetOpcode::COPY), *I)
1442         .addReg(NewVR);
1443   }
1444 }
1445 
1446 SDValue SITargetLowering::LowerFormalArguments(
1447     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1448     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1449     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1450   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1451 
1452   MachineFunction &MF = DAG.getMachineFunction();
1453   FunctionType *FType = MF.getFunction()->getFunctionType();
1454   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1455   const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
1456 
1457   if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
1458     const Function *Fn = MF.getFunction();
1459     DiagnosticInfoUnsupported NoGraphicsHSA(
1460         *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
1461     DAG.getContext()->diagnose(NoGraphicsHSA);
1462     return DAG.getEntryNode();
1463   }
1464 
1465   // Create stack objects that are used for emitting debugger prologue if
1466   // "amdgpu-debugger-emit-prologue" attribute was specified.
1467   if (ST.debuggerEmitPrologue())
1468     createDebuggerPrologueStackObjects(MF);
1469 
1470   SmallVector<ISD::InputArg, 16> Splits;
1471   SmallVector<CCValAssign, 16> ArgLocs;
1472   BitVector Skipped(Ins.size());
1473   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1474                  *DAG.getContext());
1475 
1476   bool IsShader = AMDGPU::isShader(CallConv);
1477   bool IsKernel = AMDGPU::isKernel(CallConv);
1478   bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
1479 
1480   if (!IsEntryFunc) {
1481     // 4 bytes are reserved at offset 0 for the emergency stack slot. Skip over
1482     // this when allocating argument fixed offsets.
1483     CCInfo.AllocateStack(4, 4);
1484   }
1485 
1486   if (IsShader) {
1487     processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
1488 
1489     // At least one interpolation mode must be enabled or else the GPU will
1490     // hang.
1491     //
1492     // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
1493     // set PSInputAddr, the user wants to enable some bits after the compilation
1494     // based on run-time states. Since we can't know what the final PSInputEna
1495     // will look like, so we shouldn't do anything here and the user should take
1496     // responsibility for the correct programming.
1497     //
1498     // Otherwise, the following restrictions apply:
1499     // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
1500     // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
1501     //   enabled too.
1502     if (CallConv == CallingConv::AMDGPU_PS) {
1503       if ((Info->getPSInputAddr() & 0x7F) == 0 ||
1504            ((Info->getPSInputAddr() & 0xF) == 0 &&
1505             Info->isPSInputAllocated(11))) {
1506         CCInfo.AllocateReg(AMDGPU::VGPR0);
1507         CCInfo.AllocateReg(AMDGPU::VGPR1);
1508         Info->markPSInputAllocated(0);
1509         Info->markPSInputEnabled(0);
1510       }
1511       if (Subtarget->isAmdPalOS()) {
1512         // For isAmdPalOS, the user does not enable some bits after compilation
1513         // based on run-time states; the register values being generated here are
1514         // the final ones set in hardware. Therefore we need to apply the
1515         // workaround to PSInputAddr and PSInputEnable together.  (The case where
1516         // a bit is set in PSInputAddr but not PSInputEnable is where the
1517         // frontend set up an input arg for a particular interpolation mode, but
1518         // nothing uses that input arg. Really we should have an earlier pass
1519         // that removes such an arg.)
1520         unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
1521         if ((PsInputBits & 0x7F) == 0 ||
1522             ((PsInputBits & 0xF) == 0 &&
1523              (PsInputBits >> 11 & 1)))
1524           Info->markPSInputEnabled(
1525               countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
1526       }
1527     }
1528 
1529     assert(!Info->hasDispatchPtr() &&
1530            !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
1531            !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
1532            !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
1533            !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
1534            !Info->hasWorkItemIDZ());
1535   } else if (IsKernel) {
1536     assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
1537   } else {
1538     Splits.append(Ins.begin(), Ins.end());
1539   }
1540 
1541   if (IsEntryFunc) {
1542     allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
1543     allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
1544   }
1545 
1546   if (IsKernel) {
1547     analyzeFormalArgumentsCompute(CCInfo, Ins);
1548   } else {
1549     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
1550     CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
1551   }
1552 
1553   SmallVector<SDValue, 16> Chains;
1554 
1555   for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
1556     const ISD::InputArg &Arg = Ins[i];
1557     if (Skipped[i]) {
1558       InVals.push_back(DAG.getUNDEF(Arg.VT));
1559       continue;
1560     }
1561 
1562     CCValAssign &VA = ArgLocs[ArgIdx++];
1563     MVT VT = VA.getLocVT();
1564 
1565     if (IsEntryFunc && VA.isMemLoc()) {
1566       VT = Ins[i].VT;
1567       EVT MemVT = VA.getLocVT();
1568 
1569       const uint64_t Offset = Subtarget->getExplicitKernelArgOffset(MF) +
1570         VA.getLocMemOffset();
1571       Info->setABIArgOffset(Offset + MemVT.getStoreSize());
1572 
1573       // The first 36 bytes of the input buffer contains information about
1574       // thread group and global sizes.
1575       SDValue Arg = lowerKernargMemParameter(
1576         DAG, VT, MemVT, DL, Chain, Offset, Ins[i].Flags.isSExt(), &Ins[i]);
1577       Chains.push_back(Arg.getValue(1));
1578 
1579       auto *ParamTy =
1580         dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
1581       if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
1582           ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
1583         // On SI local pointers are just offsets into LDS, so they are always
1584         // less than 16-bits.  On CI and newer they could potentially be
1585         // real pointers, so we can't guarantee their size.
1586         Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
1587                           DAG.getValueType(MVT::i16));
1588       }
1589 
1590       InVals.push_back(Arg);
1591       continue;
1592     } else if (!IsEntryFunc && VA.isMemLoc()) {
1593       SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
1594       InVals.push_back(Val);
1595       if (!Arg.Flags.isByVal())
1596         Chains.push_back(Val.getValue(1));
1597       continue;
1598     }
1599 
1600     assert(VA.isRegLoc() && "Parameter must be in a register!");
1601 
1602     unsigned Reg = VA.getLocReg();
1603     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
1604     EVT ValVT = VA.getValVT();
1605 
1606     Reg = MF.addLiveIn(Reg, RC);
1607     SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
1608 
1609     if (Arg.Flags.isSRet() && !getSubtarget()->enableHugePrivateBuffer()) {
1610       // The return object should be reasonably addressable.
1611 
1612       // FIXME: This helps when the return is a real sret. If it is a
1613       // automatically inserted sret (i.e. CanLowerReturn returns false), an
1614       // extra copy is inserted in SelectionDAGBuilder which obscures this.
1615       unsigned NumBits = 32 - AssumeFrameIndexHighZeroBits;
1616       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
1617         DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
1618     }
1619 
1620     // If this is an 8 or 16-bit value, it is really passed promoted
1621     // to 32 bits. Insert an assert[sz]ext to capture this, then
1622     // truncate to the right size.
1623     switch (VA.getLocInfo()) {
1624     case CCValAssign::Full:
1625       break;
1626     case CCValAssign::BCvt:
1627       Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
1628       break;
1629     case CCValAssign::SExt:
1630       Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
1631                         DAG.getValueType(ValVT));
1632       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1633       break;
1634     case CCValAssign::ZExt:
1635       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
1636                         DAG.getValueType(ValVT));
1637       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1638       break;
1639     case CCValAssign::AExt:
1640       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1641       break;
1642     default:
1643       llvm_unreachable("Unknown loc info!");
1644     }
1645 
1646     if (IsShader && Arg.VT.isVector()) {
1647       // Build a vector from the registers
1648       Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
1649       unsigned NumElements = ParamType->getVectorNumElements();
1650 
1651       SmallVector<SDValue, 4> Regs;
1652       Regs.push_back(Val);
1653       for (unsigned j = 1; j != NumElements; ++j) {
1654         Reg = ArgLocs[ArgIdx++].getLocReg();
1655         Reg = MF.addLiveIn(Reg, RC);
1656 
1657         SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
1658         Regs.push_back(Copy);
1659       }
1660 
1661       // Fill up the missing vector elements
1662       NumElements = Arg.VT.getVectorNumElements() - NumElements;
1663       Regs.append(NumElements, DAG.getUNDEF(VT));
1664 
1665       InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
1666       continue;
1667     }
1668 
1669     InVals.push_back(Val);
1670   }
1671 
1672   if (!IsEntryFunc) {
1673     // Special inputs come after user arguments.
1674     allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
1675   }
1676 
1677   // Start adding system SGPRs.
1678   if (IsEntryFunc) {
1679     allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
1680   } else {
1681     CCInfo.AllocateReg(Info->getScratchRSrcReg());
1682     CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
1683     CCInfo.AllocateReg(Info->getFrameOffsetReg());
1684     allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
1685   }
1686 
1687   auto &ArgUsageInfo =
1688     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
1689   ArgUsageInfo.setFuncArgInfo(*MF.getFunction(), Info->getArgInfo());
1690 
1691   unsigned StackArgSize = CCInfo.getNextStackOffset();
1692   Info->setBytesInStackArgArea(StackArgSize);
1693 
1694   return Chains.empty() ? Chain :
1695     DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
1696 }
1697 
1698 // TODO: If return values can't fit in registers, we should return as many as
1699 // possible in registers before passing on stack.
1700 bool SITargetLowering::CanLowerReturn(
1701   CallingConv::ID CallConv,
1702   MachineFunction &MF, bool IsVarArg,
1703   const SmallVectorImpl<ISD::OutputArg> &Outs,
1704   LLVMContext &Context) const {
1705   // Replacing returns with sret/stack usage doesn't make sense for shaders.
1706   // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
1707   // for shaders. Vector types should be explicitly handled by CC.
1708   if (AMDGPU::isEntryFunctionCC(CallConv))
1709     return true;
1710 
1711   SmallVector<CCValAssign, 16> RVLocs;
1712   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
1713   return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
1714 }
1715 
1716 SDValue
1717 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1718                               bool isVarArg,
1719                               const SmallVectorImpl<ISD::OutputArg> &Outs,
1720                               const SmallVectorImpl<SDValue> &OutVals,
1721                               const SDLoc &DL, SelectionDAG &DAG) const {
1722   MachineFunction &MF = DAG.getMachineFunction();
1723   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1724 
1725   if (AMDGPU::isKernel(CallConv)) {
1726     return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
1727                                              OutVals, DL, DAG);
1728   }
1729 
1730   bool IsShader = AMDGPU::isShader(CallConv);
1731 
1732   Info->setIfReturnsVoid(Outs.size() == 0);
1733   bool IsWaveEnd = Info->returnsVoid() && IsShader;
1734 
1735   SmallVector<ISD::OutputArg, 48> Splits;
1736   SmallVector<SDValue, 48> SplitVals;
1737 
1738   // Split vectors into their elements.
1739   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
1740     const ISD::OutputArg &Out = Outs[i];
1741 
1742     if (IsShader && Out.VT.isVector()) {
1743       MVT VT = Out.VT.getVectorElementType();
1744       ISD::OutputArg NewOut = Out;
1745       NewOut.Flags.setSplit();
1746       NewOut.VT = VT;
1747 
1748       // We want the original number of vector elements here, e.g.
1749       // three or five, not four or eight.
1750       unsigned NumElements = Out.ArgVT.getVectorNumElements();
1751 
1752       for (unsigned j = 0; j != NumElements; ++j) {
1753         SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i],
1754                                    DAG.getConstant(j, DL, MVT::i32));
1755         SplitVals.push_back(Elem);
1756         Splits.push_back(NewOut);
1757         NewOut.PartOffset += NewOut.VT.getStoreSize();
1758       }
1759     } else {
1760       SplitVals.push_back(OutVals[i]);
1761       Splits.push_back(Out);
1762     }
1763   }
1764 
1765   // CCValAssign - represent the assignment of the return value to a location.
1766   SmallVector<CCValAssign, 48> RVLocs;
1767 
1768   // CCState - Info about the registers and stack slots.
1769   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1770                  *DAG.getContext());
1771 
1772   // Analyze outgoing return values.
1773   CCInfo.AnalyzeReturn(Splits, CCAssignFnForReturn(CallConv, isVarArg));
1774 
1775   SDValue Flag;
1776   SmallVector<SDValue, 48> RetOps;
1777   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
1778 
1779   // Add return address for callable functions.
1780   if (!Info->isEntryFunction()) {
1781     const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1782     SDValue ReturnAddrReg = CreateLiveInRegister(
1783       DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
1784 
1785     // FIXME: Should be able to use a vreg here, but need a way to prevent it
1786     // from being allcoated to a CSR.
1787 
1788     SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
1789                                                 MVT::i64);
1790 
1791     Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
1792     Flag = Chain.getValue(1);
1793 
1794     RetOps.push_back(PhysReturnAddrReg);
1795   }
1796 
1797   // Copy the result values into the output registers.
1798   for (unsigned i = 0, realRVLocIdx = 0;
1799        i != RVLocs.size();
1800        ++i, ++realRVLocIdx) {
1801     CCValAssign &VA = RVLocs[i];
1802     assert(VA.isRegLoc() && "Can only return in registers!");
1803     // TODO: Partially return in registers if return values don't fit.
1804 
1805     SDValue Arg = SplitVals[realRVLocIdx];
1806 
1807     // Copied from other backends.
1808     switch (VA.getLocInfo()) {
1809     case CCValAssign::Full:
1810       break;
1811     case CCValAssign::BCvt:
1812       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1813       break;
1814     case CCValAssign::SExt:
1815       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1816       break;
1817     case CCValAssign::ZExt:
1818       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1819       break;
1820     case CCValAssign::AExt:
1821       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1822       break;
1823     default:
1824       llvm_unreachable("Unknown loc info!");
1825     }
1826 
1827     Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
1828     Flag = Chain.getValue(1);
1829     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1830   }
1831 
1832   // FIXME: Does sret work properly?
1833   if (!Info->isEntryFunction()) {
1834     const SIRegisterInfo *TRI
1835       = static_cast<const SISubtarget *>(Subtarget)->getRegisterInfo();
1836     const MCPhysReg *I =
1837       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
1838     if (I) {
1839       for (; *I; ++I) {
1840         if (AMDGPU::SReg_64RegClass.contains(*I))
1841           RetOps.push_back(DAG.getRegister(*I, MVT::i64));
1842         else if (AMDGPU::SReg_32RegClass.contains(*I))
1843           RetOps.push_back(DAG.getRegister(*I, MVT::i32));
1844         else
1845           llvm_unreachable("Unexpected register class in CSRsViaCopy!");
1846       }
1847     }
1848   }
1849 
1850   // Update chain and glue.
1851   RetOps[0] = Chain;
1852   if (Flag.getNode())
1853     RetOps.push_back(Flag);
1854 
1855   unsigned Opc = AMDGPUISD::ENDPGM;
1856   if (!IsWaveEnd)
1857     Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
1858   return DAG.getNode(Opc, DL, MVT::Other, RetOps);
1859 }
1860 
1861 SDValue SITargetLowering::LowerCallResult(
1862     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
1863     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1864     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
1865     SDValue ThisVal) const {
1866   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
1867 
1868   // Assign locations to each value returned by this call.
1869   SmallVector<CCValAssign, 16> RVLocs;
1870   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
1871                  *DAG.getContext());
1872   CCInfo.AnalyzeCallResult(Ins, RetCC);
1873 
1874   // Copy all of the result registers out of their specified physreg.
1875   for (unsigned i = 0; i != RVLocs.size(); ++i) {
1876     CCValAssign VA = RVLocs[i];
1877     SDValue Val;
1878 
1879     if (VA.isRegLoc()) {
1880       Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
1881       Chain = Val.getValue(1);
1882       InFlag = Val.getValue(2);
1883     } else if (VA.isMemLoc()) {
1884       report_fatal_error("TODO: return values in memory");
1885     } else
1886       llvm_unreachable("unknown argument location type");
1887 
1888     switch (VA.getLocInfo()) {
1889     case CCValAssign::Full:
1890       break;
1891     case CCValAssign::BCvt:
1892       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
1893       break;
1894     case CCValAssign::ZExt:
1895       Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
1896                         DAG.getValueType(VA.getValVT()));
1897       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
1898       break;
1899     case CCValAssign::SExt:
1900       Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
1901                         DAG.getValueType(VA.getValVT()));
1902       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
1903       break;
1904     case CCValAssign::AExt:
1905       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
1906       break;
1907     default:
1908       llvm_unreachable("Unknown loc info!");
1909     }
1910 
1911     InVals.push_back(Val);
1912   }
1913 
1914   return Chain;
1915 }
1916 
1917 // Add code to pass special inputs required depending on used features separate
1918 // from the explicit user arguments present in the IR.
1919 void SITargetLowering::passSpecialInputs(
1920     CallLoweringInfo &CLI,
1921     const SIMachineFunctionInfo &Info,
1922     SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
1923     SmallVectorImpl<SDValue> &MemOpChains,
1924     SDValue Chain,
1925     SDValue StackPtr) const {
1926   // If we don't have a call site, this was a call inserted by
1927   // legalization. These can never use special inputs.
1928   if (!CLI.CS)
1929     return;
1930 
1931   const Function *CalleeFunc = CLI.CS.getCalledFunction();
1932   assert(CalleeFunc);
1933 
1934   SelectionDAG &DAG = CLI.DAG;
1935   const SDLoc &DL = CLI.DL;
1936 
1937   const SISubtarget *ST = getSubtarget();
1938   const SIRegisterInfo *TRI = ST->getRegisterInfo();
1939 
1940   auto &ArgUsageInfo =
1941     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
1942   const AMDGPUFunctionArgInfo &CalleeArgInfo
1943     = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
1944 
1945   const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
1946 
1947   // TODO: Unify with private memory register handling. This is complicated by
1948   // the fact that at least in kernels, the input argument is not necessarily
1949   // in the same location as the input.
1950   AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
1951     AMDGPUFunctionArgInfo::DISPATCH_PTR,
1952     AMDGPUFunctionArgInfo::QUEUE_PTR,
1953     AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
1954     AMDGPUFunctionArgInfo::DISPATCH_ID,
1955     AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
1956     AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
1957     AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
1958     AMDGPUFunctionArgInfo::WORKITEM_ID_X,
1959     AMDGPUFunctionArgInfo::WORKITEM_ID_Y,
1960     AMDGPUFunctionArgInfo::WORKITEM_ID_Z,
1961     AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
1962   };
1963 
1964   for (auto InputID : InputRegs) {
1965     const ArgDescriptor *OutgoingArg;
1966     const TargetRegisterClass *ArgRC;
1967 
1968     std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
1969     if (!OutgoingArg)
1970       continue;
1971 
1972     const ArgDescriptor *IncomingArg;
1973     const TargetRegisterClass *IncomingArgRC;
1974     std::tie(IncomingArg, IncomingArgRC)
1975       = CallerArgInfo.getPreloadedValue(InputID);
1976     assert(IncomingArgRC == ArgRC);
1977 
1978     // All special arguments are ints for now.
1979     EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
1980     SDValue InputReg;
1981 
1982     if (IncomingArg) {
1983       InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
1984     } else {
1985       // The implicit arg ptr is special because it doesn't have a corresponding
1986       // input for kernels, and is computed from the kernarg segment pointer.
1987       assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
1988       InputReg = getImplicitArgPtr(DAG, DL);
1989     }
1990 
1991     if (OutgoingArg->isRegister()) {
1992       RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
1993     } else {
1994       SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, StackPtr,
1995                                               InputReg,
1996                                               OutgoingArg->getStackOffset());
1997       MemOpChains.push_back(ArgStore);
1998     }
1999   }
2000 }
2001 
2002 static bool canGuaranteeTCO(CallingConv::ID CC) {
2003   return CC == CallingConv::Fast;
2004 }
2005 
2006 /// Return true if we might ever do TCO for calls with this calling convention.
2007 static bool mayTailCallThisCC(CallingConv::ID CC) {
2008   switch (CC) {
2009   case CallingConv::C:
2010     return true;
2011   default:
2012     return canGuaranteeTCO(CC);
2013   }
2014 }
2015 
2016 bool SITargetLowering::isEligibleForTailCallOptimization(
2017     SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2018     const SmallVectorImpl<ISD::OutputArg> &Outs,
2019     const SmallVectorImpl<SDValue> &OutVals,
2020     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2021   if (!mayTailCallThisCC(CalleeCC))
2022     return false;
2023 
2024   MachineFunction &MF = DAG.getMachineFunction();
2025   const Function *CallerF = MF.getFunction();
2026   CallingConv::ID CallerCC = CallerF->getCallingConv();
2027   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2028   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2029 
2030   // Kernels aren't callable, and don't have a live in return address so it
2031   // doesn't make sense to do a tail call with entry functions.
2032   if (!CallerPreserved)
2033     return false;
2034 
2035   bool CCMatch = CallerCC == CalleeCC;
2036 
2037   if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
2038     if (canGuaranteeTCO(CalleeCC) && CCMatch)
2039       return true;
2040     return false;
2041   }
2042 
2043   // TODO: Can we handle var args?
2044   if (IsVarArg)
2045     return false;
2046 
2047   for (const Argument &Arg : CallerF->args()) {
2048     if (Arg.hasByValAttr())
2049       return false;
2050   }
2051 
2052   LLVMContext &Ctx = *DAG.getContext();
2053 
2054   // Check that the call results are passed in the same way.
2055   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2056                                   CCAssignFnForCall(CalleeCC, IsVarArg),
2057                                   CCAssignFnForCall(CallerCC, IsVarArg)))
2058     return false;
2059 
2060   // The callee has to preserve all registers the caller needs to preserve.
2061   if (!CCMatch) {
2062     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2063     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2064       return false;
2065   }
2066 
2067   // Nothing more to check if the callee is taking no arguments.
2068   if (Outs.empty())
2069     return true;
2070 
2071   SmallVector<CCValAssign, 16> ArgLocs;
2072   CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2073 
2074   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2075 
2076   const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2077   // If the stack arguments for this call do not fit into our own save area then
2078   // the call cannot be made tail.
2079   // TODO: Is this really necessary?
2080   if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2081     return false;
2082 
2083   const MachineRegisterInfo &MRI = MF.getRegInfo();
2084   return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2085 }
2086 
2087 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2088   if (!CI->isTailCall())
2089     return false;
2090 
2091   const Function *ParentFn = CI->getParent()->getParent();
2092   if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2093     return false;
2094 
2095   auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
2096   return (Attr.getValueAsString() != "true");
2097 }
2098 
2099 // The wave scratch offset register is used as the global base pointer.
2100 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2101                                     SmallVectorImpl<SDValue> &InVals) const {
2102   SelectionDAG &DAG = CLI.DAG;
2103   const SDLoc &DL = CLI.DL;
2104   SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2105   SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2106   SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2107   SDValue Chain = CLI.Chain;
2108   SDValue Callee = CLI.Callee;
2109   bool &IsTailCall = CLI.IsTailCall;
2110   CallingConv::ID CallConv = CLI.CallConv;
2111   bool IsVarArg = CLI.IsVarArg;
2112   bool IsSibCall = false;
2113   bool IsThisReturn = false;
2114   MachineFunction &MF = DAG.getMachineFunction();
2115 
2116   if (IsVarArg) {
2117     return lowerUnhandledCall(CLI, InVals,
2118                               "unsupported call to variadic function ");
2119   }
2120 
2121   if (!CLI.CS.getCalledFunction()) {
2122     return lowerUnhandledCall(CLI, InVals,
2123                               "unsupported indirect call to function ");
2124   }
2125 
2126   if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2127     return lowerUnhandledCall(CLI, InVals,
2128                               "unsupported required tail call to function ");
2129   }
2130 
2131   // The first 4 bytes are reserved for the callee's emergency stack slot.
2132   const unsigned CalleeUsableStackOffset = 4;
2133 
2134   if (IsTailCall) {
2135     IsTailCall = isEligibleForTailCallOptimization(
2136       Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2137     if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2138       report_fatal_error("failed to perform tail call elimination on a call "
2139                          "site marked musttail");
2140     }
2141 
2142     bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2143 
2144     // A sibling call is one where we're under the usual C ABI and not planning
2145     // to change that but can still do a tail call:
2146     if (!TailCallOpt && IsTailCall)
2147       IsSibCall = true;
2148 
2149     if (IsTailCall)
2150       ++NumTailCalls;
2151   }
2152 
2153   if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee)) {
2154     // FIXME: Remove this hack for function pointer types after removing
2155     // support of old address space mapping. In the new address space
2156     // mapping the pointer in default address space is 64 bit, therefore
2157     // does not need this hack.
2158     if (Callee.getValueType() == MVT::i32) {
2159       const GlobalValue *GV = GA->getGlobal();
2160       Callee = DAG.getGlobalAddress(GV, DL, MVT::i64, GA->getOffset(), false,
2161                                     GA->getTargetFlags());
2162     }
2163   }
2164   assert(Callee.getValueType() == MVT::i64);
2165 
2166   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2167 
2168   // Analyze operands of the call, assigning locations to each operand.
2169   SmallVector<CCValAssign, 16> ArgLocs;
2170   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2171   CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
2172   CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2173 
2174   // Get a count of how many bytes are to be pushed on the stack.
2175   unsigned NumBytes = CCInfo.getNextStackOffset();
2176 
2177   if (IsSibCall) {
2178     // Since we're not changing the ABI to make this a tail call, the memory
2179     // operands are already available in the caller's incoming argument space.
2180     NumBytes = 0;
2181   }
2182 
2183   // FPDiff is the byte offset of the call's argument area from the callee's.
2184   // Stores to callee stack arguments will be placed in FixedStackSlots offset
2185   // by this amount for a tail call. In a sibling call it must be 0 because the
2186   // caller will deallocate the entire stack and the callee still expects its
2187   // arguments to begin at SP+0. Completely unused for non-tail calls.
2188   int32_t FPDiff = 0;
2189   MachineFrameInfo &MFI = MF.getFrameInfo();
2190   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2191 
2192   SDValue CallerSavedFP;
2193 
2194   // Adjust the stack pointer for the new arguments...
2195   // These operations are automatically eliminated by the prolog/epilog pass
2196   if (!IsSibCall) {
2197     Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2198 
2199     unsigned OffsetReg = Info->getScratchWaveOffsetReg();
2200 
2201     // In the HSA case, this should be an identity copy.
2202     SDValue ScratchRSrcReg
2203       = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2204     RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
2205 
2206     // TODO: Don't hardcode these registers and get from the callee function.
2207     SDValue ScratchWaveOffsetReg
2208       = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32);
2209     RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg);
2210 
2211     if (!Info->isEntryFunction()) {
2212       // Avoid clobbering this function's FP value. In the current convention
2213       // callee will overwrite this, so do save/restore around the call site.
2214       CallerSavedFP = DAG.getCopyFromReg(Chain, DL,
2215                                          Info->getFrameOffsetReg(), MVT::i32);
2216     }
2217   }
2218 
2219   // Stack pointer relative accesses are done by changing the offset SGPR. This
2220   // is just the VGPR offset component.
2221   SDValue StackPtr = DAG.getConstant(CalleeUsableStackOffset, DL, MVT::i32);
2222 
2223   SmallVector<SDValue, 8> MemOpChains;
2224   MVT PtrVT = MVT::i32;
2225 
2226   // Walk the register/memloc assignments, inserting copies/loads.
2227   for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2228        ++i, ++realArgIdx) {
2229     CCValAssign &VA = ArgLocs[i];
2230     SDValue Arg = OutVals[realArgIdx];
2231 
2232     // Promote the value if needed.
2233     switch (VA.getLocInfo()) {
2234     case CCValAssign::Full:
2235       break;
2236     case CCValAssign::BCvt:
2237       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2238       break;
2239     case CCValAssign::ZExt:
2240       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2241       break;
2242     case CCValAssign::SExt:
2243       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2244       break;
2245     case CCValAssign::AExt:
2246       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2247       break;
2248     case CCValAssign::FPExt:
2249       Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2250       break;
2251     default:
2252       llvm_unreachable("Unknown loc info!");
2253     }
2254 
2255     if (VA.isRegLoc()) {
2256       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2257     } else {
2258       assert(VA.isMemLoc());
2259 
2260       SDValue DstAddr;
2261       MachinePointerInfo DstInfo;
2262 
2263       unsigned LocMemOffset = VA.getLocMemOffset();
2264       int32_t Offset = LocMemOffset;
2265 
2266       SDValue PtrOff = DAG.getObjectPtrOffset(DL, StackPtr, Offset);
2267 
2268       if (IsTailCall) {
2269         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2270         unsigned OpSize = Flags.isByVal() ?
2271           Flags.getByValSize() : VA.getValVT().getStoreSize();
2272 
2273         Offset = Offset + FPDiff;
2274         int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2275 
2276         DstAddr = DAG.getObjectPtrOffset(DL, DAG.getFrameIndex(FI, PtrVT),
2277                                          StackPtr);
2278         DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2279 
2280         // Make sure any stack arguments overlapping with where we're storing
2281         // are loaded before this eventual operation. Otherwise they'll be
2282         // clobbered.
2283 
2284         // FIXME: Why is this really necessary? This seems to just result in a
2285         // lot of code to copy the stack and write them back to the same
2286         // locations, which are supposed to be immutable?
2287         Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2288       } else {
2289         DstAddr = PtrOff;
2290         DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
2291       }
2292 
2293       if (Outs[i].Flags.isByVal()) {
2294         SDValue SizeNode =
2295             DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2296         SDValue Cpy = DAG.getMemcpy(
2297             Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2298             /*isVol = */ false, /*AlwaysInline = */ true,
2299             /*isTailCall = */ false, DstInfo,
2300             MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
2301                 *DAG.getContext(), AMDGPUASI.PRIVATE_ADDRESS))));
2302 
2303         MemOpChains.push_back(Cpy);
2304       } else {
2305         SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo);
2306         MemOpChains.push_back(Store);
2307       }
2308     }
2309   }
2310 
2311   // Copy special input registers after user input arguments.
2312   passSpecialInputs(CLI, *Info, RegsToPass, MemOpChains, Chain, StackPtr);
2313 
2314   if (!MemOpChains.empty())
2315     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2316 
2317   // Build a sequence of copy-to-reg nodes chained together with token chain
2318   // and flag operands which copy the outgoing args into the appropriate regs.
2319   SDValue InFlag;
2320   for (auto &RegToPass : RegsToPass) {
2321     Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2322                              RegToPass.second, InFlag);
2323     InFlag = Chain.getValue(1);
2324   }
2325 
2326 
2327   SDValue PhysReturnAddrReg;
2328   if (IsTailCall) {
2329     // Since the return is being combined with the call, we need to pass on the
2330     // return address.
2331 
2332     const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2333     SDValue ReturnAddrReg = CreateLiveInRegister(
2334       DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2335 
2336     PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2337                                         MVT::i64);
2338     Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2339     InFlag = Chain.getValue(1);
2340   }
2341 
2342   // We don't usually want to end the call-sequence here because we would tidy
2343   // the frame up *after* the call, however in the ABI-changing tail-call case
2344   // we've carefully laid out the parameters so that when sp is reset they'll be
2345   // in the correct location.
2346   if (IsTailCall && !IsSibCall) {
2347     Chain = DAG.getCALLSEQ_END(Chain,
2348                                DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2349                                DAG.getTargetConstant(0, DL, MVT::i32),
2350                                InFlag, DL);
2351     InFlag = Chain.getValue(1);
2352   }
2353 
2354   std::vector<SDValue> Ops;
2355   Ops.push_back(Chain);
2356   Ops.push_back(Callee);
2357 
2358   if (IsTailCall) {
2359     // Each tail call may have to adjust the stack by a different amount, so
2360     // this information must travel along with the operation for eventual
2361     // consumption by emitEpilogue.
2362     Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
2363 
2364     Ops.push_back(PhysReturnAddrReg);
2365   }
2366 
2367   // Add argument registers to the end of the list so that they are known live
2368   // into the call.
2369   for (auto &RegToPass : RegsToPass) {
2370     Ops.push_back(DAG.getRegister(RegToPass.first,
2371                                   RegToPass.second.getValueType()));
2372   }
2373 
2374   // Add a register mask operand representing the call-preserved registers.
2375 
2376   const AMDGPURegisterInfo *TRI = Subtarget->getRegisterInfo();
2377   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2378   assert(Mask && "Missing call preserved mask for calling convention");
2379   Ops.push_back(DAG.getRegisterMask(Mask));
2380 
2381   if (InFlag.getNode())
2382     Ops.push_back(InFlag);
2383 
2384   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2385 
2386   // If we're doing a tall call, use a TC_RETURN here rather than an
2387   // actual call instruction.
2388   if (IsTailCall) {
2389     MFI.setHasTailCall();
2390     return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
2391   }
2392 
2393   // Returns a chain and a flag for retval copy to use.
2394   SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2395   Chain = Call.getValue(0);
2396   InFlag = Call.getValue(1);
2397 
2398   if (CallerSavedFP) {
2399     SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32);
2400     Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag);
2401     InFlag = Chain.getValue(1);
2402   }
2403 
2404   uint64_t CalleePopBytes = NumBytes;
2405   Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
2406                              DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2407                              InFlag, DL);
2408   if (!Ins.empty())
2409     InFlag = Chain.getValue(1);
2410 
2411   // Handle result values, copying them out of physregs into vregs that we
2412   // return.
2413   return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2414                          InVals, IsThisReturn,
2415                          IsThisReturn ? OutVals[0] : SDValue());
2416 }
2417 
2418 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2419                                              SelectionDAG &DAG) const {
2420   unsigned Reg = StringSwitch<unsigned>(RegName)
2421     .Case("m0", AMDGPU::M0)
2422     .Case("exec", AMDGPU::EXEC)
2423     .Case("exec_lo", AMDGPU::EXEC_LO)
2424     .Case("exec_hi", AMDGPU::EXEC_HI)
2425     .Case("flat_scratch", AMDGPU::FLAT_SCR)
2426     .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2427     .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2428     .Default(AMDGPU::NoRegister);
2429 
2430   if (Reg == AMDGPU::NoRegister) {
2431     report_fatal_error(Twine("invalid register name \""
2432                              + StringRef(RegName)  + "\"."));
2433 
2434   }
2435 
2436   if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
2437       Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
2438     report_fatal_error(Twine("invalid register \""
2439                              + StringRef(RegName)  + "\" for subtarget."));
2440   }
2441 
2442   switch (Reg) {
2443   case AMDGPU::M0:
2444   case AMDGPU::EXEC_LO:
2445   case AMDGPU::EXEC_HI:
2446   case AMDGPU::FLAT_SCR_LO:
2447   case AMDGPU::FLAT_SCR_HI:
2448     if (VT.getSizeInBits() == 32)
2449       return Reg;
2450     break;
2451   case AMDGPU::EXEC:
2452   case AMDGPU::FLAT_SCR:
2453     if (VT.getSizeInBits() == 64)
2454       return Reg;
2455     break;
2456   default:
2457     llvm_unreachable("missing register type checking");
2458   }
2459 
2460   report_fatal_error(Twine("invalid type for register \""
2461                            + StringRef(RegName) + "\"."));
2462 }
2463 
2464 // If kill is not the last instruction, split the block so kill is always a
2465 // proper terminator.
2466 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
2467                                                     MachineBasicBlock *BB) const {
2468   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
2469 
2470   MachineBasicBlock::iterator SplitPoint(&MI);
2471   ++SplitPoint;
2472 
2473   if (SplitPoint == BB->end()) {
2474     // Don't bother with a new block.
2475     MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
2476     return BB;
2477   }
2478 
2479   MachineFunction *MF = BB->getParent();
2480   MachineBasicBlock *SplitBB
2481     = MF->CreateMachineBasicBlock(BB->getBasicBlock());
2482 
2483   MF->insert(++MachineFunction::iterator(BB), SplitBB);
2484   SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
2485 
2486   SplitBB->transferSuccessorsAndUpdatePHIs(BB);
2487   BB->addSuccessor(SplitBB);
2488 
2489   MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
2490   return SplitBB;
2491 }
2492 
2493 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
2494 // wavefront. If the value is uniform and just happens to be in a VGPR, this
2495 // will only do one iteration. In the worst case, this will loop 64 times.
2496 //
2497 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
2498 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
2499   const SIInstrInfo *TII,
2500   MachineRegisterInfo &MRI,
2501   MachineBasicBlock &OrigBB,
2502   MachineBasicBlock &LoopBB,
2503   const DebugLoc &DL,
2504   const MachineOperand &IdxReg,
2505   unsigned InitReg,
2506   unsigned ResultReg,
2507   unsigned PhiReg,
2508   unsigned InitSaveExecReg,
2509   int Offset,
2510   bool UseGPRIdxMode) {
2511   MachineBasicBlock::iterator I = LoopBB.begin();
2512 
2513   unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2514   unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2515   unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2516   unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2517 
2518   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
2519     .addReg(InitReg)
2520     .addMBB(&OrigBB)
2521     .addReg(ResultReg)
2522     .addMBB(&LoopBB);
2523 
2524   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
2525     .addReg(InitSaveExecReg)
2526     .addMBB(&OrigBB)
2527     .addReg(NewExec)
2528     .addMBB(&LoopBB);
2529 
2530   // Read the next variant <- also loop target.
2531   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
2532     .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
2533 
2534   // Compare the just read M0 value to all possible Idx values.
2535   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
2536     .addReg(CurrentIdxReg)
2537     .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
2538 
2539   if (UseGPRIdxMode) {
2540     unsigned IdxReg;
2541     if (Offset == 0) {
2542       IdxReg = CurrentIdxReg;
2543     } else {
2544       IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2545       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
2546         .addReg(CurrentIdxReg, RegState::Kill)
2547         .addImm(Offset);
2548     }
2549 
2550     MachineInstr *SetIdx =
2551       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_IDX))
2552       .addReg(IdxReg, RegState::Kill);
2553     SetIdx->getOperand(2).setIsUndef();
2554   } else {
2555     // Move index from VCC into M0
2556     if (Offset == 0) {
2557       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2558         .addReg(CurrentIdxReg, RegState::Kill);
2559     } else {
2560       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
2561         .addReg(CurrentIdxReg, RegState::Kill)
2562         .addImm(Offset);
2563     }
2564   }
2565 
2566   // Update EXEC, save the original EXEC value to VCC.
2567   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
2568     .addReg(CondReg, RegState::Kill);
2569 
2570   MRI.setSimpleHint(NewExec, CondReg);
2571 
2572   // Update EXEC, switch all done bits to 0 and all todo bits to 1.
2573   MachineInstr *InsertPt =
2574     BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
2575     .addReg(AMDGPU::EXEC)
2576     .addReg(NewExec);
2577 
2578   // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
2579   // s_cbranch_scc0?
2580 
2581   // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
2582   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
2583     .addMBB(&LoopBB);
2584 
2585   return InsertPt->getIterator();
2586 }
2587 
2588 // This has slightly sub-optimal regalloc when the source vector is killed by
2589 // the read. The register allocator does not understand that the kill is
2590 // per-workitem, so is kept alive for the whole loop so we end up not re-using a
2591 // subregister from it, using 1 more VGPR than necessary. This was saved when
2592 // this was expanded after register allocation.
2593 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
2594                                                   MachineBasicBlock &MBB,
2595                                                   MachineInstr &MI,
2596                                                   unsigned InitResultReg,
2597                                                   unsigned PhiReg,
2598                                                   int Offset,
2599                                                   bool UseGPRIdxMode) {
2600   MachineFunction *MF = MBB.getParent();
2601   MachineRegisterInfo &MRI = MF->getRegInfo();
2602   const DebugLoc &DL = MI.getDebugLoc();
2603   MachineBasicBlock::iterator I(&MI);
2604 
2605   unsigned DstReg = MI.getOperand(0).getReg();
2606   unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
2607   unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
2608 
2609   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
2610 
2611   // Save the EXEC mask
2612   BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
2613     .addReg(AMDGPU::EXEC);
2614 
2615   // To insert the loop we need to split the block. Move everything after this
2616   // point to a new block, and insert a new empty block between the two.
2617   MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
2618   MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
2619   MachineFunction::iterator MBBI(MBB);
2620   ++MBBI;
2621 
2622   MF->insert(MBBI, LoopBB);
2623   MF->insert(MBBI, RemainderBB);
2624 
2625   LoopBB->addSuccessor(LoopBB);
2626   LoopBB->addSuccessor(RemainderBB);
2627 
2628   // Move the rest of the block into a new block.
2629   RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
2630   RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
2631 
2632   MBB.addSuccessor(LoopBB);
2633 
2634   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2635 
2636   auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
2637                                       InitResultReg, DstReg, PhiReg, TmpExec,
2638                                       Offset, UseGPRIdxMode);
2639 
2640   MachineBasicBlock::iterator First = RemainderBB->begin();
2641   BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
2642     .addReg(SaveExec);
2643 
2644   return InsPt;
2645 }
2646 
2647 // Returns subreg index, offset
2648 static std::pair<unsigned, int>
2649 computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
2650                             const TargetRegisterClass *SuperRC,
2651                             unsigned VecReg,
2652                             int Offset) {
2653   int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
2654 
2655   // Skip out of bounds offsets, or else we would end up using an undefined
2656   // register.
2657   if (Offset >= NumElts || Offset < 0)
2658     return std::make_pair(AMDGPU::sub0, Offset);
2659 
2660   return std::make_pair(AMDGPU::sub0 + Offset, 0);
2661 }
2662 
2663 // Return true if the index is an SGPR and was set.
2664 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
2665                                  MachineRegisterInfo &MRI,
2666                                  MachineInstr &MI,
2667                                  int Offset,
2668                                  bool UseGPRIdxMode,
2669                                  bool IsIndirectSrc) {
2670   MachineBasicBlock *MBB = MI.getParent();
2671   const DebugLoc &DL = MI.getDebugLoc();
2672   MachineBasicBlock::iterator I(&MI);
2673 
2674   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2675   const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
2676 
2677   assert(Idx->getReg() != AMDGPU::NoRegister);
2678 
2679   if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
2680     return false;
2681 
2682   if (UseGPRIdxMode) {
2683     unsigned IdxMode = IsIndirectSrc ?
2684       VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
2685     if (Offset == 0) {
2686       MachineInstr *SetOn =
2687           BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2688               .add(*Idx)
2689               .addImm(IdxMode);
2690 
2691       SetOn->getOperand(3).setIsUndef();
2692     } else {
2693       unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
2694       BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
2695           .add(*Idx)
2696           .addImm(Offset);
2697       MachineInstr *SetOn =
2698         BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2699         .addReg(Tmp, RegState::Kill)
2700         .addImm(IdxMode);
2701 
2702       SetOn->getOperand(3).setIsUndef();
2703     }
2704 
2705     return true;
2706   }
2707 
2708   if (Offset == 0) {
2709     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2710       .add(*Idx);
2711   } else {
2712     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
2713       .add(*Idx)
2714       .addImm(Offset);
2715   }
2716 
2717   return true;
2718 }
2719 
2720 // Control flow needs to be inserted if indexing with a VGPR.
2721 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
2722                                           MachineBasicBlock &MBB,
2723                                           const SISubtarget &ST) {
2724   const SIInstrInfo *TII = ST.getInstrInfo();
2725   const SIRegisterInfo &TRI = TII->getRegisterInfo();
2726   MachineFunction *MF = MBB.getParent();
2727   MachineRegisterInfo &MRI = MF->getRegInfo();
2728 
2729   unsigned Dst = MI.getOperand(0).getReg();
2730   unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
2731   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
2732 
2733   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
2734 
2735   unsigned SubReg;
2736   std::tie(SubReg, Offset)
2737     = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
2738 
2739   bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
2740 
2741   if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
2742     MachineBasicBlock::iterator I(&MI);
2743     const DebugLoc &DL = MI.getDebugLoc();
2744 
2745     if (UseGPRIdxMode) {
2746       // TODO: Look at the uses to avoid the copy. This may require rescheduling
2747       // to avoid interfering with other uses, so probably requires a new
2748       // optimization pass.
2749       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
2750         .addReg(SrcReg, RegState::Undef, SubReg)
2751         .addReg(SrcReg, RegState::Implicit)
2752         .addReg(AMDGPU::M0, RegState::Implicit);
2753       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
2754     } else {
2755       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
2756         .addReg(SrcReg, RegState::Undef, SubReg)
2757         .addReg(SrcReg, RegState::Implicit);
2758     }
2759 
2760     MI.eraseFromParent();
2761 
2762     return &MBB;
2763   }
2764 
2765   const DebugLoc &DL = MI.getDebugLoc();
2766   MachineBasicBlock::iterator I(&MI);
2767 
2768   unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2769   unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2770 
2771   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
2772 
2773   if (UseGPRIdxMode) {
2774     MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2775       .addImm(0) // Reset inside loop.
2776       .addImm(VGPRIndexMode::SRC0_ENABLE);
2777     SetOn->getOperand(3).setIsUndef();
2778 
2779     // Disable again after the loop.
2780     BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
2781   }
2782 
2783   auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, UseGPRIdxMode);
2784   MachineBasicBlock *LoopBB = InsPt->getParent();
2785 
2786   if (UseGPRIdxMode) {
2787     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
2788       .addReg(SrcReg, RegState::Undef, SubReg)
2789       .addReg(SrcReg, RegState::Implicit)
2790       .addReg(AMDGPU::M0, RegState::Implicit);
2791   } else {
2792     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
2793       .addReg(SrcReg, RegState::Undef, SubReg)
2794       .addReg(SrcReg, RegState::Implicit);
2795   }
2796 
2797   MI.eraseFromParent();
2798 
2799   return LoopBB;
2800 }
2801 
2802 static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
2803                                  const TargetRegisterClass *VecRC) {
2804   switch (TRI.getRegSizeInBits(*VecRC)) {
2805   case 32: // 4 bytes
2806     return AMDGPU::V_MOVRELD_B32_V1;
2807   case 64: // 8 bytes
2808     return AMDGPU::V_MOVRELD_B32_V2;
2809   case 128: // 16 bytes
2810     return AMDGPU::V_MOVRELD_B32_V4;
2811   case 256: // 32 bytes
2812     return AMDGPU::V_MOVRELD_B32_V8;
2813   case 512: // 64 bytes
2814     return AMDGPU::V_MOVRELD_B32_V16;
2815   default:
2816     llvm_unreachable("unsupported size for MOVRELD pseudos");
2817   }
2818 }
2819 
2820 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
2821                                           MachineBasicBlock &MBB,
2822                                           const SISubtarget &ST) {
2823   const SIInstrInfo *TII = ST.getInstrInfo();
2824   const SIRegisterInfo &TRI = TII->getRegisterInfo();
2825   MachineFunction *MF = MBB.getParent();
2826   MachineRegisterInfo &MRI = MF->getRegInfo();
2827 
2828   unsigned Dst = MI.getOperand(0).getReg();
2829   const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
2830   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2831   const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
2832   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
2833   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
2834 
2835   // This can be an immediate, but will be folded later.
2836   assert(Val->getReg());
2837 
2838   unsigned SubReg;
2839   std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
2840                                                          SrcVec->getReg(),
2841                                                          Offset);
2842   bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
2843 
2844   if (Idx->getReg() == AMDGPU::NoRegister) {
2845     MachineBasicBlock::iterator I(&MI);
2846     const DebugLoc &DL = MI.getDebugLoc();
2847 
2848     assert(Offset == 0);
2849 
2850     BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
2851         .add(*SrcVec)
2852         .add(*Val)
2853         .addImm(SubReg);
2854 
2855     MI.eraseFromParent();
2856     return &MBB;
2857   }
2858 
2859   if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
2860     MachineBasicBlock::iterator I(&MI);
2861     const DebugLoc &DL = MI.getDebugLoc();
2862 
2863     if (UseGPRIdxMode) {
2864       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
2865           .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
2866           .add(*Val)
2867           .addReg(Dst, RegState::ImplicitDefine)
2868           .addReg(SrcVec->getReg(), RegState::Implicit)
2869           .addReg(AMDGPU::M0, RegState::Implicit);
2870 
2871       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
2872     } else {
2873       const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
2874 
2875       BuildMI(MBB, I, DL, MovRelDesc)
2876           .addReg(Dst, RegState::Define)
2877           .addReg(SrcVec->getReg())
2878           .add(*Val)
2879           .addImm(SubReg - AMDGPU::sub0);
2880     }
2881 
2882     MI.eraseFromParent();
2883     return &MBB;
2884   }
2885 
2886   if (Val->isReg())
2887     MRI.clearKillFlags(Val->getReg());
2888 
2889   const DebugLoc &DL = MI.getDebugLoc();
2890 
2891   if (UseGPRIdxMode) {
2892     MachineBasicBlock::iterator I(&MI);
2893 
2894     MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2895       .addImm(0) // Reset inside loop.
2896       .addImm(VGPRIndexMode::DST_ENABLE);
2897     SetOn->getOperand(3).setIsUndef();
2898 
2899     // Disable again after the loop.
2900     BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
2901   }
2902 
2903   unsigned PhiReg = MRI.createVirtualRegister(VecRC);
2904 
2905   auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
2906                               Offset, UseGPRIdxMode);
2907   MachineBasicBlock *LoopBB = InsPt->getParent();
2908 
2909   if (UseGPRIdxMode) {
2910     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
2911         .addReg(PhiReg, RegState::Undef, SubReg) // vdst
2912         .add(*Val)                               // src0
2913         .addReg(Dst, RegState::ImplicitDefine)
2914         .addReg(PhiReg, RegState::Implicit)
2915         .addReg(AMDGPU::M0, RegState::Implicit);
2916   } else {
2917     const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
2918 
2919     BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
2920         .addReg(Dst, RegState::Define)
2921         .addReg(PhiReg)
2922         .add(*Val)
2923         .addImm(SubReg - AMDGPU::sub0);
2924   }
2925 
2926   MI.eraseFromParent();
2927 
2928   return LoopBB;
2929 }
2930 
2931 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
2932   MachineInstr &MI, MachineBasicBlock *BB) const {
2933 
2934   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
2935   MachineFunction *MF = BB->getParent();
2936   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
2937 
2938   if (TII->isMIMG(MI)) {
2939       if (!MI.memoperands_empty())
2940         return BB;
2941     // Add a memoperand for mimg instructions so that they aren't assumed to
2942     // be ordered memory instuctions.
2943 
2944     MachinePointerInfo PtrInfo(MFI->getImagePSV());
2945     MachineMemOperand::Flags Flags = MachineMemOperand::MODereferenceable;
2946     if (MI.mayStore())
2947       Flags |= MachineMemOperand::MOStore;
2948 
2949     if (MI.mayLoad())
2950       Flags |= MachineMemOperand::MOLoad;
2951 
2952     auto MMO = MF->getMachineMemOperand(PtrInfo, Flags, 0, 0);
2953     MI.addMemOperand(*MF, MMO);
2954     return BB;
2955   }
2956 
2957   switch (MI.getOpcode()) {
2958   case AMDGPU::S_ADD_U64_PSEUDO:
2959   case AMDGPU::S_SUB_U64_PSEUDO: {
2960     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
2961     const DebugLoc &DL = MI.getDebugLoc();
2962 
2963     MachineOperand &Dest = MI.getOperand(0);
2964     MachineOperand &Src0 = MI.getOperand(1);
2965     MachineOperand &Src1 = MI.getOperand(2);
2966 
2967     unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
2968     unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
2969 
2970     MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
2971      Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
2972      &AMDGPU::SReg_32_XM0RegClass);
2973     MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
2974       Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
2975       &AMDGPU::SReg_32_XM0RegClass);
2976 
2977     MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
2978       Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
2979       &AMDGPU::SReg_32_XM0RegClass);
2980     MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
2981       Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
2982       &AMDGPU::SReg_32_XM0RegClass);
2983 
2984     bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
2985 
2986     unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
2987     unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
2988     BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
2989       .add(Src0Sub0)
2990       .add(Src1Sub0);
2991     BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
2992       .add(Src0Sub1)
2993       .add(Src1Sub1);
2994     BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
2995       .addReg(DestSub0)
2996       .addImm(AMDGPU::sub0)
2997       .addReg(DestSub1)
2998       .addImm(AMDGPU::sub1);
2999     MI.eraseFromParent();
3000     return BB;
3001   }
3002   case AMDGPU::SI_INIT_M0: {
3003     BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
3004             TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3005         .add(MI.getOperand(0));
3006     MI.eraseFromParent();
3007     return BB;
3008   }
3009   case AMDGPU::SI_INIT_EXEC:
3010     // This should be before all vector instructions.
3011     BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3012             AMDGPU::EXEC)
3013         .addImm(MI.getOperand(0).getImm());
3014     MI.eraseFromParent();
3015     return BB;
3016 
3017   case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3018     // Extract the thread count from an SGPR input and set EXEC accordingly.
3019     // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3020     //
3021     // S_BFE_U32 count, input, {shift, 7}
3022     // S_BFM_B64 exec, count, 0
3023     // S_CMP_EQ_U32 count, 64
3024     // S_CMOV_B64 exec, -1
3025     MachineInstr *FirstMI = &*BB->begin();
3026     MachineRegisterInfo &MRI = MF->getRegInfo();
3027     unsigned InputReg = MI.getOperand(0).getReg();
3028     unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3029     bool Found = false;
3030 
3031     // Move the COPY of the input reg to the beginning, so that we can use it.
3032     for (auto I = BB->begin(); I != &MI; I++) {
3033       if (I->getOpcode() != TargetOpcode::COPY ||
3034           I->getOperand(0).getReg() != InputReg)
3035         continue;
3036 
3037       if (I == FirstMI) {
3038         FirstMI = &*++BB->begin();
3039       } else {
3040         I->removeFromParent();
3041         BB->insert(FirstMI, &*I);
3042       }
3043       Found = true;
3044       break;
3045     }
3046     assert(Found);
3047     (void)Found;
3048 
3049     // This should be before all vector instructions.
3050     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3051         .addReg(InputReg)
3052         .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
3053     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
3054             AMDGPU::EXEC)
3055         .addReg(CountReg)
3056         .addImm(0);
3057     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3058         .addReg(CountReg, RegState::Kill)
3059         .addImm(64);
3060     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
3061             AMDGPU::EXEC)
3062         .addImm(-1);
3063     MI.eraseFromParent();
3064     return BB;
3065   }
3066 
3067   case AMDGPU::GET_GROUPSTATICSIZE: {
3068     DebugLoc DL = MI.getDebugLoc();
3069     BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
3070         .add(MI.getOperand(0))
3071         .addImm(MFI->getLDSSize());
3072     MI.eraseFromParent();
3073     return BB;
3074   }
3075   case AMDGPU::SI_INDIRECT_SRC_V1:
3076   case AMDGPU::SI_INDIRECT_SRC_V2:
3077   case AMDGPU::SI_INDIRECT_SRC_V4:
3078   case AMDGPU::SI_INDIRECT_SRC_V8:
3079   case AMDGPU::SI_INDIRECT_SRC_V16:
3080     return emitIndirectSrc(MI, *BB, *getSubtarget());
3081   case AMDGPU::SI_INDIRECT_DST_V1:
3082   case AMDGPU::SI_INDIRECT_DST_V2:
3083   case AMDGPU::SI_INDIRECT_DST_V4:
3084   case AMDGPU::SI_INDIRECT_DST_V8:
3085   case AMDGPU::SI_INDIRECT_DST_V16:
3086     return emitIndirectDst(MI, *BB, *getSubtarget());
3087   case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3088   case AMDGPU::SI_KILL_I1_PSEUDO:
3089     return splitKillBlock(MI, BB);
3090   case AMDGPU::V_CNDMASK_B64_PSEUDO: {
3091     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3092 
3093     unsigned Dst = MI.getOperand(0).getReg();
3094     unsigned Src0 = MI.getOperand(1).getReg();
3095     unsigned Src1 = MI.getOperand(2).getReg();
3096     const DebugLoc &DL = MI.getDebugLoc();
3097     unsigned SrcCond = MI.getOperand(3).getReg();
3098 
3099     unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3100     unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3101     unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
3102 
3103     BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3104       .addReg(SrcCond);
3105     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3106       .addReg(Src0, 0, AMDGPU::sub0)
3107       .addReg(Src1, 0, AMDGPU::sub0)
3108       .addReg(SrcCondCopy);
3109     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3110       .addReg(Src0, 0, AMDGPU::sub1)
3111       .addReg(Src1, 0, AMDGPU::sub1)
3112       .addReg(SrcCondCopy);
3113 
3114     BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3115       .addReg(DstLo)
3116       .addImm(AMDGPU::sub0)
3117       .addReg(DstHi)
3118       .addImm(AMDGPU::sub1);
3119     MI.eraseFromParent();
3120     return BB;
3121   }
3122   case AMDGPU::SI_BR_UNDEF: {
3123     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3124     const DebugLoc &DL = MI.getDebugLoc();
3125     MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3126                            .add(MI.getOperand(0));
3127     Br->getOperand(1).setIsUndef(true); // read undef SCC
3128     MI.eraseFromParent();
3129     return BB;
3130   }
3131   case AMDGPU::ADJCALLSTACKUP:
3132   case AMDGPU::ADJCALLSTACKDOWN: {
3133     const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3134     MachineInstrBuilder MIB(*MF, &MI);
3135     MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
3136         .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit);
3137     return BB;
3138   }
3139   case AMDGPU::SI_CALL_ISEL:
3140   case AMDGPU::SI_TCRETURN_ISEL: {
3141     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3142     const DebugLoc &DL = MI.getDebugLoc();
3143     unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
3144 
3145     MachineRegisterInfo &MRI = MF->getRegInfo();
3146     unsigned GlobalAddrReg = MI.getOperand(0).getReg();
3147     MachineInstr *PCRel = MRI.getVRegDef(GlobalAddrReg);
3148     assert(PCRel->getOpcode() == AMDGPU::SI_PC_ADD_REL_OFFSET);
3149 
3150     const GlobalValue *G = PCRel->getOperand(1).getGlobal();
3151 
3152     MachineInstrBuilder MIB;
3153     if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
3154       MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg)
3155         .add(MI.getOperand(0))
3156         .addGlobalAddress(G);
3157     } else {
3158       MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_TCRETURN))
3159         .add(MI.getOperand(0))
3160         .addGlobalAddress(G);
3161 
3162       // There is an additional imm operand for tcreturn, but it should be in the
3163       // right place already.
3164     }
3165 
3166     for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I)
3167       MIB.add(MI.getOperand(I));
3168 
3169     MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
3170     MI.eraseFromParent();
3171     return BB;
3172   }
3173   default:
3174     return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
3175   }
3176 }
3177 
3178 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3179   return isTypeLegal(VT.getScalarType());
3180 }
3181 
3182 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3183   // This currently forces unfolding various combinations of fsub into fma with
3184   // free fneg'd operands. As long as we have fast FMA (controlled by
3185   // isFMAFasterThanFMulAndFAdd), we should perform these.
3186 
3187   // When fma is quarter rate, for f64 where add / sub are at best half rate,
3188   // most of these combines appear to be cycle neutral but save on instruction
3189   // count / code size.
3190   return true;
3191 }
3192 
3193 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3194                                          EVT VT) const {
3195   if (!VT.isVector()) {
3196     return MVT::i1;
3197   }
3198   return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
3199 }
3200 
3201 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3202   // TODO: Should i16 be used always if legal? For now it would force VALU
3203   // shifts.
3204   return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
3205 }
3206 
3207 // Answering this is somewhat tricky and depends on the specific device which
3208 // have different rates for fma or all f64 operations.
3209 //
3210 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3211 // regardless of which device (although the number of cycles differs between
3212 // devices), so it is always profitable for f64.
3213 //
3214 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3215 // only on full rate devices. Normally, we should prefer selecting v_mad_f32
3216 // which we can always do even without fused FP ops since it returns the same
3217 // result as the separate operations and since it is always full
3218 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
3219 // however does not support denormals, so we do report fma as faster if we have
3220 // a fast fma device and require denormals.
3221 //
3222 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3223   VT = VT.getScalarType();
3224 
3225   switch (VT.getSimpleVT().SimpleTy) {
3226   case MVT::f32:
3227     // This is as fast on some subtargets. However, we always have full rate f32
3228     // mad available which returns the same result as the separate operations
3229     // which we should prefer over fma. We can't use this if we want to support
3230     // denormals, so only report this in these cases.
3231     return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32();
3232   case MVT::f64:
3233     return true;
3234   case MVT::f16:
3235     return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
3236   default:
3237     break;
3238   }
3239 
3240   return false;
3241 }
3242 
3243 //===----------------------------------------------------------------------===//
3244 // Custom DAG Lowering Operations
3245 //===----------------------------------------------------------------------===//
3246 
3247 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3248   switch (Op.getOpcode()) {
3249   default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
3250   case ISD::BRCOND: return LowerBRCOND(Op, DAG);
3251   case ISD::LOAD: {
3252     SDValue Result = LowerLOAD(Op, DAG);
3253     assert((!Result.getNode() ||
3254             Result.getNode()->getNumValues() == 2) &&
3255            "Load should return a value and a chain");
3256     return Result;
3257   }
3258 
3259   case ISD::FSIN:
3260   case ISD::FCOS:
3261     return LowerTrig(Op, DAG);
3262   case ISD::SELECT: return LowerSELECT(Op, DAG);
3263   case ISD::FDIV: return LowerFDIV(Op, DAG);
3264   case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
3265   case ISD::STORE: return LowerSTORE(Op, DAG);
3266   case ISD::GlobalAddress: {
3267     MachineFunction &MF = DAG.getMachineFunction();
3268     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3269     return LowerGlobalAddress(MFI, Op, DAG);
3270   }
3271   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3272   case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
3273   case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
3274   case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
3275   case ISD::INSERT_VECTOR_ELT:
3276     return lowerINSERT_VECTOR_ELT(Op, DAG);
3277   case ISD::EXTRACT_VECTOR_ELT:
3278     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3279   case ISD::FP_ROUND:
3280     return lowerFP_ROUND(Op, DAG);
3281   case ISD::TRAP:
3282   case ISD::DEBUGTRAP:
3283     return lowerTRAP(Op, DAG);
3284   }
3285   return SDValue();
3286 }
3287 
3288 void SITargetLowering::ReplaceNodeResults(SDNode *N,
3289                                           SmallVectorImpl<SDValue> &Results,
3290                                           SelectionDAG &DAG) const {
3291   switch (N->getOpcode()) {
3292   case ISD::INSERT_VECTOR_ELT: {
3293     if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
3294       Results.push_back(Res);
3295     return;
3296   }
3297   case ISD::EXTRACT_VECTOR_ELT: {
3298     if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
3299       Results.push_back(Res);
3300     return;
3301   }
3302   case ISD::INTRINSIC_WO_CHAIN: {
3303     unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3304     if (IID == Intrinsic::amdgcn_cvt_pkrtz) {
3305       SDValue Src0 = N->getOperand(1);
3306       SDValue Src1 = N->getOperand(2);
3307       SDLoc SL(N);
3308       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
3309                                 Src0, Src1);
3310       Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
3311       return;
3312     }
3313     break;
3314   }
3315   case ISD::SELECT: {
3316     SDLoc SL(N);
3317     EVT VT = N->getValueType(0);
3318     EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3319     SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
3320     SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
3321 
3322     EVT SelectVT = NewVT;
3323     if (NewVT.bitsLT(MVT::i32)) {
3324       LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
3325       RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
3326       SelectVT = MVT::i32;
3327     }
3328 
3329     SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
3330                                     N->getOperand(0), LHS, RHS);
3331 
3332     if (NewVT != SelectVT)
3333       NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
3334     Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
3335     return;
3336   }
3337   default:
3338     break;
3339   }
3340 }
3341 
3342 /// \brief Helper function for LowerBRCOND
3343 static SDNode *findUser(SDValue Value, unsigned Opcode) {
3344 
3345   SDNode *Parent = Value.getNode();
3346   for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
3347        I != E; ++I) {
3348 
3349     if (I.getUse().get() != Value)
3350       continue;
3351 
3352     if (I->getOpcode() == Opcode)
3353       return *I;
3354   }
3355   return nullptr;
3356 }
3357 
3358 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
3359   if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
3360     switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
3361     case Intrinsic::amdgcn_if:
3362       return AMDGPUISD::IF;
3363     case Intrinsic::amdgcn_else:
3364       return AMDGPUISD::ELSE;
3365     case Intrinsic::amdgcn_loop:
3366       return AMDGPUISD::LOOP;
3367     case Intrinsic::amdgcn_end_cf:
3368       llvm_unreachable("should not occur");
3369     default:
3370       return 0;
3371     }
3372   }
3373 
3374   // break, if_break, else_break are all only used as inputs to loop, not
3375   // directly as branch conditions.
3376   return 0;
3377 }
3378 
3379 void SITargetLowering::createDebuggerPrologueStackObjects(
3380     MachineFunction &MF) const {
3381   // Create stack objects that are used for emitting debugger prologue.
3382   //
3383   // Debugger prologue writes work group IDs and work item IDs to scratch memory
3384   // at fixed location in the following format:
3385   //   offset 0:  work group ID x
3386   //   offset 4:  work group ID y
3387   //   offset 8:  work group ID z
3388   //   offset 16: work item ID x
3389   //   offset 20: work item ID y
3390   //   offset 24: work item ID z
3391   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
3392   int ObjectIdx = 0;
3393 
3394   // For each dimension:
3395   for (unsigned i = 0; i < 3; ++i) {
3396     // Create fixed stack object for work group ID.
3397     ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true);
3398     Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx);
3399     // Create fixed stack object for work item ID.
3400     ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true);
3401     Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx);
3402   }
3403 }
3404 
3405 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
3406   const Triple &TT = getTargetMachine().getTargetTriple();
3407   return GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS &&
3408          AMDGPU::shouldEmitConstantsToTextSection(TT);
3409 }
3410 
3411 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
3412   return (GV->getType()->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
3413               GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) &&
3414          !shouldEmitFixup(GV) &&
3415          !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3416 }
3417 
3418 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
3419   return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
3420 }
3421 
3422 /// This transforms the control flow intrinsics to get the branch destination as
3423 /// last parameter, also switches branch target with BR if the need arise
3424 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
3425                                       SelectionDAG &DAG) const {
3426   SDLoc DL(BRCOND);
3427 
3428   SDNode *Intr = BRCOND.getOperand(1).getNode();
3429   SDValue Target = BRCOND.getOperand(2);
3430   SDNode *BR = nullptr;
3431   SDNode *SetCC = nullptr;
3432 
3433   if (Intr->getOpcode() == ISD::SETCC) {
3434     // As long as we negate the condition everything is fine
3435     SetCC = Intr;
3436     Intr = SetCC->getOperand(0).getNode();
3437 
3438   } else {
3439     // Get the target from BR if we don't negate the condition
3440     BR = findUser(BRCOND, ISD::BR);
3441     Target = BR->getOperand(1);
3442   }
3443 
3444   // FIXME: This changes the types of the intrinsics instead of introducing new
3445   // nodes with the correct types.
3446   // e.g. llvm.amdgcn.loop
3447 
3448   // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
3449   // =>     t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
3450 
3451   unsigned CFNode = isCFIntrinsic(Intr);
3452   if (CFNode == 0) {
3453     // This is a uniform branch so we don't need to legalize.
3454     return BRCOND;
3455   }
3456 
3457   bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
3458                    Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
3459 
3460   assert(!SetCC ||
3461         (SetCC->getConstantOperandVal(1) == 1 &&
3462          cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
3463                                                              ISD::SETNE));
3464 
3465   // operands of the new intrinsic call
3466   SmallVector<SDValue, 4> Ops;
3467   if (HaveChain)
3468     Ops.push_back(BRCOND.getOperand(0));
3469 
3470   Ops.append(Intr->op_begin() + (HaveChain ?  2 : 1), Intr->op_end());
3471   Ops.push_back(Target);
3472 
3473   ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
3474 
3475   // build the new intrinsic call
3476   SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
3477 
3478   if (!HaveChain) {
3479     SDValue Ops[] =  {
3480       SDValue(Result, 0),
3481       BRCOND.getOperand(0)
3482     };
3483 
3484     Result = DAG.getMergeValues(Ops, DL).getNode();
3485   }
3486 
3487   if (BR) {
3488     // Give the branch instruction our target
3489     SDValue Ops[] = {
3490       BR->getOperand(0),
3491       BRCOND.getOperand(2)
3492     };
3493     SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
3494     DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
3495     BR = NewBR.getNode();
3496   }
3497 
3498   SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
3499 
3500   // Copy the intrinsic results to registers
3501   for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
3502     SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
3503     if (!CopyToReg)
3504       continue;
3505 
3506     Chain = DAG.getCopyToReg(
3507       Chain, DL,
3508       CopyToReg->getOperand(1),
3509       SDValue(Result, i - 1),
3510       SDValue());
3511 
3512     DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
3513   }
3514 
3515   // Remove the old intrinsic from the chain
3516   DAG.ReplaceAllUsesOfValueWith(
3517     SDValue(Intr, Intr->getNumValues() - 1),
3518     Intr->getOperand(0));
3519 
3520   return Chain;
3521 }
3522 
3523 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
3524                                             SDValue Op,
3525                                             const SDLoc &DL,
3526                                             EVT VT) const {
3527   return Op.getValueType().bitsLE(VT) ?
3528       DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
3529       DAG.getNode(ISD::FTRUNC, DL, VT, Op);
3530 }
3531 
3532 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
3533   assert(Op.getValueType() == MVT::f16 &&
3534          "Do not know how to custom lower FP_ROUND for non-f16 type");
3535 
3536   SDValue Src = Op.getOperand(0);
3537   EVT SrcVT = Src.getValueType();
3538   if (SrcVT != MVT::f64)
3539     return Op;
3540 
3541   SDLoc DL(Op);
3542 
3543   SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
3544   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
3545   return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
3546 }
3547 
3548 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
3549   SDLoc SL(Op);
3550   MachineFunction &MF = DAG.getMachineFunction();
3551   SDValue Chain = Op.getOperand(0);
3552 
3553   unsigned TrapID = Op.getOpcode() == ISD::DEBUGTRAP ?
3554     SISubtarget::TrapIDLLVMDebugTrap : SISubtarget::TrapIDLLVMTrap;
3555 
3556   if (Subtarget->getTrapHandlerAbi() == SISubtarget::TrapHandlerAbiHsa &&
3557       Subtarget->isTrapHandlerEnabled()) {
3558     SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
3559     unsigned UserSGPR = Info->getQueuePtrUserSGPR();
3560     assert(UserSGPR != AMDGPU::NoRegister);
3561 
3562     SDValue QueuePtr = CreateLiveInRegister(
3563       DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
3564 
3565     SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
3566 
3567     SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
3568                                      QueuePtr, SDValue());
3569 
3570     SDValue Ops[] = {
3571       ToReg,
3572       DAG.getTargetConstant(TrapID, SL, MVT::i16),
3573       SGPR01,
3574       ToReg.getValue(1)
3575     };
3576 
3577     return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
3578   }
3579 
3580   switch (TrapID) {
3581   case SISubtarget::TrapIDLLVMTrap:
3582     return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
3583   case SISubtarget::TrapIDLLVMDebugTrap: {
3584     DiagnosticInfoUnsupported NoTrap(*MF.getFunction(),
3585                                      "debugtrap handler not supported",
3586                                      Op.getDebugLoc(),
3587                                      DS_Warning);
3588     LLVMContext &Ctx = MF.getFunction()->getContext();
3589     Ctx.diagnose(NoTrap);
3590     return Chain;
3591   }
3592   default:
3593     llvm_unreachable("unsupported trap handler type!");
3594   }
3595 
3596   return Chain;
3597 }
3598 
3599 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
3600                                              SelectionDAG &DAG) const {
3601   // FIXME: Use inline constants (src_{shared, private}_base) instead.
3602   if (Subtarget->hasApertureRegs()) {
3603     unsigned Offset = AS == AMDGPUASI.LOCAL_ADDRESS ?
3604         AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
3605         AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
3606     unsigned WidthM1 = AS == AMDGPUASI.LOCAL_ADDRESS ?
3607         AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
3608         AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
3609     unsigned Encoding =
3610         AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
3611         Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
3612         WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
3613 
3614     SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
3615     SDValue ApertureReg = SDValue(
3616         DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
3617     SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
3618     return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
3619   }
3620 
3621   MachineFunction &MF = DAG.getMachineFunction();
3622   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
3623   unsigned UserSGPR = Info->getQueuePtrUserSGPR();
3624   assert(UserSGPR != AMDGPU::NoRegister);
3625 
3626   SDValue QueuePtr = CreateLiveInRegister(
3627     DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
3628 
3629   // Offset into amd_queue_t for group_segment_aperture_base_hi /
3630   // private_segment_aperture_base_hi.
3631   uint32_t StructOffset = (AS == AMDGPUASI.LOCAL_ADDRESS) ? 0x40 : 0x44;
3632 
3633   SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
3634 
3635   // TODO: Use custom target PseudoSourceValue.
3636   // TODO: We should use the value from the IR intrinsic call, but it might not
3637   // be available and how do we get it?
3638   Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
3639                                               AMDGPUASI.CONSTANT_ADDRESS));
3640 
3641   MachinePointerInfo PtrInfo(V, StructOffset);
3642   return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
3643                      MinAlign(64, StructOffset),
3644                      MachineMemOperand::MODereferenceable |
3645                          MachineMemOperand::MOInvariant);
3646 }
3647 
3648 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
3649                                              SelectionDAG &DAG) const {
3650   SDLoc SL(Op);
3651   const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
3652 
3653   SDValue Src = ASC->getOperand(0);
3654   SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
3655 
3656   const AMDGPUTargetMachine &TM =
3657     static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
3658 
3659   // flat -> local/private
3660   if (ASC->getSrcAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
3661     unsigned DestAS = ASC->getDestAddressSpace();
3662 
3663     if (DestAS == AMDGPUASI.LOCAL_ADDRESS ||
3664         DestAS == AMDGPUASI.PRIVATE_ADDRESS) {
3665       unsigned NullVal = TM.getNullPointerValue(DestAS);
3666       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
3667       SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
3668       SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
3669 
3670       return DAG.getNode(ISD::SELECT, SL, MVT::i32,
3671                          NonNull, Ptr, SegmentNullPtr);
3672     }
3673   }
3674 
3675   // local/private -> flat
3676   if (ASC->getDestAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
3677     unsigned SrcAS = ASC->getSrcAddressSpace();
3678 
3679     if (SrcAS == AMDGPUASI.LOCAL_ADDRESS ||
3680         SrcAS == AMDGPUASI.PRIVATE_ADDRESS) {
3681       unsigned NullVal = TM.getNullPointerValue(SrcAS);
3682       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
3683 
3684       SDValue NonNull
3685         = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
3686 
3687       SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
3688       SDValue CvtPtr
3689         = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
3690 
3691       return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
3692                          DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
3693                          FlatNullPtr);
3694     }
3695   }
3696 
3697   // global <-> flat are no-ops and never emitted.
3698 
3699   const MachineFunction &MF = DAG.getMachineFunction();
3700   DiagnosticInfoUnsupported InvalidAddrSpaceCast(
3701     *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
3702   DAG.getContext()->diagnose(InvalidAddrSpaceCast);
3703 
3704   return DAG.getUNDEF(ASC->getValueType(0));
3705 }
3706 
3707 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
3708                                                  SelectionDAG &DAG) const {
3709   SDValue Idx = Op.getOperand(2);
3710   if (isa<ConstantSDNode>(Idx))
3711     return SDValue();
3712 
3713   // Avoid stack access for dynamic indexing.
3714   SDLoc SL(Op);
3715   SDValue Vec = Op.getOperand(0);
3716   SDValue Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Op.getOperand(1));
3717 
3718   // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
3719   SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Val);
3720 
3721   // Convert vector index to bit-index.
3722   SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx,
3723                                   DAG.getConstant(16, SL, MVT::i32));
3724 
3725   SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
3726 
3727   SDValue BFM = DAG.getNode(ISD::SHL, SL, MVT::i32,
3728                             DAG.getConstant(0xffff, SL, MVT::i32),
3729                             ScaledIdx);
3730 
3731   SDValue LHS = DAG.getNode(ISD::AND, SL, MVT::i32, BFM, ExtVal);
3732   SDValue RHS = DAG.getNode(ISD::AND, SL, MVT::i32,
3733                             DAG.getNOT(SL, BFM, MVT::i32), BCVec);
3734 
3735   SDValue BFI = DAG.getNode(ISD::OR, SL, MVT::i32, LHS, RHS);
3736   return DAG.getNode(ISD::BITCAST, SL, Op.getValueType(), BFI);
3737 }
3738 
3739 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3740                                                   SelectionDAG &DAG) const {
3741   SDLoc SL(Op);
3742 
3743   EVT ResultVT = Op.getValueType();
3744   SDValue Vec = Op.getOperand(0);
3745   SDValue Idx = Op.getOperand(1);
3746 
3747   DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
3748 
3749   // Make sure we we do any optimizations that will make it easier to fold
3750   // source modifiers before obscuring it with bit operations.
3751 
3752   // XXX - Why doesn't this get called when vector_shuffle is expanded?
3753   if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
3754     return Combined;
3755 
3756   if (const ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
3757     SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
3758 
3759     if (CIdx->getZExtValue() == 1) {
3760       Result = DAG.getNode(ISD::SRL, SL, MVT::i32, Result,
3761                            DAG.getConstant(16, SL, MVT::i32));
3762     } else {
3763       assert(CIdx->getZExtValue() == 0);
3764     }
3765 
3766     if (ResultVT.bitsLT(MVT::i32))
3767       Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);
3768     return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
3769   }
3770 
3771   SDValue Sixteen = DAG.getConstant(16, SL, MVT::i32);
3772 
3773   // Convert vector index to bit-index.
3774   SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, Sixteen);
3775 
3776   SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
3777   SDValue Elt = DAG.getNode(ISD::SRL, SL, MVT::i32, BC, ScaledIdx);
3778 
3779   SDValue Result = Elt;
3780   if (ResultVT.bitsLT(MVT::i32))
3781     Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);
3782 
3783   return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
3784 }
3785 
3786 bool
3787 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3788   // We can fold offsets for anything that doesn't require a GOT relocation.
3789   return (GA->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
3790               GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) &&
3791          !shouldEmitGOTReloc(GA->getGlobal());
3792 }
3793 
3794 static SDValue
3795 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
3796                         const SDLoc &DL, unsigned Offset, EVT PtrVT,
3797                         unsigned GAFlags = SIInstrInfo::MO_NONE) {
3798   // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
3799   // lowered to the following code sequence:
3800   //
3801   // For constant address space:
3802   //   s_getpc_b64 s[0:1]
3803   //   s_add_u32 s0, s0, $symbol
3804   //   s_addc_u32 s1, s1, 0
3805   //
3806   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
3807   //   a fixup or relocation is emitted to replace $symbol with a literal
3808   //   constant, which is a pc-relative offset from the encoding of the $symbol
3809   //   operand to the global variable.
3810   //
3811   // For global address space:
3812   //   s_getpc_b64 s[0:1]
3813   //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
3814   //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
3815   //
3816   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
3817   //   fixups or relocations are emitted to replace $symbol@*@lo and
3818   //   $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
3819   //   which is a 64-bit pc-relative offset from the encoding of the $symbol
3820   //   operand to the global variable.
3821   //
3822   // What we want here is an offset from the value returned by s_getpc
3823   // (which is the address of the s_add_u32 instruction) to the global
3824   // variable, but since the encoding of $symbol starts 4 bytes after the start
3825   // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
3826   // small. This requires us to add 4 to the global variable offset in order to
3827   // compute the correct address.
3828   SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
3829                                              GAFlags);
3830   SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
3831                                              GAFlags == SIInstrInfo::MO_NONE ?
3832                                              GAFlags : GAFlags + 1);
3833   return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
3834 }
3835 
3836 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
3837                                              SDValue Op,
3838                                              SelectionDAG &DAG) const {
3839   GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
3840   const GlobalValue *GV = GSD->getGlobal();
3841 
3842   if (GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
3843       GSD->getAddressSpace() != AMDGPUASI.GLOBAL_ADDRESS &&
3844       // FIXME: It isn't correct to rely on the type of the pointer. This should
3845       // be removed when address space 0 is 64-bit.
3846       !GV->getType()->getElementType()->isFunctionTy())
3847     return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
3848 
3849   SDLoc DL(GSD);
3850   EVT PtrVT = Op.getValueType();
3851 
3852   if (shouldEmitFixup(GV))
3853     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
3854   else if (shouldEmitPCReloc(GV))
3855     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
3856                                    SIInstrInfo::MO_REL32);
3857 
3858   SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
3859                                             SIInstrInfo::MO_GOTPCREL32);
3860 
3861   Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
3862   PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
3863   const DataLayout &DataLayout = DAG.getDataLayout();
3864   unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
3865   // FIXME: Use a PseudoSourceValue once those can be assigned an address space.
3866   MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
3867 
3868   return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
3869                      MachineMemOperand::MODereferenceable |
3870                          MachineMemOperand::MOInvariant);
3871 }
3872 
3873 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
3874                                    const SDLoc &DL, SDValue V) const {
3875   // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
3876   // the destination register.
3877   //
3878   // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
3879   // so we will end up with redundant moves to m0.
3880   //
3881   // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
3882 
3883   // A Null SDValue creates a glue result.
3884   SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
3885                                   V, Chain);
3886   return SDValue(M0, 0);
3887 }
3888 
3889 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
3890                                                  SDValue Op,
3891                                                  MVT VT,
3892                                                  unsigned Offset) const {
3893   SDLoc SL(Op);
3894   SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
3895                                            DAG.getEntryNode(), Offset, false);
3896   // The local size values will have the hi 16-bits as zero.
3897   return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
3898                      DAG.getValueType(VT));
3899 }
3900 
3901 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
3902                                         EVT VT) {
3903   DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
3904                                       "non-hsa intrinsic with hsa target",
3905                                       DL.getDebugLoc());
3906   DAG.getContext()->diagnose(BadIntrin);
3907   return DAG.getUNDEF(VT);
3908 }
3909 
3910 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
3911                                          EVT VT) {
3912   DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
3913                                       "intrinsic not supported on subtarget",
3914                                       DL.getDebugLoc());
3915   DAG.getContext()->diagnose(BadIntrin);
3916   return DAG.getUNDEF(VT);
3917 }
3918 
3919 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3920                                                   SelectionDAG &DAG) const {
3921   MachineFunction &MF = DAG.getMachineFunction();
3922   auto MFI = MF.getInfo<SIMachineFunctionInfo>();
3923 
3924   EVT VT = Op.getValueType();
3925   SDLoc DL(Op);
3926   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3927 
3928   // TODO: Should this propagate fast-math-flags?
3929 
3930   switch (IntrinsicID) {
3931   case Intrinsic::amdgcn_implicit_buffer_ptr: {
3932     if (getSubtarget()->isAmdCodeObjectV2(MF))
3933       return emitNonHSAIntrinsicError(DAG, DL, VT);
3934     return getPreloadedValue(DAG, *MFI, VT,
3935                              AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
3936   }
3937   case Intrinsic::amdgcn_dispatch_ptr:
3938   case Intrinsic::amdgcn_queue_ptr: {
3939     if (!Subtarget->isAmdCodeObjectV2(MF)) {
3940       DiagnosticInfoUnsupported BadIntrin(
3941           *MF.getFunction(), "unsupported hsa intrinsic without hsa target",
3942           DL.getDebugLoc());
3943       DAG.getContext()->diagnose(BadIntrin);
3944       return DAG.getUNDEF(VT);
3945     }
3946 
3947     auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
3948       AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
3949     return getPreloadedValue(DAG, *MFI, VT, RegID);
3950   }
3951   case Intrinsic::amdgcn_implicitarg_ptr: {
3952     if (MFI->isEntryFunction())
3953       return getImplicitArgPtr(DAG, DL);
3954     return getPreloadedValue(DAG, *MFI, VT,
3955                              AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
3956   }
3957   case Intrinsic::amdgcn_kernarg_segment_ptr: {
3958     return getPreloadedValue(DAG, *MFI, VT,
3959                              AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
3960   }
3961   case Intrinsic::amdgcn_dispatch_id: {
3962     return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
3963   }
3964   case Intrinsic::amdgcn_rcp:
3965     return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
3966   case Intrinsic::amdgcn_rsq:
3967     return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
3968   case Intrinsic::amdgcn_rsq_legacy:
3969     if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
3970       return emitRemovedIntrinsicError(DAG, DL, VT);
3971 
3972     return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
3973   case Intrinsic::amdgcn_rcp_legacy:
3974     if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
3975       return emitRemovedIntrinsicError(DAG, DL, VT);
3976     return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
3977   case Intrinsic::amdgcn_rsq_clamp: {
3978     if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
3979       return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
3980 
3981     Type *Type = VT.getTypeForEVT(*DAG.getContext());
3982     APFloat Max = APFloat::getLargest(Type->getFltSemantics());
3983     APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
3984 
3985     SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
3986     SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
3987                               DAG.getConstantFP(Max, DL, VT));
3988     return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
3989                        DAG.getConstantFP(Min, DL, VT));
3990   }
3991   case Intrinsic::r600_read_ngroups_x:
3992     if (Subtarget->isAmdHsaOS())
3993       return emitNonHSAIntrinsicError(DAG, DL, VT);
3994 
3995     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
3996                                     SI::KernelInputOffsets::NGROUPS_X, false);
3997   case Intrinsic::r600_read_ngroups_y:
3998     if (Subtarget->isAmdHsaOS())
3999       return emitNonHSAIntrinsicError(DAG, DL, VT);
4000 
4001     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
4002                                     SI::KernelInputOffsets::NGROUPS_Y, false);
4003   case Intrinsic::r600_read_ngroups_z:
4004     if (Subtarget->isAmdHsaOS())
4005       return emitNonHSAIntrinsicError(DAG, DL, VT);
4006 
4007     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
4008                                     SI::KernelInputOffsets::NGROUPS_Z, false);
4009   case Intrinsic::r600_read_global_size_x:
4010     if (Subtarget->isAmdHsaOS())
4011       return emitNonHSAIntrinsicError(DAG, DL, VT);
4012 
4013     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
4014                                     SI::KernelInputOffsets::GLOBAL_SIZE_X, false);
4015   case Intrinsic::r600_read_global_size_y:
4016     if (Subtarget->isAmdHsaOS())
4017       return emitNonHSAIntrinsicError(DAG, DL, VT);
4018 
4019     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
4020                                     SI::KernelInputOffsets::GLOBAL_SIZE_Y, false);
4021   case Intrinsic::r600_read_global_size_z:
4022     if (Subtarget->isAmdHsaOS())
4023       return emitNonHSAIntrinsicError(DAG, DL, VT);
4024 
4025     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
4026                                     SI::KernelInputOffsets::GLOBAL_SIZE_Z, false);
4027   case Intrinsic::r600_read_local_size_x:
4028     if (Subtarget->isAmdHsaOS())
4029       return emitNonHSAIntrinsicError(DAG, DL, VT);
4030 
4031     return lowerImplicitZextParam(DAG, Op, MVT::i16,
4032                                   SI::KernelInputOffsets::LOCAL_SIZE_X);
4033   case Intrinsic::r600_read_local_size_y:
4034     if (Subtarget->isAmdHsaOS())
4035       return emitNonHSAIntrinsicError(DAG, DL, VT);
4036 
4037     return lowerImplicitZextParam(DAG, Op, MVT::i16,
4038                                   SI::KernelInputOffsets::LOCAL_SIZE_Y);
4039   case Intrinsic::r600_read_local_size_z:
4040     if (Subtarget->isAmdHsaOS())
4041       return emitNonHSAIntrinsicError(DAG, DL, VT);
4042 
4043     return lowerImplicitZextParam(DAG, Op, MVT::i16,
4044                                   SI::KernelInputOffsets::LOCAL_SIZE_Z);
4045   case Intrinsic::amdgcn_workgroup_id_x:
4046   case Intrinsic::r600_read_tgid_x:
4047     return getPreloadedValue(DAG, *MFI, VT,
4048                              AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
4049   case Intrinsic::amdgcn_workgroup_id_y:
4050   case Intrinsic::r600_read_tgid_y:
4051     return getPreloadedValue(DAG, *MFI, VT,
4052                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
4053   case Intrinsic::amdgcn_workgroup_id_z:
4054   case Intrinsic::r600_read_tgid_z:
4055     return getPreloadedValue(DAG, *MFI, VT,
4056                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
4057   case Intrinsic::amdgcn_workitem_id_x: {
4058   case Intrinsic::r600_read_tidig_x:
4059     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4060                           SDLoc(DAG.getEntryNode()),
4061                           MFI->getArgInfo().WorkItemIDX);
4062   }
4063   case Intrinsic::amdgcn_workitem_id_y:
4064   case Intrinsic::r600_read_tidig_y:
4065     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4066                           SDLoc(DAG.getEntryNode()),
4067                           MFI->getArgInfo().WorkItemIDY);
4068   case Intrinsic::amdgcn_workitem_id_z:
4069   case Intrinsic::r600_read_tidig_z:
4070     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4071                           SDLoc(DAG.getEntryNode()),
4072                           MFI->getArgInfo().WorkItemIDZ);
4073   case AMDGPUIntrinsic::SI_load_const: {
4074     SDValue Ops[] = {
4075       Op.getOperand(1),
4076       Op.getOperand(2)
4077     };
4078 
4079     MachineMemOperand *MMO = MF.getMachineMemOperand(
4080         MachinePointerInfo(),
4081         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
4082             MachineMemOperand::MOInvariant,
4083         VT.getStoreSize(), 4);
4084     return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
4085                                    Op->getVTList(), Ops, VT, MMO);
4086   }
4087   case Intrinsic::amdgcn_fdiv_fast:
4088     return lowerFDIV_FAST(Op, DAG);
4089   case Intrinsic::amdgcn_interp_mov: {
4090     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
4091     SDValue Glue = M0.getValue(1);
4092     return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
4093                        Op.getOperand(2), Op.getOperand(3), Glue);
4094   }
4095   case Intrinsic::amdgcn_interp_p1: {
4096     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
4097     SDValue Glue = M0.getValue(1);
4098     return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
4099                        Op.getOperand(2), Op.getOperand(3), Glue);
4100   }
4101   case Intrinsic::amdgcn_interp_p2: {
4102     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
4103     SDValue Glue = SDValue(M0.getNode(), 1);
4104     return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
4105                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
4106                        Glue);
4107   }
4108   case Intrinsic::amdgcn_sin:
4109     return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
4110 
4111   case Intrinsic::amdgcn_cos:
4112     return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
4113 
4114   case Intrinsic::amdgcn_log_clamp: {
4115     if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
4116       return SDValue();
4117 
4118     DiagnosticInfoUnsupported BadIntrin(
4119       *MF.getFunction(), "intrinsic not supported on subtarget",
4120       DL.getDebugLoc());
4121       DAG.getContext()->diagnose(BadIntrin);
4122       return DAG.getUNDEF(VT);
4123   }
4124   case Intrinsic::amdgcn_ldexp:
4125     return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
4126                        Op.getOperand(1), Op.getOperand(2));
4127 
4128   case Intrinsic::amdgcn_fract:
4129     return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
4130 
4131   case Intrinsic::amdgcn_class:
4132     return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
4133                        Op.getOperand(1), Op.getOperand(2));
4134   case Intrinsic::amdgcn_div_fmas:
4135     return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
4136                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
4137                        Op.getOperand(4));
4138 
4139   case Intrinsic::amdgcn_div_fixup:
4140     return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
4141                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4142 
4143   case Intrinsic::amdgcn_trig_preop:
4144     return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
4145                        Op.getOperand(1), Op.getOperand(2));
4146   case Intrinsic::amdgcn_div_scale: {
4147     // 3rd parameter required to be a constant.
4148     const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4149     if (!Param)
4150       return DAG.getMergeValues({ DAG.getUNDEF(VT), DAG.getUNDEF(MVT::i1) }, DL);
4151 
4152     // Translate to the operands expected by the machine instruction. The
4153     // first parameter must be the same as the first instruction.
4154     SDValue Numerator = Op.getOperand(1);
4155     SDValue Denominator = Op.getOperand(2);
4156 
4157     // Note this order is opposite of the machine instruction's operations,
4158     // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
4159     // intrinsic has the numerator as the first operand to match a normal
4160     // division operation.
4161 
4162     SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
4163 
4164     return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
4165                        Denominator, Numerator);
4166   }
4167   case Intrinsic::amdgcn_icmp: {
4168     const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4169     if (!CD)
4170       return DAG.getUNDEF(VT);
4171 
4172     int CondCode = CD->getSExtValue();
4173     if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
4174         CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
4175       return DAG.getUNDEF(VT);
4176 
4177     ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
4178     ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
4179     return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
4180                        Op.getOperand(2), DAG.getCondCode(CCOpcode));
4181   }
4182   case Intrinsic::amdgcn_fcmp: {
4183     const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4184     if (!CD)
4185       return DAG.getUNDEF(VT);
4186 
4187     int CondCode = CD->getSExtValue();
4188     if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
4189         CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE)
4190       return DAG.getUNDEF(VT);
4191 
4192     FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
4193     ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
4194     return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
4195                        Op.getOperand(2), DAG.getCondCode(CCOpcode));
4196   }
4197   case Intrinsic::amdgcn_fmed3:
4198     return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
4199                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4200   case Intrinsic::amdgcn_fmul_legacy:
4201     return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
4202                        Op.getOperand(1), Op.getOperand(2));
4203   case Intrinsic::amdgcn_sffbh:
4204     return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
4205   case Intrinsic::amdgcn_sbfe:
4206     return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
4207                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4208   case Intrinsic::amdgcn_ubfe:
4209     return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
4210                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4211   case Intrinsic::amdgcn_cvt_pkrtz: {
4212     // FIXME: Stop adding cast if v2f16 legal.
4213     EVT VT = Op.getValueType();
4214     SDValue Node = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, DL, MVT::i32,
4215                                Op.getOperand(1), Op.getOperand(2));
4216     return DAG.getNode(ISD::BITCAST, DL, VT, Node);
4217   }
4218   case Intrinsic::amdgcn_wqm: {
4219     SDValue Src = Op.getOperand(1);
4220     return SDValue(DAG.getMachineNode(AMDGPU::WQM, DL, Src.getValueType(), Src),
4221                    0);
4222   }
4223   case Intrinsic::amdgcn_wwm: {
4224     SDValue Src = Op.getOperand(1);
4225     return SDValue(DAG.getMachineNode(AMDGPU::WWM, DL, Src.getValueType(), Src),
4226                    0);
4227   }
4228   default:
4229     return Op;
4230   }
4231 }
4232 
4233 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4234                                                  SelectionDAG &DAG) const {
4235   unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
4236   SDLoc DL(Op);
4237   MachineFunction &MF = DAG.getMachineFunction();
4238 
4239   switch (IntrID) {
4240   case Intrinsic::amdgcn_atomic_inc:
4241   case Intrinsic::amdgcn_atomic_dec: {
4242     MemSDNode *M = cast<MemSDNode>(Op);
4243     unsigned Opc = (IntrID == Intrinsic::amdgcn_atomic_inc) ?
4244       AMDGPUISD::ATOMIC_INC : AMDGPUISD::ATOMIC_DEC;
4245     SDValue Ops[] = {
4246       M->getOperand(0), // Chain
4247       M->getOperand(2), // Ptr
4248       M->getOperand(3)  // Value
4249     };
4250 
4251     return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
4252                                    M->getMemoryVT(), M->getMemOperand());
4253   }
4254   case Intrinsic::amdgcn_buffer_load:
4255   case Intrinsic::amdgcn_buffer_load_format: {
4256     SDValue Ops[] = {
4257       Op.getOperand(0), // Chain
4258       Op.getOperand(2), // rsrc
4259       Op.getOperand(3), // vindex
4260       Op.getOperand(4), // offset
4261       Op.getOperand(5), // glc
4262       Op.getOperand(6)  // slc
4263     };
4264     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
4265 
4266     unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
4267         AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
4268     EVT VT = Op.getValueType();
4269     EVT IntVT = VT.changeTypeToInteger();
4270 
4271     MachineMemOperand *MMO = MF.getMachineMemOperand(
4272       MachinePointerInfo(MFI->getBufferPSV()),
4273       MachineMemOperand::MOLoad,
4274       VT.getStoreSize(), VT.getStoreSize());
4275 
4276     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, MMO);
4277   }
4278   case Intrinsic::amdgcn_tbuffer_load: {
4279     SDValue Ops[] = {
4280       Op.getOperand(0),  // Chain
4281       Op.getOperand(2),  // rsrc
4282       Op.getOperand(3),  // vindex
4283       Op.getOperand(4),  // voffset
4284       Op.getOperand(5),  // soffset
4285       Op.getOperand(6),  // offset
4286       Op.getOperand(7),  // dfmt
4287       Op.getOperand(8),  // nfmt
4288       Op.getOperand(9),  // glc
4289       Op.getOperand(10)   // slc
4290     };
4291 
4292     EVT VT = Op.getOperand(2).getValueType();
4293 
4294     MachineMemOperand *MMO = MF.getMachineMemOperand(
4295       MachinePointerInfo(),
4296       MachineMemOperand::MOLoad,
4297       VT.getStoreSize(), VT.getStoreSize());
4298     return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
4299                                    Op->getVTList(), Ops, VT, MMO);
4300   }
4301   case Intrinsic::amdgcn_buffer_atomic_swap:
4302   case Intrinsic::amdgcn_buffer_atomic_add:
4303   case Intrinsic::amdgcn_buffer_atomic_sub:
4304   case Intrinsic::amdgcn_buffer_atomic_smin:
4305   case Intrinsic::amdgcn_buffer_atomic_umin:
4306   case Intrinsic::amdgcn_buffer_atomic_smax:
4307   case Intrinsic::amdgcn_buffer_atomic_umax:
4308   case Intrinsic::amdgcn_buffer_atomic_and:
4309   case Intrinsic::amdgcn_buffer_atomic_or:
4310   case Intrinsic::amdgcn_buffer_atomic_xor: {
4311     SDValue Ops[] = {
4312       Op.getOperand(0), // Chain
4313       Op.getOperand(2), // vdata
4314       Op.getOperand(3), // rsrc
4315       Op.getOperand(4), // vindex
4316       Op.getOperand(5), // offset
4317       Op.getOperand(6)  // slc
4318     };
4319     EVT VT = Op.getOperand(3).getValueType();
4320     MachineMemOperand *MMO = MF.getMachineMemOperand(
4321       MachinePointerInfo(),
4322       MachineMemOperand::MOLoad |
4323       MachineMemOperand::MOStore |
4324       MachineMemOperand::MODereferenceable |
4325       MachineMemOperand::MOVolatile,
4326       VT.getStoreSize(), 4);
4327     unsigned Opcode = 0;
4328 
4329     switch (IntrID) {
4330     case Intrinsic::amdgcn_buffer_atomic_swap:
4331       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
4332       break;
4333     case Intrinsic::amdgcn_buffer_atomic_add:
4334       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
4335       break;
4336     case Intrinsic::amdgcn_buffer_atomic_sub:
4337       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
4338       break;
4339     case Intrinsic::amdgcn_buffer_atomic_smin:
4340       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
4341       break;
4342     case Intrinsic::amdgcn_buffer_atomic_umin:
4343       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
4344       break;
4345     case Intrinsic::amdgcn_buffer_atomic_smax:
4346       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
4347       break;
4348     case Intrinsic::amdgcn_buffer_atomic_umax:
4349       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
4350       break;
4351     case Intrinsic::amdgcn_buffer_atomic_and:
4352       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
4353       break;
4354     case Intrinsic::amdgcn_buffer_atomic_or:
4355       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
4356       break;
4357     case Intrinsic::amdgcn_buffer_atomic_xor:
4358       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
4359       break;
4360     default:
4361       llvm_unreachable("unhandled atomic opcode");
4362     }
4363 
4364     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, MMO);
4365   }
4366 
4367   case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
4368     SDValue Ops[] = {
4369       Op.getOperand(0), // Chain
4370       Op.getOperand(2), // src
4371       Op.getOperand(3), // cmp
4372       Op.getOperand(4), // rsrc
4373       Op.getOperand(5), // vindex
4374       Op.getOperand(6), // offset
4375       Op.getOperand(7)  // slc
4376     };
4377     EVT VT = Op.getOperand(4).getValueType();
4378     MachineMemOperand *MMO = MF.getMachineMemOperand(
4379       MachinePointerInfo(),
4380       MachineMemOperand::MOLoad |
4381       MachineMemOperand::MOStore |
4382       MachineMemOperand::MODereferenceable |
4383       MachineMemOperand::MOVolatile,
4384       VT.getStoreSize(), 4);
4385 
4386     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
4387                                    Op->getVTList(), Ops, VT, MMO);
4388   }
4389 
4390   // Basic sample.
4391   case Intrinsic::amdgcn_image_sample:
4392   case Intrinsic::amdgcn_image_sample_cl:
4393   case Intrinsic::amdgcn_image_sample_d:
4394   case Intrinsic::amdgcn_image_sample_d_cl:
4395   case Intrinsic::amdgcn_image_sample_l:
4396   case Intrinsic::amdgcn_image_sample_b:
4397   case Intrinsic::amdgcn_image_sample_b_cl:
4398   case Intrinsic::amdgcn_image_sample_lz:
4399   case Intrinsic::amdgcn_image_sample_cd:
4400   case Intrinsic::amdgcn_image_sample_cd_cl:
4401 
4402   // Sample with comparison.
4403   case Intrinsic::amdgcn_image_sample_c:
4404   case Intrinsic::amdgcn_image_sample_c_cl:
4405   case Intrinsic::amdgcn_image_sample_c_d:
4406   case Intrinsic::amdgcn_image_sample_c_d_cl:
4407   case Intrinsic::amdgcn_image_sample_c_l:
4408   case Intrinsic::amdgcn_image_sample_c_b:
4409   case Intrinsic::amdgcn_image_sample_c_b_cl:
4410   case Intrinsic::amdgcn_image_sample_c_lz:
4411   case Intrinsic::amdgcn_image_sample_c_cd:
4412   case Intrinsic::amdgcn_image_sample_c_cd_cl:
4413 
4414   // Sample with offsets.
4415   case Intrinsic::amdgcn_image_sample_o:
4416   case Intrinsic::amdgcn_image_sample_cl_o:
4417   case Intrinsic::amdgcn_image_sample_d_o:
4418   case Intrinsic::amdgcn_image_sample_d_cl_o:
4419   case Intrinsic::amdgcn_image_sample_l_o:
4420   case Intrinsic::amdgcn_image_sample_b_o:
4421   case Intrinsic::amdgcn_image_sample_b_cl_o:
4422   case Intrinsic::amdgcn_image_sample_lz_o:
4423   case Intrinsic::amdgcn_image_sample_cd_o:
4424   case Intrinsic::amdgcn_image_sample_cd_cl_o:
4425 
4426   // Sample with comparison and offsets.
4427   case Intrinsic::amdgcn_image_sample_c_o:
4428   case Intrinsic::amdgcn_image_sample_c_cl_o:
4429   case Intrinsic::amdgcn_image_sample_c_d_o:
4430   case Intrinsic::amdgcn_image_sample_c_d_cl_o:
4431   case Intrinsic::amdgcn_image_sample_c_l_o:
4432   case Intrinsic::amdgcn_image_sample_c_b_o:
4433   case Intrinsic::amdgcn_image_sample_c_b_cl_o:
4434   case Intrinsic::amdgcn_image_sample_c_lz_o:
4435   case Intrinsic::amdgcn_image_sample_c_cd_o:
4436   case Intrinsic::amdgcn_image_sample_c_cd_cl_o:
4437 
4438   case Intrinsic::amdgcn_image_getlod: {
4439     // Replace dmask with everything disabled with undef.
4440     const ConstantSDNode *DMask = dyn_cast<ConstantSDNode>(Op.getOperand(5));
4441     if (!DMask || DMask->isNullValue()) {
4442       SDValue Undef = DAG.getUNDEF(Op.getValueType());
4443       return DAG.getMergeValues({ Undef, Op.getOperand(0) }, SDLoc(Op));
4444     }
4445 
4446     return SDValue();
4447   }
4448   default:
4449     return SDValue();
4450   }
4451 }
4452 
4453 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4454                                               SelectionDAG &DAG) const {
4455   SDLoc DL(Op);
4456   SDValue Chain = Op.getOperand(0);
4457   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
4458   MachineFunction &MF = DAG.getMachineFunction();
4459 
4460   switch (IntrinsicID) {
4461   case Intrinsic::amdgcn_exp: {
4462     const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
4463     const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
4464     const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
4465     const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));
4466 
4467     const SDValue Ops[] = {
4468       Chain,
4469       DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
4470       DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
4471       Op.getOperand(4), // src0
4472       Op.getOperand(5), // src1
4473       Op.getOperand(6), // src2
4474       Op.getOperand(7), // src3
4475       DAG.getTargetConstant(0, DL, MVT::i1), // compr
4476       DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
4477     };
4478 
4479     unsigned Opc = Done->isNullValue() ?
4480       AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
4481     return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
4482   }
4483   case Intrinsic::amdgcn_exp_compr: {
4484     const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
4485     const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
4486     SDValue Src0 = Op.getOperand(4);
4487     SDValue Src1 = Op.getOperand(5);
4488     const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
4489     const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));
4490 
4491     SDValue Undef = DAG.getUNDEF(MVT::f32);
4492     const SDValue Ops[] = {
4493       Chain,
4494       DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
4495       DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
4496       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
4497       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
4498       Undef, // src2
4499       Undef, // src3
4500       DAG.getTargetConstant(1, DL, MVT::i1), // compr
4501       DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
4502     };
4503 
4504     unsigned Opc = Done->isNullValue() ?
4505       AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
4506     return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
4507   }
4508   case Intrinsic::amdgcn_s_sendmsg:
4509   case Intrinsic::amdgcn_s_sendmsghalt: {
4510     unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ?
4511       AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
4512     Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
4513     SDValue Glue = Chain.getValue(1);
4514     return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
4515                        Op.getOperand(2), Glue);
4516   }
4517   case Intrinsic::amdgcn_init_exec: {
4518     return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
4519                        Op.getOperand(2));
4520   }
4521   case Intrinsic::amdgcn_init_exec_from_input: {
4522     return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
4523                        Op.getOperand(2), Op.getOperand(3));
4524   }
4525   case AMDGPUIntrinsic::AMDGPU_kill: {
4526     SDValue Src = Op.getOperand(2);
4527     if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) {
4528       if (!K->isNegative())
4529         return Chain;
4530 
4531       SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32);
4532       return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne);
4533     }
4534 
4535     SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src);
4536     return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast);
4537   }
4538   case Intrinsic::amdgcn_s_barrier: {
4539     if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
4540       const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
4541       unsigned WGSize = ST.getFlatWorkGroupSizes(*MF.getFunction()).second;
4542       if (WGSize <= ST.getWavefrontSize())
4543         return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
4544                                           Op.getOperand(0)), 0);
4545     }
4546     return SDValue();
4547   };
4548   case AMDGPUIntrinsic::SI_tbuffer_store: {
4549 
4550     // Extract vindex and voffset from vaddr as appropriate
4551     const ConstantSDNode *OffEn = cast<ConstantSDNode>(Op.getOperand(10));
4552     const ConstantSDNode *IdxEn = cast<ConstantSDNode>(Op.getOperand(11));
4553     SDValue VAddr = Op.getOperand(5);
4554 
4555     SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
4556 
4557     assert(!(OffEn->isOne() && IdxEn->isOne()) &&
4558            "Legacy intrinsic doesn't support both offset and index - use new version");
4559 
4560     SDValue VIndex = IdxEn->isOne() ? VAddr : Zero;
4561     SDValue VOffset = OffEn->isOne() ? VAddr : Zero;
4562 
4563     // Deal with the vec-3 case
4564     const ConstantSDNode *NumChannels = cast<ConstantSDNode>(Op.getOperand(4));
4565     auto Opcode = NumChannels->getZExtValue() == 3 ?
4566       AMDGPUISD::TBUFFER_STORE_FORMAT_X3 : AMDGPUISD::TBUFFER_STORE_FORMAT;
4567 
4568     SDValue Ops[] = {
4569      Chain,
4570      Op.getOperand(3),  // vdata
4571      Op.getOperand(2),  // rsrc
4572      VIndex,
4573      VOffset,
4574      Op.getOperand(6),  // soffset
4575      Op.getOperand(7),  // inst_offset
4576      Op.getOperand(8),  // dfmt
4577      Op.getOperand(9),  // nfmt
4578      Op.getOperand(12), // glc
4579      Op.getOperand(13), // slc
4580     };
4581 
4582     assert((cast<ConstantSDNode>(Op.getOperand(14)))->getZExtValue() == 0 &&
4583            "Value of tfe other than zero is unsupported");
4584 
4585     EVT VT = Op.getOperand(3).getValueType();
4586     MachineMemOperand *MMO = MF.getMachineMemOperand(
4587       MachinePointerInfo(),
4588       MachineMemOperand::MOStore,
4589       VT.getStoreSize(), 4);
4590     return DAG.getMemIntrinsicNode(Opcode, DL,
4591                                    Op->getVTList(), Ops, VT, MMO);
4592   }
4593 
4594   case Intrinsic::amdgcn_tbuffer_store: {
4595     SDValue Ops[] = {
4596       Chain,
4597       Op.getOperand(2),  // vdata
4598       Op.getOperand(3),  // rsrc
4599       Op.getOperand(4),  // vindex
4600       Op.getOperand(5),  // voffset
4601       Op.getOperand(6),  // soffset
4602       Op.getOperand(7),  // offset
4603       Op.getOperand(8),  // dfmt
4604       Op.getOperand(9),  // nfmt
4605       Op.getOperand(10), // glc
4606       Op.getOperand(11)  // slc
4607     };
4608     EVT VT = Op.getOperand(3).getValueType();
4609     MachineMemOperand *MMO = MF.getMachineMemOperand(
4610       MachinePointerInfo(),
4611       MachineMemOperand::MOStore,
4612       VT.getStoreSize(), 4);
4613     return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL,
4614                                    Op->getVTList(), Ops, VT, MMO);
4615   }
4616 
4617   case Intrinsic::amdgcn_buffer_store:
4618   case Intrinsic::amdgcn_buffer_store_format: {
4619     SDValue Ops[] = {
4620       Chain,
4621       Op.getOperand(2), // vdata
4622       Op.getOperand(3), // rsrc
4623       Op.getOperand(4), // vindex
4624       Op.getOperand(5), // offset
4625       Op.getOperand(6), // glc
4626       Op.getOperand(7)  // slc
4627     };
4628     EVT VT = Op.getOperand(3).getValueType();
4629     MachineMemOperand *MMO = MF.getMachineMemOperand(
4630       MachinePointerInfo(),
4631       MachineMemOperand::MOStore |
4632       MachineMemOperand::MODereferenceable,
4633       VT.getStoreSize(), 4);
4634 
4635     unsigned Opcode = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
4636                         AMDGPUISD::BUFFER_STORE :
4637                         AMDGPUISD::BUFFER_STORE_FORMAT;
4638     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, MMO);
4639   }
4640 
4641   default:
4642     return Op;
4643   }
4644 }
4645 
4646 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
4647   SDLoc DL(Op);
4648   LoadSDNode *Load = cast<LoadSDNode>(Op);
4649   ISD::LoadExtType ExtType = Load->getExtensionType();
4650   EVT MemVT = Load->getMemoryVT();
4651 
4652   if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
4653     if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
4654       return SDValue();
4655 
4656     // FIXME: Copied from PPC
4657     // First, load into 32 bits, then truncate to 1 bit.
4658 
4659     SDValue Chain = Load->getChain();
4660     SDValue BasePtr = Load->getBasePtr();
4661     MachineMemOperand *MMO = Load->getMemOperand();
4662 
4663     EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
4664 
4665     SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
4666                                    BasePtr, RealMemVT, MMO);
4667 
4668     SDValue Ops[] = {
4669       DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
4670       NewLD.getValue(1)
4671     };
4672 
4673     return DAG.getMergeValues(Ops, DL);
4674   }
4675 
4676   if (!MemVT.isVector())
4677     return SDValue();
4678 
4679   assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
4680          "Custom lowering for non-i32 vectors hasn't been implemented.");
4681 
4682   unsigned AS = Load->getAddressSpace();
4683   if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
4684                           AS, Load->getAlignment())) {
4685     SDValue Ops[2];
4686     std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
4687     return DAG.getMergeValues(Ops, DL);
4688   }
4689 
4690   MachineFunction &MF = DAG.getMachineFunction();
4691   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
4692   // If there is a possibilty that flat instruction access scratch memory
4693   // then we need to use the same legalization rules we use for private.
4694   if (AS == AMDGPUASI.FLAT_ADDRESS)
4695     AS = MFI->hasFlatScratchInit() ?
4696          AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
4697 
4698   unsigned NumElements = MemVT.getVectorNumElements();
4699   if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
4700     if (isMemOpUniform(Load))
4701       return SDValue();
4702     // Non-uniform loads will be selected to MUBUF instructions, so they
4703     // have the same legalization requirements as global and private
4704     // loads.
4705     //
4706   }
4707   if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS) {
4708     if (Subtarget->getScalarizeGlobalBehavior() && isMemOpUniform(Load) &&
4709         !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load))
4710       return SDValue();
4711     // Non-uniform loads will be selected to MUBUF instructions, so they
4712     // have the same legalization requirements as global and private
4713     // loads.
4714     //
4715   }
4716   if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS ||
4717       AS == AMDGPUASI.FLAT_ADDRESS) {
4718     if (NumElements > 4)
4719       return SplitVectorLoad(Op, DAG);
4720     // v4 loads are supported for private and global memory.
4721     return SDValue();
4722   }
4723   if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
4724     // Depending on the setting of the private_element_size field in the
4725     // resource descriptor, we can only make private accesses up to a certain
4726     // size.
4727     switch (Subtarget->getMaxPrivateElementSize()) {
4728     case 4:
4729       return scalarizeVectorLoad(Load, DAG);
4730     case 8:
4731       if (NumElements > 2)
4732         return SplitVectorLoad(Op, DAG);
4733       return SDValue();
4734     case 16:
4735       // Same as global/flat
4736       if (NumElements > 4)
4737         return SplitVectorLoad(Op, DAG);
4738       return SDValue();
4739     default:
4740       llvm_unreachable("unsupported private_element_size");
4741     }
4742   } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
4743     if (NumElements > 2)
4744       return SplitVectorLoad(Op, DAG);
4745 
4746     if (NumElements == 2)
4747       return SDValue();
4748 
4749     // If properly aligned, if we split we might be able to use ds_read_b64.
4750     return SplitVectorLoad(Op, DAG);
4751   }
4752   return SDValue();
4753 }
4754 
4755 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
4756   if (Op.getValueType() != MVT::i64)
4757     return SDValue();
4758 
4759   SDLoc DL(Op);
4760   SDValue Cond = Op.getOperand(0);
4761 
4762   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
4763   SDValue One = DAG.getConstant(1, DL, MVT::i32);
4764 
4765   SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
4766   SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
4767 
4768   SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
4769   SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
4770 
4771   SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
4772 
4773   SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
4774   SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
4775 
4776   SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
4777 
4778   SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
4779   return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res);
4780 }
4781 
4782 // Catch division cases where we can use shortcuts with rcp and rsq
4783 // instructions.
4784 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
4785                                               SelectionDAG &DAG) const {
4786   SDLoc SL(Op);
4787   SDValue LHS = Op.getOperand(0);
4788   SDValue RHS = Op.getOperand(1);
4789   EVT VT = Op.getValueType();
4790   const SDNodeFlags Flags = Op->getFlags();
4791   bool Unsafe = DAG.getTarget().Options.UnsafeFPMath ||
4792                 Flags.hasUnsafeAlgebra() || Flags.hasAllowReciprocal();
4793 
4794   if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
4795     return SDValue();
4796 
4797   if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
4798     if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
4799       if (CLHS->isExactlyValue(1.0)) {
4800         // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
4801         // the CI documentation has a worst case error of 1 ulp.
4802         // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
4803         // use it as long as we aren't trying to use denormals.
4804         //
4805         // v_rcp_f16 and v_rsq_f16 DO support denormals.
4806 
4807         // 1.0 / sqrt(x) -> rsq(x)
4808 
4809         // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
4810         // error seems really high at 2^29 ULP.
4811         if (RHS.getOpcode() == ISD::FSQRT)
4812           return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
4813 
4814         // 1.0 / x -> rcp(x)
4815         return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
4816       }
4817 
4818       // Same as for 1.0, but expand the sign out of the constant.
4819       if (CLHS->isExactlyValue(-1.0)) {
4820         // -1.0 / x -> rcp (fneg x)
4821         SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
4822         return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
4823       }
4824     }
4825   }
4826 
4827   if (Unsafe) {
4828     // Turn into multiply by the reciprocal.
4829     // x / y -> x * (1.0 / y)
4830     SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
4831     return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
4832   }
4833 
4834   return SDValue();
4835 }
4836 
4837 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
4838                           EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
4839   if (GlueChain->getNumValues() <= 1) {
4840     return DAG.getNode(Opcode, SL, VT, A, B);
4841   }
4842 
4843   assert(GlueChain->getNumValues() == 3);
4844 
4845   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
4846   switch (Opcode) {
4847   default: llvm_unreachable("no chain equivalent for opcode");
4848   case ISD::FMUL:
4849     Opcode = AMDGPUISD::FMUL_W_CHAIN;
4850     break;
4851   }
4852 
4853   return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
4854                      GlueChain.getValue(2));
4855 }
4856 
4857 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
4858                            EVT VT, SDValue A, SDValue B, SDValue C,
4859                            SDValue GlueChain) {
4860   if (GlueChain->getNumValues() <= 1) {
4861     return DAG.getNode(Opcode, SL, VT, A, B, C);
4862   }
4863 
4864   assert(GlueChain->getNumValues() == 3);
4865 
4866   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
4867   switch (Opcode) {
4868   default: llvm_unreachable("no chain equivalent for opcode");
4869   case ISD::FMA:
4870     Opcode = AMDGPUISD::FMA_W_CHAIN;
4871     break;
4872   }
4873 
4874   return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
4875                      GlueChain.getValue(2));
4876 }
4877 
4878 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
4879   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
4880     return FastLowered;
4881 
4882   SDLoc SL(Op);
4883   SDValue Src0 = Op.getOperand(0);
4884   SDValue Src1 = Op.getOperand(1);
4885 
4886   SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
4887   SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
4888 
4889   SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
4890   SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
4891 
4892   SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
4893   SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
4894 
4895   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
4896 }
4897 
4898 // Faster 2.5 ULP division that does not support denormals.
4899 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
4900   SDLoc SL(Op);
4901   SDValue LHS = Op.getOperand(1);
4902   SDValue RHS = Op.getOperand(2);
4903 
4904   SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
4905 
4906   const APFloat K0Val(BitsToFloat(0x6f800000));
4907   const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
4908 
4909   const APFloat K1Val(BitsToFloat(0x2f800000));
4910   const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
4911 
4912   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
4913 
4914   EVT SetCCVT =
4915     getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
4916 
4917   SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
4918 
4919   SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
4920 
4921   // TODO: Should this propagate fast-math-flags?
4922   r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
4923 
4924   // rcp does not support denormals.
4925   SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
4926 
4927   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
4928 
4929   return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
4930 }
4931 
4932 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
4933   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
4934     return FastLowered;
4935 
4936   SDLoc SL(Op);
4937   SDValue LHS = Op.getOperand(0);
4938   SDValue RHS = Op.getOperand(1);
4939 
4940   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
4941 
4942   SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
4943 
4944   SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
4945                                           RHS, RHS, LHS);
4946   SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
4947                                         LHS, RHS, LHS);
4948 
4949   // Denominator is scaled to not be denormal, so using rcp is ok.
4950   SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
4951                                   DenominatorScaled);
4952   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
4953                                      DenominatorScaled);
4954 
4955   const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
4956                                (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
4957                                (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
4958 
4959   const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);
4960 
4961   if (!Subtarget->hasFP32Denormals()) {
4962     SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
4963     const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
4964                                                       SL, MVT::i32);
4965     SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
4966                                        DAG.getEntryNode(),
4967                                        EnableDenormValue, BitField);
4968     SDValue Ops[3] = {
4969       NegDivScale0,
4970       EnableDenorm.getValue(0),
4971       EnableDenorm.getValue(1)
4972     };
4973 
4974     NegDivScale0 = DAG.getMergeValues(Ops, SL);
4975   }
4976 
4977   SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
4978                              ApproxRcp, One, NegDivScale0);
4979 
4980   SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
4981                              ApproxRcp, Fma0);
4982 
4983   SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
4984                            Fma1, Fma1);
4985 
4986   SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
4987                              NumeratorScaled, Mul);
4988 
4989   SDValue Fma3 = getFPTernOp(DAG, ISD::FMA,SL, MVT::f32, Fma2, Fma1, Mul, Fma2);
4990 
4991   SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
4992                              NumeratorScaled, Fma3);
4993 
4994   if (!Subtarget->hasFP32Denormals()) {
4995     const SDValue DisableDenormValue =
4996         DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
4997     SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
4998                                         Fma4.getValue(1),
4999                                         DisableDenormValue,
5000                                         BitField,
5001                                         Fma4.getValue(2));
5002 
5003     SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
5004                                       DisableDenorm, DAG.getRoot());
5005     DAG.setRoot(OutputChain);
5006   }
5007 
5008   SDValue Scale = NumeratorScaled.getValue(1);
5009   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
5010                              Fma4, Fma1, Fma3, Scale);
5011 
5012   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
5013 }
5014 
5015 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
5016   if (DAG.getTarget().Options.UnsafeFPMath)
5017     return lowerFastUnsafeFDIV(Op, DAG);
5018 
5019   SDLoc SL(Op);
5020   SDValue X = Op.getOperand(0);
5021   SDValue Y = Op.getOperand(1);
5022 
5023   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
5024 
5025   SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
5026 
5027   SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
5028 
5029   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
5030 
5031   SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
5032 
5033   SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
5034 
5035   SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
5036 
5037   SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
5038 
5039   SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
5040 
5041   SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
5042   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
5043 
5044   SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
5045                              NegDivScale0, Mul, DivScale1);
5046 
5047   SDValue Scale;
5048 
5049   if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
5050     // Workaround a hardware bug on SI where the condition output from div_scale
5051     // is not usable.
5052 
5053     const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
5054 
5055     // Figure out if the scale to use for div_fmas.
5056     SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
5057     SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
5058     SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
5059     SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
5060 
5061     SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
5062     SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
5063 
5064     SDValue Scale0Hi
5065       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
5066     SDValue Scale1Hi
5067       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
5068 
5069     SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
5070     SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
5071     Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
5072   } else {
5073     Scale = DivScale1.getValue(1);
5074   }
5075 
5076   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
5077                              Fma4, Fma3, Mul, Scale);
5078 
5079   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
5080 }
5081 
5082 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
5083   EVT VT = Op.getValueType();
5084 
5085   if (VT == MVT::f32)
5086     return LowerFDIV32(Op, DAG);
5087 
5088   if (VT == MVT::f64)
5089     return LowerFDIV64(Op, DAG);
5090 
5091   if (VT == MVT::f16)
5092     return LowerFDIV16(Op, DAG);
5093 
5094   llvm_unreachable("Unexpected type for fdiv");
5095 }
5096 
5097 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
5098   SDLoc DL(Op);
5099   StoreSDNode *Store = cast<StoreSDNode>(Op);
5100   EVT VT = Store->getMemoryVT();
5101 
5102   if (VT == MVT::i1) {
5103     return DAG.getTruncStore(Store->getChain(), DL,
5104        DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
5105        Store->getBasePtr(), MVT::i1, Store->getMemOperand());
5106   }
5107 
5108   assert(VT.isVector() &&
5109          Store->getValue().getValueType().getScalarType() == MVT::i32);
5110 
5111   unsigned AS = Store->getAddressSpace();
5112   if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
5113                           AS, Store->getAlignment())) {
5114     return expandUnalignedStore(Store, DAG);
5115   }
5116 
5117   MachineFunction &MF = DAG.getMachineFunction();
5118   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
5119   // If there is a possibilty that flat instruction access scratch memory
5120   // then we need to use the same legalization rules we use for private.
5121   if (AS == AMDGPUASI.FLAT_ADDRESS)
5122     AS = MFI->hasFlatScratchInit() ?
5123          AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
5124 
5125   unsigned NumElements = VT.getVectorNumElements();
5126   if (AS == AMDGPUASI.GLOBAL_ADDRESS ||
5127       AS == AMDGPUASI.FLAT_ADDRESS) {
5128     if (NumElements > 4)
5129       return SplitVectorStore(Op, DAG);
5130     return SDValue();
5131   } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
5132     switch (Subtarget->getMaxPrivateElementSize()) {
5133     case 4:
5134       return scalarizeVectorStore(Store, DAG);
5135     case 8:
5136       if (NumElements > 2)
5137         return SplitVectorStore(Op, DAG);
5138       return SDValue();
5139     case 16:
5140       if (NumElements > 4)
5141         return SplitVectorStore(Op, DAG);
5142       return SDValue();
5143     default:
5144       llvm_unreachable("unsupported private_element_size");
5145     }
5146   } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
5147     if (NumElements > 2)
5148       return SplitVectorStore(Op, DAG);
5149 
5150     if (NumElements == 2)
5151       return Op;
5152 
5153     // If properly aligned, if we split we might be able to use ds_write_b64.
5154     return SplitVectorStore(Op, DAG);
5155   } else {
5156     llvm_unreachable("unhandled address space");
5157   }
5158 }
5159 
5160 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
5161   SDLoc DL(Op);
5162   EVT VT = Op.getValueType();
5163   SDValue Arg = Op.getOperand(0);
5164   // TODO: Should this propagate fast-math-flags?
5165   SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
5166                                   DAG.getNode(ISD::FMUL, DL, VT, Arg,
5167                                               DAG.getConstantFP(0.5/M_PI, DL,
5168                                                                 VT)));
5169 
5170   switch (Op.getOpcode()) {
5171   case ISD::FCOS:
5172     return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart);
5173   case ISD::FSIN:
5174     return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart);
5175   default:
5176     llvm_unreachable("Wrong trig opcode");
5177   }
5178 }
5179 
5180 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
5181   AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
5182   assert(AtomicNode->isCompareAndSwap());
5183   unsigned AS = AtomicNode->getAddressSpace();
5184 
5185   // No custom lowering required for local address space
5186   if (!isFlatGlobalAddrSpace(AS, AMDGPUASI))
5187     return Op;
5188 
5189   // Non-local address space requires custom lowering for atomic compare
5190   // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2
5191   SDLoc DL(Op);
5192   SDValue ChainIn = Op.getOperand(0);
5193   SDValue Addr = Op.getOperand(1);
5194   SDValue Old = Op.getOperand(2);
5195   SDValue New = Op.getOperand(3);
5196   EVT VT = Op.getValueType();
5197   MVT SimpleVT = VT.getSimpleVT();
5198   MVT VecType = MVT::getVectorVT(SimpleVT, 2);
5199 
5200   SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
5201   SDValue Ops[] = { ChainIn, Addr, NewOld };
5202 
5203   return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
5204                                  Ops, VT, AtomicNode->getMemOperand());
5205 }
5206 
5207 //===----------------------------------------------------------------------===//
5208 // Custom DAG optimizations
5209 //===----------------------------------------------------------------------===//
5210 
5211 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
5212                                                      DAGCombinerInfo &DCI) const {
5213   EVT VT = N->getValueType(0);
5214   EVT ScalarVT = VT.getScalarType();
5215   if (ScalarVT != MVT::f32)
5216     return SDValue();
5217 
5218   SelectionDAG &DAG = DCI.DAG;
5219   SDLoc DL(N);
5220 
5221   SDValue Src = N->getOperand(0);
5222   EVT SrcVT = Src.getValueType();
5223 
5224   // TODO: We could try to match extracting the higher bytes, which would be
5225   // easier if i8 vectors weren't promoted to i32 vectors, particularly after
5226   // types are legalized. v4i8 -> v4f32 is probably the only case to worry
5227   // about in practice.
5228   if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) {
5229     if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
5230       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
5231       DCI.AddToWorklist(Cvt.getNode());
5232       return Cvt;
5233     }
5234   }
5235 
5236   return SDValue();
5237 }
5238 
5239 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
5240 
5241 // This is a variant of
5242 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
5243 //
5244 // The normal DAG combiner will do this, but only if the add has one use since
5245 // that would increase the number of instructions.
5246 //
5247 // This prevents us from seeing a constant offset that can be folded into a
5248 // memory instruction's addressing mode. If we know the resulting add offset of
5249 // a pointer can be folded into an addressing offset, we can replace the pointer
5250 // operand with the add of new constant offset. This eliminates one of the uses,
5251 // and may allow the remaining use to also be simplified.
5252 //
5253 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
5254                                                unsigned AddrSpace,
5255                                                EVT MemVT,
5256                                                DAGCombinerInfo &DCI) const {
5257   SDValue N0 = N->getOperand(0);
5258   SDValue N1 = N->getOperand(1);
5259 
5260   // We only do this to handle cases where it's profitable when there are
5261   // multiple uses of the add, so defer to the standard combine.
5262   if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
5263       N0->hasOneUse())
5264     return SDValue();
5265 
5266   const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
5267   if (!CN1)
5268     return SDValue();
5269 
5270   const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
5271   if (!CAdd)
5272     return SDValue();
5273 
5274   // If the resulting offset is too large, we can't fold it into the addressing
5275   // mode offset.
5276   APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
5277   Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());
5278 
5279   AddrMode AM;
5280   AM.HasBaseReg = true;
5281   AM.BaseOffs = Offset.getSExtValue();
5282   if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
5283     return SDValue();
5284 
5285   SelectionDAG &DAG = DCI.DAG;
5286   SDLoc SL(N);
5287   EVT VT = N->getValueType(0);
5288 
5289   SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
5290   SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
5291 
5292   SDNodeFlags Flags;
5293   Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
5294                           (N0.getOpcode() == ISD::OR ||
5295                            N0->getFlags().hasNoUnsignedWrap()));
5296 
5297   return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
5298 }
5299 
5300 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
5301                                                   DAGCombinerInfo &DCI) const {
5302   SDValue Ptr = N->getBasePtr();
5303   SelectionDAG &DAG = DCI.DAG;
5304   SDLoc SL(N);
5305 
5306   // TODO: We could also do this for multiplies.
5307   if (Ptr.getOpcode() == ISD::SHL) {
5308     SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(),  N->getAddressSpace(),
5309                                           N->getMemoryVT(), DCI);
5310     if (NewPtr) {
5311       SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
5312 
5313       NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
5314       return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
5315     }
5316   }
5317 
5318   return SDValue();
5319 }
5320 
5321 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
5322   return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
5323          (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
5324          (Opc == ISD::XOR && Val == 0);
5325 }
5326 
5327 // Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This
5328 // will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit
5329 // integer combine opportunities since most 64-bit operations are decomposed
5330 // this way.  TODO: We won't want this for SALU especially if it is an inline
5331 // immediate.
5332 SDValue SITargetLowering::splitBinaryBitConstantOp(
5333   DAGCombinerInfo &DCI,
5334   const SDLoc &SL,
5335   unsigned Opc, SDValue LHS,
5336   const ConstantSDNode *CRHS) const {
5337   uint64_t Val = CRHS->getZExtValue();
5338   uint32_t ValLo = Lo_32(Val);
5339   uint32_t ValHi = Hi_32(Val);
5340   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
5341 
5342     if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
5343          bitOpWithConstantIsReducible(Opc, ValHi)) ||
5344         (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
5345     // If we need to materialize a 64-bit immediate, it will be split up later
5346     // anyway. Avoid creating the harder to understand 64-bit immediate
5347     // materialization.
5348     return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
5349   }
5350 
5351   return SDValue();
5352 }
5353 
5354 // Returns true if argument is a boolean value which is not serialized into
5355 // memory or argument and does not require v_cmdmask_b32 to be deserialized.
5356 static bool isBoolSGPR(SDValue V) {
5357   if (V.getValueType() != MVT::i1)
5358     return false;
5359   switch (V.getOpcode()) {
5360   default: break;
5361   case ISD::SETCC:
5362   case ISD::AND:
5363   case ISD::OR:
5364   case ISD::XOR:
5365   case AMDGPUISD::FP_CLASS:
5366     return true;
5367   }
5368   return false;
5369 }
5370 
5371 SDValue SITargetLowering::performAndCombine(SDNode *N,
5372                                             DAGCombinerInfo &DCI) const {
5373   if (DCI.isBeforeLegalize())
5374     return SDValue();
5375 
5376   SelectionDAG &DAG = DCI.DAG;
5377   EVT VT = N->getValueType(0);
5378   SDValue LHS = N->getOperand(0);
5379   SDValue RHS = N->getOperand(1);
5380 
5381 
5382   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
5383   if (VT == MVT::i64 && CRHS) {
5384     if (SDValue Split
5385         = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
5386       return Split;
5387   }
5388 
5389   if (CRHS && VT == MVT::i32) {
5390     // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
5391     // nb = number of trailing zeroes in mask
5392     // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
5393     // given that we are selecting 8 or 16 bit fields starting at byte boundary.
5394     uint64_t Mask = CRHS->getZExtValue();
5395     unsigned Bits = countPopulation(Mask);
5396     if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
5397         (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
5398       if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
5399         unsigned Shift = CShift->getZExtValue();
5400         unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
5401         unsigned Offset = NB + Shift;
5402         if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
5403           SDLoc SL(N);
5404           SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
5405                                     LHS->getOperand(0),
5406                                     DAG.getConstant(Offset, SL, MVT::i32),
5407                                     DAG.getConstant(Bits, SL, MVT::i32));
5408           EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
5409           SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
5410                                     DAG.getValueType(NarrowVT));
5411           SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
5412                                     DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
5413           return Shl;
5414         }
5415       }
5416     }
5417   }
5418 
5419   // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
5420   // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
5421   if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
5422     ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5423     ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
5424 
5425     SDValue X = LHS.getOperand(0);
5426     SDValue Y = RHS.getOperand(0);
5427     if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
5428       return SDValue();
5429 
5430     if (LCC == ISD::SETO) {
5431       if (X != LHS.getOperand(1))
5432         return SDValue();
5433 
5434       if (RCC == ISD::SETUNE) {
5435         const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
5436         if (!C1 || !C1->isInfinity() || C1->isNegative())
5437           return SDValue();
5438 
5439         const uint32_t Mask = SIInstrFlags::N_NORMAL |
5440                               SIInstrFlags::N_SUBNORMAL |
5441                               SIInstrFlags::N_ZERO |
5442                               SIInstrFlags::P_ZERO |
5443                               SIInstrFlags::P_SUBNORMAL |
5444                               SIInstrFlags::P_NORMAL;
5445 
5446         static_assert(((~(SIInstrFlags::S_NAN |
5447                           SIInstrFlags::Q_NAN |
5448                           SIInstrFlags::N_INFINITY |
5449                           SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
5450                       "mask not equal");
5451 
5452         SDLoc DL(N);
5453         return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
5454                            X, DAG.getConstant(Mask, DL, MVT::i32));
5455       }
5456     }
5457   }
5458 
5459   if (VT == MVT::i32 &&
5460       (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
5461     // and x, (sext cc from i1) => select cc, x, 0
5462     if (RHS.getOpcode() != ISD::SIGN_EXTEND)
5463       std::swap(LHS, RHS);
5464     if (isBoolSGPR(RHS.getOperand(0)))
5465       return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
5466                            LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
5467   }
5468 
5469   return SDValue();
5470 }
5471 
5472 SDValue SITargetLowering::performOrCombine(SDNode *N,
5473                                            DAGCombinerInfo &DCI) const {
5474   SelectionDAG &DAG = DCI.DAG;
5475   SDValue LHS = N->getOperand(0);
5476   SDValue RHS = N->getOperand(1);
5477 
5478   EVT VT = N->getValueType(0);
5479   if (VT == MVT::i1) {
5480     // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
5481     if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
5482         RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
5483       SDValue Src = LHS.getOperand(0);
5484       if (Src != RHS.getOperand(0))
5485         return SDValue();
5486 
5487       const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
5488       const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
5489       if (!CLHS || !CRHS)
5490         return SDValue();
5491 
5492       // Only 10 bits are used.
5493       static const uint32_t MaxMask = 0x3ff;
5494 
5495       uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
5496       SDLoc DL(N);
5497       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
5498                          Src, DAG.getConstant(NewMask, DL, MVT::i32));
5499     }
5500 
5501     return SDValue();
5502   }
5503 
5504   if (VT != MVT::i64)
5505     return SDValue();
5506 
5507   // TODO: This could be a generic combine with a predicate for extracting the
5508   // high half of an integer being free.
5509 
5510   // (or i64:x, (zero_extend i32:y)) ->
5511   //   i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
5512   if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
5513       RHS.getOpcode() != ISD::ZERO_EXTEND)
5514     std::swap(LHS, RHS);
5515 
5516   if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
5517     SDValue ExtSrc = RHS.getOperand(0);
5518     EVT SrcVT = ExtSrc.getValueType();
5519     if (SrcVT == MVT::i32) {
5520       SDLoc SL(N);
5521       SDValue LowLHS, HiBits;
5522       std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
5523       SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
5524 
5525       DCI.AddToWorklist(LowOr.getNode());
5526       DCI.AddToWorklist(HiBits.getNode());
5527 
5528       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
5529                                 LowOr, HiBits);
5530       return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
5531     }
5532   }
5533 
5534   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
5535   if (CRHS) {
5536     if (SDValue Split
5537           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
5538       return Split;
5539   }
5540 
5541   return SDValue();
5542 }
5543 
5544 SDValue SITargetLowering::performXorCombine(SDNode *N,
5545                                             DAGCombinerInfo &DCI) const {
5546   EVT VT = N->getValueType(0);
5547   if (VT != MVT::i64)
5548     return SDValue();
5549 
5550   SDValue LHS = N->getOperand(0);
5551   SDValue RHS = N->getOperand(1);
5552 
5553   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
5554   if (CRHS) {
5555     if (SDValue Split
5556           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
5557       return Split;
5558   }
5559 
5560   return SDValue();
5561 }
5562 
5563 // Instructions that will be lowered with a final instruction that zeros the
5564 // high result bits.
5565 // XXX - probably only need to list legal operations.
5566 static bool fp16SrcZerosHighBits(unsigned Opc) {
5567   switch (Opc) {
5568   case ISD::FADD:
5569   case ISD::FSUB:
5570   case ISD::FMUL:
5571   case ISD::FDIV:
5572   case ISD::FREM:
5573   case ISD::FMA:
5574   case ISD::FMAD:
5575   case ISD::FCANONICALIZE:
5576   case ISD::FP_ROUND:
5577   case ISD::UINT_TO_FP:
5578   case ISD::SINT_TO_FP:
5579   case ISD::FABS:
5580     // Fabs is lowered to a bit operation, but it's an and which will clear the
5581     // high bits anyway.
5582   case ISD::FSQRT:
5583   case ISD::FSIN:
5584   case ISD::FCOS:
5585   case ISD::FPOWI:
5586   case ISD::FPOW:
5587   case ISD::FLOG:
5588   case ISD::FLOG2:
5589   case ISD::FLOG10:
5590   case ISD::FEXP:
5591   case ISD::FEXP2:
5592   case ISD::FCEIL:
5593   case ISD::FTRUNC:
5594   case ISD::FRINT:
5595   case ISD::FNEARBYINT:
5596   case ISD::FROUND:
5597   case ISD::FFLOOR:
5598   case ISD::FMINNUM:
5599   case ISD::FMAXNUM:
5600   case AMDGPUISD::FRACT:
5601   case AMDGPUISD::CLAMP:
5602   case AMDGPUISD::COS_HW:
5603   case AMDGPUISD::SIN_HW:
5604   case AMDGPUISD::FMIN3:
5605   case AMDGPUISD::FMAX3:
5606   case AMDGPUISD::FMED3:
5607   case AMDGPUISD::FMAD_FTZ:
5608   case AMDGPUISD::RCP:
5609   case AMDGPUISD::RSQ:
5610   case AMDGPUISD::LDEXP:
5611     return true;
5612   default:
5613     // fcopysign, select and others may be lowered to 32-bit bit operations
5614     // which don't zero the high bits.
5615     return false;
5616   }
5617 }
5618 
5619 SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
5620                                                    DAGCombinerInfo &DCI) const {
5621   if (!Subtarget->has16BitInsts() ||
5622       DCI.getDAGCombineLevel() < AfterLegalizeDAG)
5623     return SDValue();
5624 
5625   EVT VT = N->getValueType(0);
5626   if (VT != MVT::i32)
5627     return SDValue();
5628 
5629   SDValue Src = N->getOperand(0);
5630   if (Src.getValueType() != MVT::i16)
5631     return SDValue();
5632 
5633   // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
5634   // FIXME: It is not universally true that the high bits are zeroed on gfx9.
5635   if (Src.getOpcode() == ISD::BITCAST) {
5636     SDValue BCSrc = Src.getOperand(0);
5637     if (BCSrc.getValueType() == MVT::f16 &&
5638         fp16SrcZerosHighBits(BCSrc.getOpcode()))
5639       return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
5640   }
5641 
5642   return SDValue();
5643 }
5644 
5645 SDValue SITargetLowering::performClassCombine(SDNode *N,
5646                                               DAGCombinerInfo &DCI) const {
5647   SelectionDAG &DAG = DCI.DAG;
5648   SDValue Mask = N->getOperand(1);
5649 
5650   // fp_class x, 0 -> false
5651   if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
5652     if (CMask->isNullValue())
5653       return DAG.getConstant(0, SDLoc(N), MVT::i1);
5654   }
5655 
5656   if (N->getOperand(0).isUndef())
5657     return DAG.getUNDEF(MVT::i1);
5658 
5659   return SDValue();
5660 }
5661 
5662 static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
5663   if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions())
5664     return true;
5665 
5666   return DAG.isKnownNeverNaN(Op);
5667 }
5668 
5669 static bool isCanonicalized(SelectionDAG &DAG, SDValue Op,
5670                             const SISubtarget *ST, unsigned MaxDepth=5) {
5671   // If source is a result of another standard FP operation it is already in
5672   // canonical form.
5673 
5674   switch (Op.getOpcode()) {
5675   default:
5676     break;
5677 
5678   // These will flush denorms if required.
5679   case ISD::FADD:
5680   case ISD::FSUB:
5681   case ISD::FMUL:
5682   case ISD::FSQRT:
5683   case ISD::FCEIL:
5684   case ISD::FFLOOR:
5685   case ISD::FMA:
5686   case ISD::FMAD:
5687 
5688   case ISD::FCANONICALIZE:
5689     return true;
5690 
5691   case ISD::FP_ROUND:
5692     return Op.getValueType().getScalarType() != MVT::f16 ||
5693            ST->hasFP16Denormals();
5694 
5695   case ISD::FP_EXTEND:
5696     return Op.getOperand(0).getValueType().getScalarType() != MVT::f16 ||
5697            ST->hasFP16Denormals();
5698 
5699   case ISD::FP16_TO_FP:
5700   case ISD::FP_TO_FP16:
5701     return ST->hasFP16Denormals();
5702 
5703   // It can/will be lowered or combined as a bit operation.
5704   // Need to check their input recursively to handle.
5705   case ISD::FNEG:
5706   case ISD::FABS:
5707     return (MaxDepth > 0) &&
5708            isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1);
5709 
5710   case ISD::FSIN:
5711   case ISD::FCOS:
5712   case ISD::FSINCOS:
5713     return Op.getValueType().getScalarType() != MVT::f16;
5714 
5715   // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms.
5716   // For such targets need to check their input recursively.
5717   case ISD::FMINNUM:
5718   case ISD::FMAXNUM:
5719   case ISD::FMINNAN:
5720   case ISD::FMAXNAN:
5721 
5722     if (ST->supportsMinMaxDenormModes() &&
5723         DAG.isKnownNeverNaN(Op.getOperand(0)) &&
5724         DAG.isKnownNeverNaN(Op.getOperand(1)))
5725       return true;
5726 
5727     return (MaxDepth > 0) &&
5728            isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1) &&
5729            isCanonicalized(DAG, Op.getOperand(1), ST, MaxDepth - 1);
5730 
5731   case ISD::ConstantFP: {
5732     auto F = cast<ConstantFPSDNode>(Op)->getValueAPF();
5733     return !F.isDenormal() && !(F.isNaN() && F.isSignaling());
5734   }
5735   }
5736   return false;
5737 }
5738 
5739 // Constant fold canonicalize.
5740 SDValue SITargetLowering::performFCanonicalizeCombine(
5741   SDNode *N,
5742   DAGCombinerInfo &DCI) const {
5743   SelectionDAG &DAG = DCI.DAG;
5744   ConstantFPSDNode *CFP = isConstOrConstSplatFP(N->getOperand(0));
5745 
5746   if (!CFP) {
5747     SDValue N0 = N->getOperand(0);
5748     EVT VT = N0.getValueType().getScalarType();
5749     auto ST = getSubtarget();
5750 
5751     if (((VT == MVT::f32 && ST->hasFP32Denormals()) ||
5752          (VT == MVT::f64 && ST->hasFP64Denormals()) ||
5753          (VT == MVT::f16 && ST->hasFP16Denormals())) &&
5754         DAG.isKnownNeverNaN(N0))
5755       return N0;
5756 
5757     bool IsIEEEMode = Subtarget->enableIEEEBit(DAG.getMachineFunction());
5758 
5759     if ((IsIEEEMode || isKnownNeverSNan(DAG, N0)) &&
5760         isCanonicalized(DAG, N0, ST))
5761       return N0;
5762 
5763     return SDValue();
5764   }
5765 
5766   const APFloat &C = CFP->getValueAPF();
5767 
5768   // Flush denormals to 0 if not enabled.
5769   if (C.isDenormal()) {
5770     EVT VT = N->getValueType(0);
5771     EVT SVT = VT.getScalarType();
5772     if (SVT == MVT::f32 && !Subtarget->hasFP32Denormals())
5773       return DAG.getConstantFP(0.0, SDLoc(N), VT);
5774 
5775     if (SVT == MVT::f64 && !Subtarget->hasFP64Denormals())
5776       return DAG.getConstantFP(0.0, SDLoc(N), VT);
5777 
5778     if (SVT == MVT::f16 && !Subtarget->hasFP16Denormals())
5779       return DAG.getConstantFP(0.0, SDLoc(N), VT);
5780   }
5781 
5782   if (C.isNaN()) {
5783     EVT VT = N->getValueType(0);
5784     APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
5785     if (C.isSignaling()) {
5786       // Quiet a signaling NaN.
5787       return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
5788     }
5789 
5790     // Make sure it is the canonical NaN bitpattern.
5791     //
5792     // TODO: Can we use -1 as the canonical NaN value since it's an inline
5793     // immediate?
5794     if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
5795       return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
5796   }
5797 
5798   return N->getOperand(0);
5799 }
5800 
5801 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
5802   switch (Opc) {
5803   case ISD::FMAXNUM:
5804     return AMDGPUISD::FMAX3;
5805   case ISD::SMAX:
5806     return AMDGPUISD::SMAX3;
5807   case ISD::UMAX:
5808     return AMDGPUISD::UMAX3;
5809   case ISD::FMINNUM:
5810     return AMDGPUISD::FMIN3;
5811   case ISD::SMIN:
5812     return AMDGPUISD::SMIN3;
5813   case ISD::UMIN:
5814     return AMDGPUISD::UMIN3;
5815   default:
5816     llvm_unreachable("Not a min/max opcode");
5817   }
5818 }
5819 
5820 SDValue SITargetLowering::performIntMed3ImmCombine(
5821   SelectionDAG &DAG, const SDLoc &SL,
5822   SDValue Op0, SDValue Op1, bool Signed) const {
5823   ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
5824   if (!K1)
5825     return SDValue();
5826 
5827   ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
5828   if (!K0)
5829     return SDValue();
5830 
5831   if (Signed) {
5832     if (K0->getAPIntValue().sge(K1->getAPIntValue()))
5833       return SDValue();
5834   } else {
5835     if (K0->getAPIntValue().uge(K1->getAPIntValue()))
5836       return SDValue();
5837   }
5838 
5839   EVT VT = K0->getValueType(0);
5840   unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
5841   if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
5842     return DAG.getNode(Med3Opc, SL, VT,
5843                        Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
5844   }
5845 
5846   // If there isn't a 16-bit med3 operation, convert to 32-bit.
5847   MVT NVT = MVT::i32;
5848   unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
5849 
5850   SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
5851   SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
5852   SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
5853 
5854   SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
5855   return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
5856 }
5857 
5858 static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
5859   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
5860     return C;
5861 
5862   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
5863     if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
5864       return C;
5865   }
5866 
5867   return nullptr;
5868 }
5869 
5870 SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
5871                                                   const SDLoc &SL,
5872                                                   SDValue Op0,
5873                                                   SDValue Op1) const {
5874   ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
5875   if (!K1)
5876     return SDValue();
5877 
5878   ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
5879   if (!K0)
5880     return SDValue();
5881 
5882   // Ordered >= (although NaN inputs should have folded away by now).
5883   APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
5884   if (Cmp == APFloat::cmpGreaterThan)
5885     return SDValue();
5886 
5887   // TODO: Check IEEE bit enabled?
5888   EVT VT = Op0.getValueType();
5889   if (Subtarget->enableDX10Clamp()) {
5890     // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
5891     // hardware fmed3 behavior converting to a min.
5892     // FIXME: Should this be allowing -0.0?
5893     if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
5894       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
5895   }
5896 
5897   // med3 for f16 is only available on gfx9+, and not available for v2f16.
5898   if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
5899     // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
5900     // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
5901     // then give the other result, which is different from med3 with a NaN
5902     // input.
5903     SDValue Var = Op0.getOperand(0);
5904     if (!isKnownNeverSNan(DAG, Var))
5905       return SDValue();
5906 
5907     return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
5908                        Var, SDValue(K0, 0), SDValue(K1, 0));
5909   }
5910 
5911   return SDValue();
5912 }
5913 
5914 SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
5915                                                DAGCombinerInfo &DCI) const {
5916   SelectionDAG &DAG = DCI.DAG;
5917 
5918   EVT VT = N->getValueType(0);
5919   unsigned Opc = N->getOpcode();
5920   SDValue Op0 = N->getOperand(0);
5921   SDValue Op1 = N->getOperand(1);
5922 
5923   // Only do this if the inner op has one use since this will just increases
5924   // register pressure for no benefit.
5925 
5926 
5927   if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
5928       VT != MVT::f64 &&
5929       ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) {
5930     // max(max(a, b), c) -> max3(a, b, c)
5931     // min(min(a, b), c) -> min3(a, b, c)
5932     if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
5933       SDLoc DL(N);
5934       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
5935                          DL,
5936                          N->getValueType(0),
5937                          Op0.getOperand(0),
5938                          Op0.getOperand(1),
5939                          Op1);
5940     }
5941 
5942     // Try commuted.
5943     // max(a, max(b, c)) -> max3(a, b, c)
5944     // min(a, min(b, c)) -> min3(a, b, c)
5945     if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
5946       SDLoc DL(N);
5947       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
5948                          DL,
5949                          N->getValueType(0),
5950                          Op0,
5951                          Op1.getOperand(0),
5952                          Op1.getOperand(1));
5953     }
5954   }
5955 
5956   // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
5957   if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
5958     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
5959       return Med3;
5960   }
5961 
5962   if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
5963     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
5964       return Med3;
5965   }
5966 
5967   // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
5968   if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
5969        (Opc == AMDGPUISD::FMIN_LEGACY &&
5970         Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
5971       (VT == MVT::f32 || VT == MVT::f64 ||
5972        (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
5973        (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
5974       Op0.hasOneUse()) {
5975     if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
5976       return Res;
5977   }
5978 
5979   return SDValue();
5980 }
5981 
5982 static bool isClampZeroToOne(SDValue A, SDValue B) {
5983   if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
5984     if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
5985       // FIXME: Should this be allowing -0.0?
5986       return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
5987              (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
5988     }
5989   }
5990 
5991   return false;
5992 }
5993 
5994 // FIXME: Should only worry about snans for version with chain.
5995 SDValue SITargetLowering::performFMed3Combine(SDNode *N,
5996                                               DAGCombinerInfo &DCI) const {
5997   EVT VT = N->getValueType(0);
5998   // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
5999   // NaNs. With a NaN input, the order of the operands may change the result.
6000 
6001   SelectionDAG &DAG = DCI.DAG;
6002   SDLoc SL(N);
6003 
6004   SDValue Src0 = N->getOperand(0);
6005   SDValue Src1 = N->getOperand(1);
6006   SDValue Src2 = N->getOperand(2);
6007 
6008   if (isClampZeroToOne(Src0, Src1)) {
6009     // const_a, const_b, x -> clamp is safe in all cases including signaling
6010     // nans.
6011     // FIXME: Should this be allowing -0.0?
6012     return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
6013   }
6014 
6015   // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
6016   // handling no dx10-clamp?
6017   if (Subtarget->enableDX10Clamp()) {
6018     // If NaNs is clamped to 0, we are free to reorder the inputs.
6019 
6020     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
6021       std::swap(Src0, Src1);
6022 
6023     if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
6024       std::swap(Src1, Src2);
6025 
6026     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
6027       std::swap(Src0, Src1);
6028 
6029     if (isClampZeroToOne(Src1, Src2))
6030       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
6031   }
6032 
6033   return SDValue();
6034 }
6035 
6036 SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
6037                                                  DAGCombinerInfo &DCI) const {
6038   SDValue Src0 = N->getOperand(0);
6039   SDValue Src1 = N->getOperand(1);
6040   if (Src0.isUndef() && Src1.isUndef())
6041     return DCI.DAG.getUNDEF(N->getValueType(0));
6042   return SDValue();
6043 }
6044 
6045 SDValue SITargetLowering::performExtractVectorEltCombine(
6046   SDNode *N, DAGCombinerInfo &DCI) const {
6047   SDValue Vec = N->getOperand(0);
6048 
6049   SelectionDAG &DAG = DCI.DAG;
6050   if (Vec.getOpcode() == ISD::FNEG && allUsesHaveSourceMods(N)) {
6051     SDLoc SL(N);
6052     EVT EltVT = N->getValueType(0);
6053     SDValue Idx = N->getOperand(1);
6054     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
6055                               Vec.getOperand(0), Idx);
6056     return DAG.getNode(ISD::FNEG, SL, EltVT, Elt);
6057   }
6058 
6059   return SDValue();
6060 }
6061 
6062 static bool convertBuildVectorCastElt(SelectionDAG &DAG,
6063                                       SDValue &Lo, SDValue &Hi) {
6064   if (Hi.getOpcode() == ISD::BITCAST &&
6065       Hi.getOperand(0).getValueType() == MVT::f16 &&
6066       (isa<ConstantSDNode>(Lo) || Lo.isUndef())) {
6067     Lo = DAG.getNode(ISD::BITCAST, SDLoc(Lo), MVT::f16, Lo);
6068     Hi = Hi.getOperand(0);
6069     return true;
6070   }
6071 
6072   return false;
6073 }
6074 
6075 SDValue SITargetLowering::performBuildVectorCombine(
6076   SDNode *N, DAGCombinerInfo &DCI) const {
6077   SDLoc SL(N);
6078 
6079   if (!isTypeLegal(MVT::v2i16))
6080     return SDValue();
6081   SelectionDAG &DAG = DCI.DAG;
6082   EVT VT = N->getValueType(0);
6083 
6084   if (VT == MVT::v2i16) {
6085     SDValue Lo = N->getOperand(0);
6086     SDValue Hi = N->getOperand(1);
6087 
6088     // v2i16 build_vector (const|undef), (bitcast f16:$x)
6089     // -> bitcast (v2f16 build_vector const|undef, $x
6090     if (convertBuildVectorCastElt(DAG, Lo, Hi)) {
6091       SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Lo, Hi  });
6092       return DAG.getNode(ISD::BITCAST, SL, VT, NewVec);
6093     }
6094 
6095     if (convertBuildVectorCastElt(DAG, Hi, Lo)) {
6096       SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Hi, Lo  });
6097       return DAG.getNode(ISD::BITCAST, SL, VT, NewVec);
6098     }
6099   }
6100 
6101   return SDValue();
6102 }
6103 
6104 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
6105                                           const SDNode *N0,
6106                                           const SDNode *N1) const {
6107   EVT VT = N0->getValueType(0);
6108 
6109   // Only do this if we are not trying to support denormals. v_mad_f32 does not
6110   // support denormals ever.
6111   if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
6112       (VT == MVT::f16 && !Subtarget->hasFP16Denormals()))
6113     return ISD::FMAD;
6114 
6115   const TargetOptions &Options = DAG.getTarget().Options;
6116   if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
6117        (N0->getFlags().hasUnsafeAlgebra() &&
6118         N1->getFlags().hasUnsafeAlgebra())) &&
6119       isFMAFasterThanFMulAndFAdd(VT)) {
6120     return ISD::FMA;
6121   }
6122 
6123   return 0;
6124 }
6125 
6126 static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
6127                            EVT VT,
6128                            SDValue N0, SDValue N1, SDValue N2,
6129                            bool Signed) {
6130   unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
6131   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
6132   SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
6133   return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
6134 }
6135 
6136 SDValue SITargetLowering::performAddCombine(SDNode *N,
6137                                             DAGCombinerInfo &DCI) const {
6138   SelectionDAG &DAG = DCI.DAG;
6139   EVT VT = N->getValueType(0);
6140   SDLoc SL(N);
6141   SDValue LHS = N->getOperand(0);
6142   SDValue RHS = N->getOperand(1);
6143 
6144   if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
6145       && Subtarget->hasMad64_32() &&
6146       !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
6147       VT.getScalarSizeInBits() <= 64) {
6148     if (LHS.getOpcode() != ISD::MUL)
6149       std::swap(LHS, RHS);
6150 
6151     SDValue MulLHS = LHS.getOperand(0);
6152     SDValue MulRHS = LHS.getOperand(1);
6153     SDValue AddRHS = RHS;
6154 
6155     // TODO: Maybe restrict if SGPR inputs.
6156     if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
6157         numBitsUnsigned(MulRHS, DAG) <= 32) {
6158       MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
6159       MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
6160       AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
6161       return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
6162     }
6163 
6164     if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
6165       MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
6166       MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
6167       AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
6168       return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
6169     }
6170 
6171     return SDValue();
6172   }
6173 
6174   if (VT != MVT::i32)
6175     return SDValue();
6176 
6177   // add x, zext (setcc) => addcarry x, 0, setcc
6178   // add x, sext (setcc) => subcarry x, 0, setcc
6179   unsigned Opc = LHS.getOpcode();
6180   if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
6181       Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
6182     std::swap(RHS, LHS);
6183 
6184   Opc = RHS.getOpcode();
6185   switch (Opc) {
6186   default: break;
6187   case ISD::ZERO_EXTEND:
6188   case ISD::SIGN_EXTEND:
6189   case ISD::ANY_EXTEND: {
6190     auto Cond = RHS.getOperand(0);
6191     if (!isBoolSGPR(Cond))
6192       break;
6193     SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
6194     SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
6195     Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
6196     return DAG.getNode(Opc, SL, VTList, Args);
6197   }
6198   case ISD::ADDCARRY: {
6199     // add x, (addcarry y, 0, cc) => addcarry x, y, cc
6200     auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
6201     if (!C || C->getZExtValue() != 0) break;
6202     SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
6203     return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
6204   }
6205   }
6206   return SDValue();
6207 }
6208 
6209 SDValue SITargetLowering::performSubCombine(SDNode *N,
6210                                             DAGCombinerInfo &DCI) const {
6211   SelectionDAG &DAG = DCI.DAG;
6212   EVT VT = N->getValueType(0);
6213 
6214   if (VT != MVT::i32)
6215     return SDValue();
6216 
6217   SDLoc SL(N);
6218   SDValue LHS = N->getOperand(0);
6219   SDValue RHS = N->getOperand(1);
6220 
6221   unsigned Opc = LHS.getOpcode();
6222   if (Opc != ISD::SUBCARRY)
6223     std::swap(RHS, LHS);
6224 
6225   if (LHS.getOpcode() == ISD::SUBCARRY) {
6226     // sub (subcarry x, 0, cc), y => subcarry x, y, cc
6227     auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
6228     if (!C || C->getZExtValue() != 0)
6229       return SDValue();
6230     SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
6231     return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
6232   }
6233   return SDValue();
6234 }
6235 
6236 SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
6237   DAGCombinerInfo &DCI) const {
6238 
6239   if (N->getValueType(0) != MVT::i32)
6240     return SDValue();
6241 
6242   auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
6243   if (!C || C->getZExtValue() != 0)
6244     return SDValue();
6245 
6246   SelectionDAG &DAG = DCI.DAG;
6247   SDValue LHS = N->getOperand(0);
6248 
6249   // addcarry (add x, y), 0, cc => addcarry x, y, cc
6250   // subcarry (sub x, y), 0, cc => subcarry x, y, cc
6251   unsigned LHSOpc = LHS.getOpcode();
6252   unsigned Opc = N->getOpcode();
6253   if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
6254       (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
6255     SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
6256     return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
6257   }
6258   return SDValue();
6259 }
6260 
6261 SDValue SITargetLowering::performFAddCombine(SDNode *N,
6262                                              DAGCombinerInfo &DCI) const {
6263   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
6264     return SDValue();
6265 
6266   SelectionDAG &DAG = DCI.DAG;
6267   EVT VT = N->getValueType(0);
6268 
6269   SDLoc SL(N);
6270   SDValue LHS = N->getOperand(0);
6271   SDValue RHS = N->getOperand(1);
6272 
6273   // These should really be instruction patterns, but writing patterns with
6274   // source modiifiers is a pain.
6275 
6276   // fadd (fadd (a, a), b) -> mad 2.0, a, b
6277   if (LHS.getOpcode() == ISD::FADD) {
6278     SDValue A = LHS.getOperand(0);
6279     if (A == LHS.getOperand(1)) {
6280       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
6281       if (FusedOp != 0) {
6282         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
6283         return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
6284       }
6285     }
6286   }
6287 
6288   // fadd (b, fadd (a, a)) -> mad 2.0, a, b
6289   if (RHS.getOpcode() == ISD::FADD) {
6290     SDValue A = RHS.getOperand(0);
6291     if (A == RHS.getOperand(1)) {
6292       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
6293       if (FusedOp != 0) {
6294         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
6295         return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
6296       }
6297     }
6298   }
6299 
6300   return SDValue();
6301 }
6302 
6303 SDValue SITargetLowering::performFSubCombine(SDNode *N,
6304                                              DAGCombinerInfo &DCI) const {
6305   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
6306     return SDValue();
6307 
6308   SelectionDAG &DAG = DCI.DAG;
6309   SDLoc SL(N);
6310   EVT VT = N->getValueType(0);
6311   assert(!VT.isVector());
6312 
6313   // Try to get the fneg to fold into the source modifier. This undoes generic
6314   // DAG combines and folds them into the mad.
6315   //
6316   // Only do this if we are not trying to support denormals. v_mad_f32 does
6317   // not support denormals ever.
6318   SDValue LHS = N->getOperand(0);
6319   SDValue RHS = N->getOperand(1);
6320   if (LHS.getOpcode() == ISD::FADD) {
6321     // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
6322     SDValue A = LHS.getOperand(0);
6323     if (A == LHS.getOperand(1)) {
6324       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
6325       if (FusedOp != 0){
6326         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
6327         SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
6328 
6329         return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
6330       }
6331     }
6332   }
6333 
6334   if (RHS.getOpcode() == ISD::FADD) {
6335     // (fsub c, (fadd a, a)) -> mad -2.0, a, c
6336 
6337     SDValue A = RHS.getOperand(0);
6338     if (A == RHS.getOperand(1)) {
6339       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
6340       if (FusedOp != 0){
6341         const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
6342         return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
6343       }
6344     }
6345   }
6346 
6347   return SDValue();
6348 }
6349 
6350 SDValue SITargetLowering::performSetCCCombine(SDNode *N,
6351                                               DAGCombinerInfo &DCI) const {
6352   SelectionDAG &DAG = DCI.DAG;
6353   SDLoc SL(N);
6354 
6355   SDValue LHS = N->getOperand(0);
6356   SDValue RHS = N->getOperand(1);
6357   EVT VT = LHS.getValueType();
6358   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
6359 
6360   auto CRHS = dyn_cast<ConstantSDNode>(RHS);
6361   if (!CRHS) {
6362     CRHS = dyn_cast<ConstantSDNode>(LHS);
6363     if (CRHS) {
6364       std::swap(LHS, RHS);
6365       CC = getSetCCSwappedOperands(CC);
6366     }
6367   }
6368 
6369   if (CRHS && VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
6370       isBoolSGPR(LHS.getOperand(0))) {
6371     // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
6372     // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
6373     // setcc (sext from i1 cc),  0, eq|sge|ule) => not cc => xor cc, -1
6374     // setcc (sext from i1 cc),  0, ne|ugt|slt) => cc
6375     if ((CRHS->isAllOnesValue() &&
6376          (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
6377         (CRHS->isNullValue() &&
6378          (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
6379       return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
6380                          DAG.getConstant(-1, SL, MVT::i1));
6381     if ((CRHS->isAllOnesValue() &&
6382          (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
6383         (CRHS->isNullValue() &&
6384          (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
6385       return LHS.getOperand(0);
6386   }
6387 
6388   if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
6389                                            VT != MVT::f16))
6390     return SDValue();
6391 
6392   // Match isinf pattern
6393   // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
6394   if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) {
6395     const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
6396     if (!CRHS)
6397       return SDValue();
6398 
6399     const APFloat &APF = CRHS->getValueAPF();
6400     if (APF.isInfinity() && !APF.isNegative()) {
6401       unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
6402       return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
6403                          DAG.getConstant(Mask, SL, MVT::i32));
6404     }
6405   }
6406 
6407   return SDValue();
6408 }
6409 
6410 SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
6411                                                      DAGCombinerInfo &DCI) const {
6412   SelectionDAG &DAG = DCI.DAG;
6413   SDLoc SL(N);
6414   unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
6415 
6416   SDValue Src = N->getOperand(0);
6417   SDValue Srl = N->getOperand(0);
6418   if (Srl.getOpcode() == ISD::ZERO_EXTEND)
6419     Srl = Srl.getOperand(0);
6420 
6421   // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
6422   if (Srl.getOpcode() == ISD::SRL) {
6423     // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
6424     // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
6425     // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
6426 
6427     if (const ConstantSDNode *C =
6428         dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
6429       Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
6430                                EVT(MVT::i32));
6431 
6432       unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
6433       if (SrcOffset < 32 && SrcOffset % 8 == 0) {
6434         return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
6435                            MVT::f32, Srl);
6436       }
6437     }
6438   }
6439 
6440   APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
6441 
6442   KnownBits Known;
6443   TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
6444                                         !DCI.isBeforeLegalizeOps());
6445   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6446   if (TLI.ShrinkDemandedConstant(Src, Demanded, TLO) ||
6447       TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
6448     DCI.CommitTargetLoweringOpt(TLO);
6449   }
6450 
6451   return SDValue();
6452 }
6453 
6454 SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
6455                                             DAGCombinerInfo &DCI) const {
6456   switch (N->getOpcode()) {
6457   default:
6458     return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
6459   case ISD::ADD:
6460     return performAddCombine(N, DCI);
6461   case ISD::SUB:
6462     return performSubCombine(N, DCI);
6463   case ISD::ADDCARRY:
6464   case ISD::SUBCARRY:
6465     return performAddCarrySubCarryCombine(N, DCI);
6466   case ISD::FADD:
6467     return performFAddCombine(N, DCI);
6468   case ISD::FSUB:
6469     return performFSubCombine(N, DCI);
6470   case ISD::SETCC:
6471     return performSetCCCombine(N, DCI);
6472   case ISD::FMAXNUM:
6473   case ISD::FMINNUM:
6474   case ISD::SMAX:
6475   case ISD::SMIN:
6476   case ISD::UMAX:
6477   case ISD::UMIN:
6478   case AMDGPUISD::FMIN_LEGACY:
6479   case AMDGPUISD::FMAX_LEGACY: {
6480     if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
6481         getTargetMachine().getOptLevel() > CodeGenOpt::None)
6482       return performMinMaxCombine(N, DCI);
6483     break;
6484   }
6485   case ISD::LOAD:
6486   case ISD::STORE:
6487   case ISD::ATOMIC_LOAD:
6488   case ISD::ATOMIC_STORE:
6489   case ISD::ATOMIC_CMP_SWAP:
6490   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
6491   case ISD::ATOMIC_SWAP:
6492   case ISD::ATOMIC_LOAD_ADD:
6493   case ISD::ATOMIC_LOAD_SUB:
6494   case ISD::ATOMIC_LOAD_AND:
6495   case ISD::ATOMIC_LOAD_OR:
6496   case ISD::ATOMIC_LOAD_XOR:
6497   case ISD::ATOMIC_LOAD_NAND:
6498   case ISD::ATOMIC_LOAD_MIN:
6499   case ISD::ATOMIC_LOAD_MAX:
6500   case ISD::ATOMIC_LOAD_UMIN:
6501   case ISD::ATOMIC_LOAD_UMAX:
6502   case AMDGPUISD::ATOMIC_INC:
6503   case AMDGPUISD::ATOMIC_DEC: // TODO: Target mem intrinsics.
6504     if (DCI.isBeforeLegalize())
6505       break;
6506     return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
6507   case ISD::AND:
6508     return performAndCombine(N, DCI);
6509   case ISD::OR:
6510     return performOrCombine(N, DCI);
6511   case ISD::XOR:
6512     return performXorCombine(N, DCI);
6513   case ISD::ZERO_EXTEND:
6514     return performZeroExtendCombine(N, DCI);
6515   case AMDGPUISD::FP_CLASS:
6516     return performClassCombine(N, DCI);
6517   case ISD::FCANONICALIZE:
6518     return performFCanonicalizeCombine(N, DCI);
6519   case AMDGPUISD::FRACT:
6520   case AMDGPUISD::RCP:
6521   case AMDGPUISD::RSQ:
6522   case AMDGPUISD::RCP_LEGACY:
6523   case AMDGPUISD::RSQ_LEGACY:
6524   case AMDGPUISD::RSQ_CLAMP:
6525   case AMDGPUISD::LDEXP: {
6526     SDValue Src = N->getOperand(0);
6527     if (Src.isUndef())
6528       return Src;
6529     break;
6530   }
6531   case ISD::SINT_TO_FP:
6532   case ISD::UINT_TO_FP:
6533     return performUCharToFloatCombine(N, DCI);
6534   case AMDGPUISD::CVT_F32_UBYTE0:
6535   case AMDGPUISD::CVT_F32_UBYTE1:
6536   case AMDGPUISD::CVT_F32_UBYTE2:
6537   case AMDGPUISD::CVT_F32_UBYTE3:
6538     return performCvtF32UByteNCombine(N, DCI);
6539   case AMDGPUISD::FMED3:
6540     return performFMed3Combine(N, DCI);
6541   case AMDGPUISD::CVT_PKRTZ_F16_F32:
6542     return performCvtPkRTZCombine(N, DCI);
6543   case ISD::SCALAR_TO_VECTOR: {
6544     SelectionDAG &DAG = DCI.DAG;
6545     EVT VT = N->getValueType(0);
6546 
6547     // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
6548     if (VT == MVT::v2i16 || VT == MVT::v2f16) {
6549       SDLoc SL(N);
6550       SDValue Src = N->getOperand(0);
6551       EVT EltVT = Src.getValueType();
6552       if (EltVT == MVT::f16)
6553         Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
6554 
6555       SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
6556       return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
6557     }
6558 
6559     break;
6560   }
6561   case ISD::EXTRACT_VECTOR_ELT:
6562     return performExtractVectorEltCombine(N, DCI);
6563   case ISD::BUILD_VECTOR:
6564     return performBuildVectorCombine(N, DCI);
6565   }
6566   return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
6567 }
6568 
6569 /// \brief Helper function for adjustWritemask
6570 static unsigned SubIdx2Lane(unsigned Idx) {
6571   switch (Idx) {
6572   default: return 0;
6573   case AMDGPU::sub0: return 0;
6574   case AMDGPU::sub1: return 1;
6575   case AMDGPU::sub2: return 2;
6576   case AMDGPU::sub3: return 3;
6577   }
6578 }
6579 
6580 /// \brief Adjust the writemask of MIMG instructions
6581 void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
6582                                        SelectionDAG &DAG) const {
6583   SDNode *Users[4] = { };
6584   unsigned Lane = 0;
6585   unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 2 : 3;
6586   unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
6587   unsigned NewDmask = 0;
6588 
6589   // Try to figure out the used register components
6590   for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
6591        I != E; ++I) {
6592 
6593     // Don't look at users of the chain.
6594     if (I.getUse().getResNo() != 0)
6595       continue;
6596 
6597     // Abort if we can't understand the usage
6598     if (!I->isMachineOpcode() ||
6599         I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
6600       return;
6601 
6602     // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
6603     // Note that subregs are packed, i.e. Lane==0 is the first bit set
6604     // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
6605     // set, etc.
6606     Lane = SubIdx2Lane(I->getConstantOperandVal(1));
6607 
6608     // Set which texture component corresponds to the lane.
6609     unsigned Comp;
6610     for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
6611       assert(Dmask);
6612       Comp = countTrailingZeros(Dmask);
6613       Dmask &= ~(1 << Comp);
6614     }
6615 
6616     // Abort if we have more than one user per component
6617     if (Users[Lane])
6618       return;
6619 
6620     Users[Lane] = *I;
6621     NewDmask |= 1 << Comp;
6622   }
6623 
6624   // Abort if there's no change
6625   if (NewDmask == OldDmask)
6626     return;
6627 
6628   // Adjust the writemask in the node
6629   std::vector<SDValue> Ops;
6630   Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
6631   Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
6632   Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
6633   Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops);
6634 
6635   // If we only got one lane, replace it with a copy
6636   // (if NewDmask has only one bit set...)
6637   if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
6638     SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, SDLoc(),
6639                                        MVT::i32);
6640     SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
6641                                       SDLoc(), Users[Lane]->getValueType(0),
6642                                       SDValue(Node, 0), RC);
6643     DAG.ReplaceAllUsesWith(Users[Lane], Copy);
6644     return;
6645   }
6646 
6647   // Update the users of the node with the new indices
6648   for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
6649     SDNode *User = Users[i];
6650     if (!User)
6651       continue;
6652 
6653     SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
6654     DAG.UpdateNodeOperands(User, User->getOperand(0), Op);
6655 
6656     switch (Idx) {
6657     default: break;
6658     case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
6659     case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
6660     case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
6661     }
6662   }
6663 }
6664 
6665 static bool isFrameIndexOp(SDValue Op) {
6666   if (Op.getOpcode() == ISD::AssertZext)
6667     Op = Op.getOperand(0);
6668 
6669   return isa<FrameIndexSDNode>(Op);
6670 }
6671 
6672 /// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
6673 /// with frame index operands.
6674 /// LLVM assumes that inputs are to these instructions are registers.
6675 SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
6676                                                         SelectionDAG &DAG) const {
6677   if (Node->getOpcode() == ISD::CopyToReg) {
6678     RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
6679     SDValue SrcVal = Node->getOperand(2);
6680 
6681     // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
6682     // to try understanding copies to physical registers.
6683     if (SrcVal.getValueType() == MVT::i1 &&
6684         TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
6685       SDLoc SL(Node);
6686       MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
6687       SDValue VReg = DAG.getRegister(
6688         MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
6689 
6690       SDNode *Glued = Node->getGluedNode();
6691       SDValue ToVReg
6692         = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
6693                          SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
6694       SDValue ToResultReg
6695         = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
6696                            VReg, ToVReg.getValue(1));
6697       DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
6698       DAG.RemoveDeadNode(Node);
6699       return ToResultReg.getNode();
6700     }
6701   }
6702 
6703   SmallVector<SDValue, 8> Ops;
6704   for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
6705     if (!isFrameIndexOp(Node->getOperand(i))) {
6706       Ops.push_back(Node->getOperand(i));
6707       continue;
6708     }
6709 
6710     SDLoc DL(Node);
6711     Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
6712                                      Node->getOperand(i).getValueType(),
6713                                      Node->getOperand(i)), 0));
6714   }
6715 
6716   return DAG.UpdateNodeOperands(Node, Ops);
6717 }
6718 
6719 /// \brief Fold the instructions after selecting them.
6720 SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
6721                                           SelectionDAG &DAG) const {
6722   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
6723   unsigned Opcode = Node->getMachineOpcode();
6724 
6725   if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
6726       !TII->isGather4(Opcode))
6727     adjustWritemask(Node, DAG);
6728 
6729   if (Opcode == AMDGPU::INSERT_SUBREG ||
6730       Opcode == AMDGPU::REG_SEQUENCE) {
6731     legalizeTargetIndependentNode(Node, DAG);
6732     return Node;
6733   }
6734 
6735   switch (Opcode) {
6736   case AMDGPU::V_DIV_SCALE_F32:
6737   case AMDGPU::V_DIV_SCALE_F64: {
6738     // Satisfy the operand register constraint when one of the inputs is
6739     // undefined. Ordinarily each undef value will have its own implicit_def of
6740     // a vreg, so force these to use a single register.
6741     SDValue Src0 = Node->getOperand(0);
6742     SDValue Src1 = Node->getOperand(1);
6743     SDValue Src2 = Node->getOperand(2);
6744 
6745     if ((Src0.isMachineOpcode() &&
6746          Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
6747         (Src0 == Src1 || Src0 == Src2))
6748       break;
6749 
6750     MVT VT = Src0.getValueType().getSimpleVT();
6751     const TargetRegisterClass *RC = getRegClassFor(VT);
6752 
6753     MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
6754     SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
6755 
6756     SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
6757                                       UndefReg, Src0, SDValue());
6758 
6759     // src0 must be the same register as src1 or src2, even if the value is
6760     // undefined, so make sure we don't violate this constraint.
6761     if (Src0.isMachineOpcode() &&
6762         Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
6763       if (Src1.isMachineOpcode() &&
6764           Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
6765         Src0 = Src1;
6766       else if (Src2.isMachineOpcode() &&
6767                Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
6768         Src0 = Src2;
6769       else {
6770         assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
6771         Src0 = UndefReg;
6772         Src1 = UndefReg;
6773       }
6774     } else
6775       break;
6776 
6777     SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
6778     for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
6779       Ops.push_back(Node->getOperand(I));
6780 
6781     Ops.push_back(ImpDef.getValue(1));
6782     return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
6783   }
6784   default:
6785     break;
6786   }
6787 
6788   return Node;
6789 }
6790 
6791 /// \brief Assign the register class depending on the number of
6792 /// bits set in the writemask
6793 void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
6794                                                      SDNode *Node) const {
6795   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
6796 
6797   MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
6798 
6799   if (TII->isVOP3(MI.getOpcode())) {
6800     // Make sure constant bus requirements are respected.
6801     TII->legalizeOperandsVOP3(MRI, MI);
6802     return;
6803   }
6804 
6805   if (TII->isMIMG(MI)) {
6806     unsigned VReg = MI.getOperand(0).getReg();
6807     const TargetRegisterClass *RC = MRI.getRegClass(VReg);
6808     // TODO: Need mapping tables to handle other cases (register classes).
6809     if (RC != &AMDGPU::VReg_128RegClass)
6810       return;
6811 
6812     unsigned DmaskIdx = MI.getNumOperands() == 12 ? 3 : 4;
6813     unsigned Writemask = MI.getOperand(DmaskIdx).getImm();
6814     unsigned BitsSet = 0;
6815     for (unsigned i = 0; i < 4; ++i)
6816       BitsSet += Writemask & (1 << i) ? 1 : 0;
6817     switch (BitsSet) {
6818     default: return;
6819     case 1:  RC = &AMDGPU::VGPR_32RegClass; break;
6820     case 2:  RC = &AMDGPU::VReg_64RegClass; break;
6821     case 3:  RC = &AMDGPU::VReg_96RegClass; break;
6822     }
6823 
6824     unsigned NewOpcode = TII->getMaskedMIMGOp(MI.getOpcode(), BitsSet);
6825     MI.setDesc(TII->get(NewOpcode));
6826     MRI.setRegClass(VReg, RC);
6827     return;
6828   }
6829 
6830   // Replace unused atomics with the no return version.
6831   int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
6832   if (NoRetAtomicOp != -1) {
6833     if (!Node->hasAnyUseOfValue(0)) {
6834       MI.setDesc(TII->get(NoRetAtomicOp));
6835       MI.RemoveOperand(0);
6836       return;
6837     }
6838 
6839     // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
6840     // instruction, because the return type of these instructions is a vec2 of
6841     // the memory type, so it can be tied to the input operand.
6842     // This means these instructions always have a use, so we need to add a
6843     // special case to check if the atomic has only one extract_subreg use,
6844     // which itself has no uses.
6845     if ((Node->hasNUsesOfValue(1, 0) &&
6846          Node->use_begin()->isMachineOpcode() &&
6847          Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
6848          !Node->use_begin()->hasAnyUseOfValue(0))) {
6849       unsigned Def = MI.getOperand(0).getReg();
6850 
6851       // Change this into a noret atomic.
6852       MI.setDesc(TII->get(NoRetAtomicOp));
6853       MI.RemoveOperand(0);
6854 
6855       // If we only remove the def operand from the atomic instruction, the
6856       // extract_subreg will be left with a use of a vreg without a def.
6857       // So we need to insert an implicit_def to avoid machine verifier
6858       // errors.
6859       BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
6860               TII->get(AMDGPU::IMPLICIT_DEF), Def);
6861     }
6862     return;
6863   }
6864 }
6865 
6866 static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
6867                               uint64_t Val) {
6868   SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
6869   return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
6870 }
6871 
6872 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
6873                                                 const SDLoc &DL,
6874                                                 SDValue Ptr) const {
6875   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
6876 
6877   // Build the half of the subregister with the constants before building the
6878   // full 128-bit register. If we are building multiple resource descriptors,
6879   // this will allow CSEing of the 2-component register.
6880   const SDValue Ops0[] = {
6881     DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
6882     buildSMovImm32(DAG, DL, 0),
6883     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
6884     buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
6885     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
6886   };
6887 
6888   SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
6889                                                 MVT::v2i32, Ops0), 0);
6890 
6891   // Combine the constants and the pointer.
6892   const SDValue Ops1[] = {
6893     DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
6894     Ptr,
6895     DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
6896     SubRegHi,
6897     DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
6898   };
6899 
6900   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
6901 }
6902 
6903 /// \brief Return a resource descriptor with the 'Add TID' bit enabled
6904 ///        The TID (Thread ID) is multiplied by the stride value (bits [61:48]
6905 ///        of the resource descriptor) to create an offset, which is added to
6906 ///        the resource pointer.
6907 MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
6908                                            SDValue Ptr, uint32_t RsrcDword1,
6909                                            uint64_t RsrcDword2And3) const {
6910   SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
6911   SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
6912   if (RsrcDword1) {
6913     PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
6914                                      DAG.getConstant(RsrcDword1, DL, MVT::i32)),
6915                     0);
6916   }
6917 
6918   SDValue DataLo = buildSMovImm32(DAG, DL,
6919                                   RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
6920   SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
6921 
6922   const SDValue Ops[] = {
6923     DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
6924     PtrLo,
6925     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
6926     PtrHi,
6927     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
6928     DataLo,
6929     DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
6930     DataHi,
6931     DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
6932   };
6933 
6934   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
6935 }
6936 
6937 //===----------------------------------------------------------------------===//
6938 //                         SI Inline Assembly Support
6939 //===----------------------------------------------------------------------===//
6940 
6941 std::pair<unsigned, const TargetRegisterClass *>
6942 SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
6943                                                StringRef Constraint,
6944                                                MVT VT) const {
6945   if (!isTypeLegal(VT))
6946     return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
6947 
6948   if (Constraint.size() == 1) {
6949     switch (Constraint[0]) {
6950     case 's':
6951     case 'r':
6952       switch (VT.getSizeInBits()) {
6953       default:
6954         return std::make_pair(0U, nullptr);
6955       case 32:
6956       case 16:
6957         return std::make_pair(0U, &AMDGPU::SReg_32_XM0RegClass);
6958       case 64:
6959         return std::make_pair(0U, &AMDGPU::SGPR_64RegClass);
6960       case 128:
6961         return std::make_pair(0U, &AMDGPU::SReg_128RegClass);
6962       case 256:
6963         return std::make_pair(0U, &AMDGPU::SReg_256RegClass);
6964       case 512:
6965         return std::make_pair(0U, &AMDGPU::SReg_512RegClass);
6966       }
6967 
6968     case 'v':
6969       switch (VT.getSizeInBits()) {
6970       default:
6971         return std::make_pair(0U, nullptr);
6972       case 32:
6973       case 16:
6974         return std::make_pair(0U, &AMDGPU::VGPR_32RegClass);
6975       case 64:
6976         return std::make_pair(0U, &AMDGPU::VReg_64RegClass);
6977       case 96:
6978         return std::make_pair(0U, &AMDGPU::VReg_96RegClass);
6979       case 128:
6980         return std::make_pair(0U, &AMDGPU::VReg_128RegClass);
6981       case 256:
6982         return std::make_pair(0U, &AMDGPU::VReg_256RegClass);
6983       case 512:
6984         return std::make_pair(0U, &AMDGPU::VReg_512RegClass);
6985       }
6986     }
6987   }
6988 
6989   if (Constraint.size() > 1) {
6990     const TargetRegisterClass *RC = nullptr;
6991     if (Constraint[1] == 'v') {
6992       RC = &AMDGPU::VGPR_32RegClass;
6993     } else if (Constraint[1] == 's') {
6994       RC = &AMDGPU::SGPR_32RegClass;
6995     }
6996 
6997     if (RC) {
6998       uint32_t Idx;
6999       bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
7000       if (!Failed && Idx < RC->getNumRegs())
7001         return std::make_pair(RC->getRegister(Idx), RC);
7002     }
7003   }
7004   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
7005 }
7006 
7007 SITargetLowering::ConstraintType
7008 SITargetLowering::getConstraintType(StringRef Constraint) const {
7009   if (Constraint.size() == 1) {
7010     switch (Constraint[0]) {
7011     default: break;
7012     case 's':
7013     case 'v':
7014       return C_RegisterClass;
7015     }
7016   }
7017   return TargetLowering::getConstraintType(Constraint);
7018 }
7019 
7020 // Figure out which registers should be reserved for stack access. Only after
7021 // the function is legalized do we know all of the non-spill stack objects or if
7022 // calls are present.
7023 void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
7024   MachineRegisterInfo &MRI = MF.getRegInfo();
7025   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
7026   const MachineFrameInfo &MFI = MF.getFrameInfo();
7027   const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
7028   const SIRegisterInfo *TRI = ST.getRegisterInfo();
7029 
7030   if (Info->isEntryFunction()) {
7031     // Callable functions have fixed registers used for stack access.
7032     reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
7033   }
7034 
7035   // We have to assume the SP is needed in case there are calls in the function
7036   // during lowering. Calls are only detected after the function is
7037   // lowered. We're about to reserve registers, so don't bother using it if we
7038   // aren't really going to use it.
7039   bool NeedSP = !Info->isEntryFunction() ||
7040     MFI.hasVarSizedObjects() ||
7041     MFI.hasCalls();
7042 
7043   if (NeedSP) {
7044     unsigned ReservedStackPtrOffsetReg = TRI->reservedStackPtrOffsetReg(MF);
7045     Info->setStackPtrOffsetReg(ReservedStackPtrOffsetReg);
7046 
7047     assert(Info->getStackPtrOffsetReg() != Info->getFrameOffsetReg());
7048     assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
7049                                Info->getStackPtrOffsetReg()));
7050     MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
7051   }
7052 
7053   MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
7054   MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
7055   MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
7056                      Info->getScratchWaveOffsetReg());
7057 
7058   TargetLoweringBase::finalizeLowering(MF);
7059 }
7060 
7061 void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
7062                                                      KnownBits &Known,
7063                                                      const APInt &DemandedElts,
7064                                                      const SelectionDAG &DAG,
7065                                                      unsigned Depth) const {
7066   TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
7067                                                 DAG, Depth);
7068 
7069   if (getSubtarget()->enableHugePrivateBuffer())
7070     return;
7071 
7072   // Technically it may be possible to have a dispatch with a single workitem
7073   // that uses the full private memory size, but that's not really useful. We
7074   // can't use vaddr in MUBUF instructions if we don't know the address
7075   // calculation won't overflow, so assume the sign bit is never set.
7076   Known.Zero.setHighBits(AssumeFrameIndexHighZeroBits);
7077 }
7078