1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Custom DAG lowering for SI
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #if defined(_MSC_VER) || defined(__MINGW32__)
15 // Provide M_PI.
16 #define _USE_MATH_DEFINES
17 #endif
18 
19 #include "SIISelLowering.h"
20 #include "AMDGPU.h"
21 #include "AMDGPUSubtarget.h"
22 #include "AMDGPUTargetMachine.h"
23 #include "SIDefines.h"
24 #include "SIInstrInfo.h"
25 #include "SIMachineFunctionInfo.h"
26 #include "SIRegisterInfo.h"
27 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
28 #include "Utils/AMDGPUBaseInfo.h"
29 #include "llvm/ADT/APFloat.h"
30 #include "llvm/ADT/APInt.h"
31 #include "llvm/ADT/ArrayRef.h"
32 #include "llvm/ADT/BitVector.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringRef.h"
36 #include "llvm/ADT/StringSwitch.h"
37 #include "llvm/ADT/Twine.h"
38 #include "llvm/CodeGen/Analysis.h"
39 #include "llvm/CodeGen/CallingConvLower.h"
40 #include "llvm/CodeGen/DAGCombine.h"
41 #include "llvm/CodeGen/ISDOpcodes.h"
42 #include "llvm/CodeGen/MachineBasicBlock.h"
43 #include "llvm/CodeGen/MachineFrameInfo.h"
44 #include "llvm/CodeGen/MachineFunction.h"
45 #include "llvm/CodeGen/MachineInstr.h"
46 #include "llvm/CodeGen/MachineInstrBuilder.h"
47 #include "llvm/CodeGen/MachineMemOperand.h"
48 #include "llvm/CodeGen/MachineModuleInfo.h"
49 #include "llvm/CodeGen/MachineOperand.h"
50 #include "llvm/CodeGen/MachineRegisterInfo.h"
51 #include "llvm/CodeGen/SelectionDAG.h"
52 #include "llvm/CodeGen/SelectionDAGNodes.h"
53 #include "llvm/CodeGen/TargetCallingConv.h"
54 #include "llvm/CodeGen/TargetRegisterInfo.h"
55 #include "llvm/CodeGen/ValueTypes.h"
56 #include "llvm/IR/Constants.h"
57 #include "llvm/IR/DataLayout.h"
58 #include "llvm/IR/DebugLoc.h"
59 #include "llvm/IR/DerivedTypes.h"
60 #include "llvm/IR/DiagnosticInfo.h"
61 #include "llvm/IR/Function.h"
62 #include "llvm/IR/GlobalValue.h"
63 #include "llvm/IR/InstrTypes.h"
64 #include "llvm/IR/Instruction.h"
65 #include "llvm/IR/Instructions.h"
66 #include "llvm/IR/IntrinsicInst.h"
67 #include "llvm/IR/Type.h"
68 #include "llvm/Support/Casting.h"
69 #include "llvm/Support/CodeGen.h"
70 #include "llvm/Support/CommandLine.h"
71 #include "llvm/Support/Compiler.h"
72 #include "llvm/Support/ErrorHandling.h"
73 #include "llvm/Support/KnownBits.h"
74 #include "llvm/Support/MachineValueType.h"
75 #include "llvm/Support/MathExtras.h"
76 #include "llvm/Target/TargetOptions.h"
77 #include <cassert>
78 #include <cmath>
79 #include <cstdint>
80 #include <iterator>
81 #include <tuple>
82 #include <utility>
83 #include <vector>
84 
85 using namespace llvm;
86 
87 #define DEBUG_TYPE "si-lower"
88 
89 STATISTIC(NumTailCalls, "Number of tail calls");
90 
91 static cl::opt<bool> EnableVGPRIndexMode(
92   "amdgpu-vgpr-index-mode",
93   cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
94   cl::init(false));
95 
96 static cl::opt<unsigned> AssumeFrameIndexHighZeroBits(
97   "amdgpu-frame-index-zero-bits",
98   cl::desc("High bits of frame index assumed to be zero"),
99   cl::init(5),
100   cl::ReallyHidden);
101 
102 static unsigned findFirstFreeSGPR(CCState &CCInfo) {
103   unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
104   for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
105     if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
106       return AMDGPU::SGPR0 + Reg;
107     }
108   }
109   llvm_unreachable("Cannot allocate sgpr");
110 }
111 
112 SITargetLowering::SITargetLowering(const TargetMachine &TM,
113                                    const GCNSubtarget &STI)
114     : AMDGPUTargetLowering(TM, STI),
115       Subtarget(&STI) {
116   addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
117   addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
118 
119   addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
120   addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);
121 
122   addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
123   addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
124   addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
125 
126   addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
127   addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);
128 
129   addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
130   addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
131 
132   addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
133   addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
134 
135   addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
136   addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
137 
138   if (Subtarget->has16BitInsts()) {
139     addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
140     addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
141 
    // Unless there are also VOP3P operations, no operations are really legal.
143     addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
144     addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
145     addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
146     addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
147   }
148 
149   computeRegisterProperties(Subtarget->getRegisterInfo());
150 
  // We need to custom lower vector loads and stores from local memory.
152   setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
153   setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
154   setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
155   setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
156   setOperationAction(ISD::LOAD, MVT::i1, Custom);
157   setOperationAction(ISD::LOAD, MVT::v32i32, Custom);
158 
159   setOperationAction(ISD::STORE, MVT::v2i32, Custom);
160   setOperationAction(ISD::STORE, MVT::v4i32, Custom);
161   setOperationAction(ISD::STORE, MVT::v8i32, Custom);
162   setOperationAction(ISD::STORE, MVT::v16i32, Custom);
163   setOperationAction(ISD::STORE, MVT::i1, Custom);
164   setOperationAction(ISD::STORE, MVT::v32i32, Custom);
165 
166   setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
167   setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
168   setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
169   setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
170   setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
171   setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
172   setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
173   setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
174   setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
175   setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);
176 
177   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
178   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
179 
180   setOperationAction(ISD::SELECT, MVT::i1, Promote);
181   setOperationAction(ISD::SELECT, MVT::i64, Custom);
182   setOperationAction(ISD::SELECT, MVT::f64, Promote);
183   AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);
184 
185   setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
186   setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
187   setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
188   setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
189   setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
190 
191   setOperationAction(ISD::SETCC, MVT::i1, Promote);
192   setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
193   setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
194   AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
195 
196   setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
197   setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
198 
199   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
200   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
201   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
202   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
203   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
204   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
205   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);
206 
207   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
208   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
209   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
210   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
211   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom);
212   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
213   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);
214 
215   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
216   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
217   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom);
218   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
219 
220   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
221   setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
222   setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
223   setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);
224 
225   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
226   setOperationAction(ISD::BR_CC, MVT::i1, Expand);
227   setOperationAction(ISD::BR_CC, MVT::i32, Expand);
228   setOperationAction(ISD::BR_CC, MVT::i64, Expand);
229   setOperationAction(ISD::BR_CC, MVT::f32, Expand);
230   setOperationAction(ISD::BR_CC, MVT::f64, Expand);
231 
232   setOperationAction(ISD::UADDO, MVT::i32, Legal);
233   setOperationAction(ISD::USUBO, MVT::i32, Legal);
234 
235   setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
236   setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);
237 
238   setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
239   setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
240   setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
241 
242 #if 0
243   setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
244   setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
245 #endif
246 
247   // We only support LOAD/STORE and vector manipulation ops for vectors
248   // with > 4 elements.
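  // The loop below defaults every generic ISD opcode to Expand for these wide
  // vector types, leaving the whitelisted memory and element-manipulation
  // operations untouched and marking CONCAT_VECTORS as Custom.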
249   for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
250         MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16, MVT::v32i32 }) {
251     for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
252       switch (Op) {
253       case ISD::LOAD:
254       case ISD::STORE:
255       case ISD::BUILD_VECTOR:
256       case ISD::BITCAST:
257       case ISD::EXTRACT_VECTOR_ELT:
258       case ISD::INSERT_VECTOR_ELT:
259       case ISD::INSERT_SUBVECTOR:
260       case ISD::EXTRACT_SUBVECTOR:
261       case ISD::SCALAR_TO_VECTOR:
262         break;
263       case ISD::CONCAT_VECTORS:
264         setOperationAction(Op, VT, Custom);
265         break;
266       default:
267         setOperationAction(Op, VT, Expand);
268         break;
269       }
270     }
271   }
272 
273   setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);
274 
275   // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
276   // is expanded to avoid having two separate loops in case the index is a VGPR.
277 
278   // Most operations are naturally 32-bit vector operations. We only support
279   // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
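  // In practice this means the legalizer handles these nodes by bitcasting to
  // the v4i32 forms registered below rather than scalarizing 64-bit elements.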
280   for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
281     setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
282     AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);
283 
284     setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
285     AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);
286 
287     setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
288     AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);
289 
290     setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
291     AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
292   }
293 
294   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
295   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
296   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
297   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);
298 
299   setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
300   setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
301 
302   // Avoid stack access for these.
303   // TODO: Generalize to more vector types.
304   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
305   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
306   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
307   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);
308 
309   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
310   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
311   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
312   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
313   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);
314 
315   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
316   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
317   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);
318 
319   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
320   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
321   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
322   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);
323 
324   // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
325   // and output demarshalling
326   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
327   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
328 
329   // We can't return success/failure, only the old value,
330   // let LLVM add the comparison
331   setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
332   setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);
333 
334   if (Subtarget->hasFlatAddressSpace()) {
335     setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
336     setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
337   }
338 
339   setOperationAction(ISD::BSWAP, MVT::i32, Legal);
340   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
341 
  // READCYCLECOUNTER is lowered to s_memtime on SI and s_memrealtime on VI.
343   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
344   setOperationAction(ISD::TRAP, MVT::Other, Custom);
345   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);
346 
347   if (Subtarget->has16BitInsts()) {
348     setOperationAction(ISD::FLOG, MVT::f16, Custom);
349     setOperationAction(ISD::FEXP, MVT::f16, Custom);
350     setOperationAction(ISD::FLOG10, MVT::f16, Custom);
351   }
352 
353   // v_mad_f32 does not support denormals according to some sources.
354   if (!Subtarget->hasFP32Denormals())
355     setOperationAction(ISD::FMAD, MVT::f32, Legal);
356 
357   if (!Subtarget->hasBFI()) {
358     // fcopysign can be done in a single instruction with BFI.
359     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
360     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
361   }
362 
363   if (!Subtarget->hasBCNT(32))
364     setOperationAction(ISD::CTPOP, MVT::i32, Expand);
365 
366   if (!Subtarget->hasBCNT(64))
367     setOperationAction(ISD::CTPOP, MVT::i64, Expand);
368 
369   if (Subtarget->hasFFBH())
370     setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
371 
372   if (Subtarget->hasFFBL())
373     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
374 
375   // We only really have 32-bit BFE instructions (and 16-bit on VI).
376   //
377   // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
378   // effort to match them now. We want this to be false for i64 cases when the
379   // extraction isn't restricted to the upper or lower half. Ideally we would
380   // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
381   // span the midpoint are probably relatively rare, so don't worry about them
382   // for now.
383   if (Subtarget->hasBFE())
384     setHasExtractBitsInsn(true);
385 
386   setOperationAction(ISD::FMINNUM, MVT::f32, Custom);
387   setOperationAction(ISD::FMAXNUM, MVT::f32, Custom);
388   setOperationAction(ISD::FMINNUM, MVT::f64, Custom);
389   setOperationAction(ISD::FMAXNUM, MVT::f64, Custom);
390 
391 
392   // These are really only legal for ieee_mode functions. We should be avoiding
393   // them for functions that don't have ieee_mode enabled, so just say they are
394   // legal.
395   setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
396   setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
397   setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
398   setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
399 
400 
401   if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
402     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
403     setOperationAction(ISD::FCEIL, MVT::f64, Legal);
404     setOperationAction(ISD::FRINT, MVT::f64, Legal);
405   } else {
406     setOperationAction(ISD::FCEIL, MVT::f64, Custom);
407     setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
408     setOperationAction(ISD::FRINT, MVT::f64, Custom);
409     setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
410   }
411 
412   setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
413 
414   setOperationAction(ISD::FSIN, MVT::f32, Custom);
415   setOperationAction(ISD::FCOS, MVT::f32, Custom);
416   setOperationAction(ISD::FDIV, MVT::f32, Custom);
417   setOperationAction(ISD::FDIV, MVT::f64, Custom);
418 
419   if (Subtarget->has16BitInsts()) {
420     setOperationAction(ISD::Constant, MVT::i16, Legal);
421 
422     setOperationAction(ISD::SMIN, MVT::i16, Legal);
423     setOperationAction(ISD::SMAX, MVT::i16, Legal);
424 
425     setOperationAction(ISD::UMIN, MVT::i16, Legal);
426     setOperationAction(ISD::UMAX, MVT::i16, Legal);
427 
428     setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
429     AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);
430 
431     setOperationAction(ISD::ROTR, MVT::i16, Promote);
432     setOperationAction(ISD::ROTL, MVT::i16, Promote);
433 
434     setOperationAction(ISD::SDIV, MVT::i16, Promote);
435     setOperationAction(ISD::UDIV, MVT::i16, Promote);
436     setOperationAction(ISD::SREM, MVT::i16, Promote);
437     setOperationAction(ISD::UREM, MVT::i16, Promote);
438 
439     setOperationAction(ISD::BSWAP, MVT::i16, Promote);
440     setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);
441 
442     setOperationAction(ISD::CTTZ, MVT::i16, Promote);
443     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
444     setOperationAction(ISD::CTLZ, MVT::i16, Promote);
445     setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
446     setOperationAction(ISD::CTPOP, MVT::i16, Promote);
447 
448     setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
449 
450     setOperationAction(ISD::BR_CC, MVT::i16, Expand);
451 
452     setOperationAction(ISD::LOAD, MVT::i16, Custom);
453 
454     setTruncStoreAction(MVT::i64, MVT::i16, Expand);
455 
456     setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
457     AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
458     setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
459     AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);
460 
461     setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
462     setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
463     setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
464     setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
465 
466     // F16 - Constant Actions.
467     setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
468 
469     // F16 - Load/Store Actions.
470     setOperationAction(ISD::LOAD, MVT::f16, Promote);
471     AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
472     setOperationAction(ISD::STORE, MVT::f16, Promote);
473     AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);
474 
475     // F16 - VOP1 Actions.
476     setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
477     setOperationAction(ISD::FCOS, MVT::f16, Promote);
478     setOperationAction(ISD::FSIN, MVT::f16, Promote);
479     setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
480     setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
481     setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
482     setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
483     setOperationAction(ISD::FROUND, MVT::f16, Custom);
484 
485     // F16 - VOP2 Actions.
486     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
487     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
488 
489     setOperationAction(ISD::FDIV, MVT::f16, Custom);
490 
491     // F16 - VOP3 Actions.
492     setOperationAction(ISD::FMA, MVT::f16, Legal);
493     if (!Subtarget->hasFP16Denormals())
494       setOperationAction(ISD::FMAD, MVT::f16, Legal);
495 
496     for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
497       for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
498         switch (Op) {
499         case ISD::LOAD:
500         case ISD::STORE:
501         case ISD::BUILD_VECTOR:
502         case ISD::BITCAST:
503         case ISD::EXTRACT_VECTOR_ELT:
504         case ISD::INSERT_VECTOR_ELT:
505         case ISD::INSERT_SUBVECTOR:
506         case ISD::EXTRACT_SUBVECTOR:
507         case ISD::SCALAR_TO_VECTOR:
508           break;
509         case ISD::CONCAT_VECTORS:
510           setOperationAction(Op, VT, Custom);
511           break;
512         default:
513           setOperationAction(Op, VT, Expand);
514           break;
515         }
516       }
517     }
518 
519     // XXX - Do these do anything? Vector constants turn into build_vector.
520     setOperationAction(ISD::Constant, MVT::v2i16, Legal);
521     setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);
522 
523     setOperationAction(ISD::UNDEF, MVT::v2i16, Legal);
524     setOperationAction(ISD::UNDEF, MVT::v2f16, Legal);
525 
526     setOperationAction(ISD::STORE, MVT::v2i16, Promote);
527     AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
528     setOperationAction(ISD::STORE, MVT::v2f16, Promote);
529     AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);
530 
531     setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
532     AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
533     setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
534     AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);
535 
536     setOperationAction(ISD::AND, MVT::v2i16, Promote);
537     AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
538     setOperationAction(ISD::OR, MVT::v2i16, Promote);
539     AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
540     setOperationAction(ISD::XOR, MVT::v2i16, Promote);
541     AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
542 
543     setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
544     AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
545     setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
546     AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);
547 
548     setOperationAction(ISD::STORE, MVT::v4i16, Promote);
549     AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
550     setOperationAction(ISD::STORE, MVT::v4f16, Promote);
551     AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);
552 
553     setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
554     setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
555     setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
556     setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
557 
558     setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand);
559     setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
560     setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);
561 
562     if (!Subtarget->hasVOP3PInsts()) {
563       setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
564       setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
565     }
566 
567     setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
568     // This isn't really legal, but this avoids the legalizer unrolling it (and
569     // allows matching fneg (fabs x) patterns)
570     setOperationAction(ISD::FABS, MVT::v2f16, Legal);
571 
572     setOperationAction(ISD::FMAXNUM, MVT::f16, Custom);
573     setOperationAction(ISD::FMINNUM, MVT::f16, Custom);
574     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal);
575     setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal);
576 
577     setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom);
578     setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom);
579 
580     setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand);
581     setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand);
582   }
583 
584   if (Subtarget->hasVOP3PInsts()) {
585     setOperationAction(ISD::ADD, MVT::v2i16, Legal);
586     setOperationAction(ISD::SUB, MVT::v2i16, Legal);
587     setOperationAction(ISD::MUL, MVT::v2i16, Legal);
588     setOperationAction(ISD::SHL, MVT::v2i16, Legal);
589     setOperationAction(ISD::SRL, MVT::v2i16, Legal);
590     setOperationAction(ISD::SRA, MVT::v2i16, Legal);
591     setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
592     setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
593     setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
594     setOperationAction(ISD::UMAX, MVT::v2i16, Legal);
595 
596     setOperationAction(ISD::FADD, MVT::v2f16, Legal);
597     setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
598     setOperationAction(ISD::FMA, MVT::v2f16, Legal);
599 
600     setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal);
601     setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal);
602 
603     setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);
604 
605     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
606     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
607 
608     setOperationAction(ISD::SHL, MVT::v4i16, Custom);
609     setOperationAction(ISD::SRA, MVT::v4i16, Custom);
610     setOperationAction(ISD::SRL, MVT::v4i16, Custom);
611     setOperationAction(ISD::ADD, MVT::v4i16, Custom);
612     setOperationAction(ISD::SUB, MVT::v4i16, Custom);
613     setOperationAction(ISD::MUL, MVT::v4i16, Custom);
614 
615     setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
616     setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
617     setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
618     setOperationAction(ISD::UMAX, MVT::v4i16, Custom);
619 
620     setOperationAction(ISD::FADD, MVT::v4f16, Custom);
621     setOperationAction(ISD::FMUL, MVT::v4f16, Custom);
622 
623     setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom);
624     setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom);
625 
626     setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
627     setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);
628     setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom);
629 
630     setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
631     setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
632     setOperationAction(ISD::SELECT, MVT::v4f16, Custom);
633   }
634 
635   setOperationAction(ISD::FNEG, MVT::v4f16, Custom);
636   setOperationAction(ISD::FABS, MVT::v4f16, Custom);
637 
638   if (Subtarget->has16BitInsts()) {
639     setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
640     AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
641     setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
642     AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
643   } else {
644     // Legalization hack.
645     setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
646     setOperationAction(ISD::SELECT, MVT::v2f16, Custom);
647 
648     setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
649     setOperationAction(ISD::FABS, MVT::v2f16, Custom);
650   }
651 
652   for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
653     setOperationAction(ISD::SELECT, VT, Custom);
654   }
655 
656   setTargetDAGCombine(ISD::ADD);
657   setTargetDAGCombine(ISD::ADDCARRY);
658   setTargetDAGCombine(ISD::SUB);
659   setTargetDAGCombine(ISD::SUBCARRY);
660   setTargetDAGCombine(ISD::FADD);
661   setTargetDAGCombine(ISD::FSUB);
662   setTargetDAGCombine(ISD::FMINNUM);
663   setTargetDAGCombine(ISD::FMAXNUM);
664   setTargetDAGCombine(ISD::FMINNUM_IEEE);
665   setTargetDAGCombine(ISD::FMAXNUM_IEEE);
666   setTargetDAGCombine(ISD::FMA);
667   setTargetDAGCombine(ISD::SMIN);
668   setTargetDAGCombine(ISD::SMAX);
669   setTargetDAGCombine(ISD::UMIN);
670   setTargetDAGCombine(ISD::UMAX);
671   setTargetDAGCombine(ISD::SETCC);
672   setTargetDAGCombine(ISD::AND);
673   setTargetDAGCombine(ISD::OR);
674   setTargetDAGCombine(ISD::XOR);
675   setTargetDAGCombine(ISD::SINT_TO_FP);
676   setTargetDAGCombine(ISD::UINT_TO_FP);
677   setTargetDAGCombine(ISD::FCANONICALIZE);
678   setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
679   setTargetDAGCombine(ISD::ZERO_EXTEND);
680   setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
681   setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
682 
683   // All memory operations. Some folding on the pointer operand is done to help
684   // matching the constant offsets in the addressing modes.
685   setTargetDAGCombine(ISD::LOAD);
686   setTargetDAGCombine(ISD::STORE);
687   setTargetDAGCombine(ISD::ATOMIC_LOAD);
688   setTargetDAGCombine(ISD::ATOMIC_STORE);
689   setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
690   setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
691   setTargetDAGCombine(ISD::ATOMIC_SWAP);
692   setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
693   setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
694   setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
695   setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
696   setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
697   setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
698   setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
699   setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
700   setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
701   setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
702   setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD);
703 
704   setSchedulingPreference(Sched::RegPressure);
705 
706   // SI at least has hardware support for floating point exceptions, but no way
707   // of using or handling them is implemented. They are also optional in OpenCL
708   // (Section 7.3)
709   setHasFloatingPointExceptions(Subtarget->hasFPExceptions());
710 }
711 
712 const GCNSubtarget *SITargetLowering::getSubtarget() const {
713   return Subtarget;
714 }
715 
716 //===----------------------------------------------------------------------===//
717 // TargetLowering queries
718 //===----------------------------------------------------------------------===//
719 
720 // v_mad_mix* support a conversion from f16 to f32.
721 //
// There is only one special case where this is still OK to use when denormals
// are enabled, and we don't currently handle it.
724 bool SITargetLowering::isFPExtFoldable(unsigned Opcode,
725                                            EVT DestVT, EVT SrcVT) const {
726   return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
727           (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
728          DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() &&
729          SrcVT.getScalarType() == MVT::f16;
730 }
731 
732 bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
733   // SI has some legal vector types, but no legal vector operations. Say no
734   // shuffles are legal in order to prefer scalarizing some vector operations.
735   return false;
736 }
737 
738 MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
739                                                     CallingConv::ID CC,
740                                                     EVT VT) const {
741   // TODO: Consider splitting all arguments into 32-bit pieces.
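  // For example, with 16-bit instructions available a v3f16 argument uses
  // v2f16 as its register type (and, per getNumRegistersForCallingConv below,
  // two such registers).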
742   if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
743     EVT ScalarVT = VT.getScalarType();
744     unsigned Size = ScalarVT.getSizeInBits();
745     if (Size == 32)
746       return ScalarVT.getSimpleVT();
747 
748     if (Size == 64)
749       return MVT::i32;
750 
751     if (Size == 16 && Subtarget->has16BitInsts())
752       return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
753   }
754 
755   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
756 }
757 
758 unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
759                                                          CallingConv::ID CC,
760                                                          EVT VT) const {
761   if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
762     unsigned NumElts = VT.getVectorNumElements();
763     EVT ScalarVT = VT.getScalarType();
764     unsigned Size = ScalarVT.getSizeInBits();
765 
766     if (Size == 32)
767       return NumElts;
768 
769     if (Size == 64)
770       return 2 * NumElts;
771 
772     if (Size == 16 && Subtarget->has16BitInsts())
773       return (VT.getVectorNumElements() + 1) / 2;
774   }
775 
776   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
777 }
778 
779 unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
780   LLVMContext &Context, CallingConv::ID CC,
781   EVT VT, EVT &IntermediateVT,
782   unsigned &NumIntermediates, MVT &RegisterVT) const {
783   if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
784     unsigned NumElts = VT.getVectorNumElements();
785     EVT ScalarVT = VT.getScalarType();
786     unsigned Size = ScalarVT.getSizeInBits();
787     if (Size == 32) {
788       RegisterVT = ScalarVT.getSimpleVT();
789       IntermediateVT = RegisterVT;
790       NumIntermediates = NumElts;
791       return NumIntermediates;
792     }
793 
794     if (Size == 64) {
795       RegisterVT = MVT::i32;
796       IntermediateVT = RegisterVT;
797       NumIntermediates = 2 * NumElts;
798       return NumIntermediates;
799     }
800 
801     // FIXME: We should fix the ABI to be the same on targets without 16-bit
    // support, but unless we can properly handle 3-vectors, it will still be
803     // inconsistent.
804     if (Size == 16 && Subtarget->has16BitInsts()) {
805       RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
806       IntermediateVT = RegisterVT;
807       NumIntermediates = (NumElts + 1) / 2;
808       return NumIntermediates;
809     }
810   }
811 
812   return TargetLowering::getVectorTypeBreakdownForCallingConv(
813     Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
814 }
815 
816 static MVT memVTFromAggregate(Type *Ty) {
  // Only limited forms of aggregate type are currently expected.
  assert(Ty->isStructTy() && "Expected struct type");

821   Type *ElementType = nullptr;
822   unsigned NumElts;
823   if (Ty->getContainedType(0)->isVectorTy()) {
824     VectorType *VecComponent = cast<VectorType>(Ty->getContainedType(0));
825     ElementType = VecComponent->getElementType();
826     NumElts = VecComponent->getNumElements();
827   } else {
828     ElementType = Ty->getContainedType(0);
829     NumElts = 1;
830   }
831 
  assert((Ty->getContainedType(1) &&
          Ty->getContainedType(1)->isIntegerTy(32)) &&
         "Expected int32 type");
833 
834   // Calculate the size of the memVT type from the aggregate
835   unsigned Pow2Elts = 0;
836   unsigned ElementSize;
837   switch (ElementType->getTypeID()) {
838     default:
839       llvm_unreachable("Unknown type!");
840     case Type::IntegerTyID:
841       ElementSize = cast<IntegerType>(ElementType)->getBitWidth();
842       break;
843     case Type::HalfTyID:
844       ElementSize = 16;
845       break;
846     case Type::FloatTyID:
847       ElementSize = 32;
848       break;
849   }
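  // Round the element count up to a power of two, with extra elements that
  // appear to account for the trailing i32 member: two extra for 16-bit
  // elements, one otherwise. For example, {<3 x half>, i32} yields a v8f16
  // memVT and {<3 x float>, i32} yields v4f32.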
850   unsigned AdditionalElts = ElementSize == 16 ? 2 : 1;
851   Pow2Elts = 1 << Log2_32_Ceil(NumElts + AdditionalElts);
852 
853   return MVT::getVectorVT(MVT::getVT(ElementType, false),
854                           Pow2Elts);
855 }
856 
857 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
858                                           const CallInst &CI,
859                                           MachineFunction &MF,
860                                           unsigned IntrID) const {
861   if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
862           AMDGPU::lookupRsrcIntrinsic(IntrID)) {
863     AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
864                                                   (Intrinsic::ID)IntrID);
865     if (Attr.hasFnAttribute(Attribute::ReadNone))
866       return false;
867 
868     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
869 
870     if (RsrcIntr->IsImage) {
871       Info.ptrVal = MFI->getImagePSV(
872         *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
873         CI.getArgOperand(RsrcIntr->RsrcArg));
874       Info.align = 0;
875     } else {
876       Info.ptrVal = MFI->getBufferPSV(
877         *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
878         CI.getArgOperand(RsrcIntr->RsrcArg));
879     }
880 
881     Info.flags = MachineMemOperand::MODereferenceable;
882     if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
883       Info.opc = ISD::INTRINSIC_W_CHAIN;
884       Info.memVT = MVT::getVT(CI.getType(), true);
885       if (Info.memVT == MVT::Other) {
886         // Some intrinsics return an aggregate type - special case to work out
887         // the correct memVT
888         Info.memVT = memVTFromAggregate(CI.getType());
889       }
890       Info.flags |= MachineMemOperand::MOLoad;
891     } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
892       Info.opc = ISD::INTRINSIC_VOID;
893       Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
894       Info.flags |= MachineMemOperand::MOStore;
895     } else {
896       // Atomic
897       Info.opc = ISD::INTRINSIC_W_CHAIN;
898       Info.memVT = MVT::getVT(CI.getType());
899       Info.flags = MachineMemOperand::MOLoad |
900                    MachineMemOperand::MOStore |
901                    MachineMemOperand::MODereferenceable;
902 
903       // XXX - Should this be volatile without known ordering?
904       Info.flags |= MachineMemOperand::MOVolatile;
905     }
906     return true;
907   }
908 
909   switch (IntrID) {
910   case Intrinsic::amdgcn_atomic_inc:
911   case Intrinsic::amdgcn_atomic_dec:
912   case Intrinsic::amdgcn_ds_ordered_add:
913   case Intrinsic::amdgcn_ds_ordered_swap:
914   case Intrinsic::amdgcn_ds_fadd:
915   case Intrinsic::amdgcn_ds_fmin:
916   case Intrinsic::amdgcn_ds_fmax: {
917     Info.opc = ISD::INTRINSIC_W_CHAIN;
918     Info.memVT = MVT::getVT(CI.getType());
919     Info.ptrVal = CI.getOperand(0);
920     Info.align = 0;
921     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
922 
923     const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
924     if (!Vol || !Vol->isZero())
925       Info.flags |= MachineMemOperand::MOVolatile;
926 
927     return true;
928   }
929   case Intrinsic::amdgcn_ds_append:
930   case Intrinsic::amdgcn_ds_consume: {
931     Info.opc = ISD::INTRINSIC_W_CHAIN;
932     Info.memVT = MVT::getVT(CI.getType());
933     Info.ptrVal = CI.getOperand(0);
934     Info.align = 0;
935     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
936 
937     const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(1));
938     if (!Vol || !Vol->isZero())
939       Info.flags |= MachineMemOperand::MOVolatile;
940 
941     return true;
942   }
943   default:
944     return false;
945   }
946 }
947 
948 bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
949                                             SmallVectorImpl<Value*> &Ops,
950                                             Type *&AccessTy) const {
951   switch (II->getIntrinsicID()) {
952   case Intrinsic::amdgcn_atomic_inc:
953   case Intrinsic::amdgcn_atomic_dec:
954   case Intrinsic::amdgcn_ds_ordered_add:
955   case Intrinsic::amdgcn_ds_ordered_swap:
956   case Intrinsic::amdgcn_ds_fadd:
957   case Intrinsic::amdgcn_ds_fmin:
958   case Intrinsic::amdgcn_ds_fmax: {
959     Value *Ptr = II->getArgOperand(0);
960     AccessTy = II->getType();
961     Ops.push_back(Ptr);
962     return true;
963   }
964   default:
965     return false;
966   }
967 }
968 
969 bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
970   if (!Subtarget->hasFlatInstOffsets()) {
971     // Flat instructions do not have offsets, and only have the register
972     // address.
973     return AM.BaseOffs == 0 && AM.Scale == 0;
974   }
975 
  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and the field is treated as a 12-bit unsigned
  // offset.
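  // For example, a base offset of 4095 is accepted here, while 4096 would need
  // a 13th bit and is rejected.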
978 
979   // Just r + i
980   return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
981 }
982 
983 bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
984   if (Subtarget->hasFlatGlobalInsts())
985     return isInt<13>(AM.BaseOffs) && AM.Scale == 0;
986 
987   if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
    // Assume that we will use FLAT for all global memory accesses on VI.
    // FIXME: This assumption is currently wrong.  On VI we still use
    // MUBUF instructions for the r + i addressing mode.  As currently
    // implemented, the MUBUF instructions only work on buffers < 4GB.
    // It may be possible to support > 4GB buffers with MUBUF instructions,
    // by setting the stride value in the resource descriptor which would
    // increase the size limit to (stride * 4GB).  However, this is risky,
    // because it has never been validated.
997     return isLegalFlatAddressingMode(AM);
998   }
999 
1000   return isLegalMUBUFAddressingMode(AM);
1001 }
1002 
1003 bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
1004   // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
1005   // additionally can do r + r + i with addr64. 32-bit has more addressing
1006   // mode options. Depending on the resource constant, it can also do
1007   // (i64 r0) + (i32 r1) * (i14 i).
1008   //
1009   // Private arrays end up using a scratch buffer most of the time, so also
1010   // assume those use MUBUF instructions. Scratch loads / stores are currently
1011   // implemented as mubuf instructions with offen bit set, so slightly
1012   // different than the normal addr64.
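  // For example, a base offset of 4095 fits the 12-bit field checked below,
  // while 4096 does not.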
1013   if (!isUInt<12>(AM.BaseOffs))
1014     return false;
1015 
1016   // FIXME: Since we can split immediate into soffset and immediate offset,
1017   // would it make sense to allow any immediate?
1018 
1019   switch (AM.Scale) {
1020   case 0: // r + i or just i, depending on HasBaseReg.
1021     return true;
1022   case 1:
1023     return true; // We have r + r or r + i.
1024   case 2:
1025     if (AM.HasBaseReg) {
1026       // Reject 2 * r + r.
1027       return false;
1028     }
1029 
1030     // Allow 2 * r as r + r
1031     // Or  2 * r + i is allowed as r + r + i.
1032     return true;
1033   default: // Don't allow n * r
1034     return false;
1035   }
1036 }
1037 
1038 bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
1039                                              const AddrMode &AM, Type *Ty,
1040                                              unsigned AS, Instruction *I) const {
1041   // No global is ever allowed as a base.
1042   if (AM.BaseGV)
1043     return false;
1044 
1045   if (AS == AMDGPUAS::GLOBAL_ADDRESS)
1046     return isLegalGlobalAddressingMode(AM);
1047 
1048   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
1049       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
1050     // If the offset isn't a multiple of 4, it probably isn't going to be
1051     // correctly aligned.
1052     // FIXME: Can we get the real alignment here?
1053     if (AM.BaseOffs % 4 != 0)
1054       return isLegalMUBUFAddressingMode(AM);
1055 
1056     // There are no SMRD extloads, so if we have to do a small type access we
1057     // will use a MUBUF load.
1058     // FIXME?: We also need to do this if unaligned, but we don't know the
1059     // alignment here.
1060     if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
1061       return isLegalGlobalAddressingMode(AM);
1062 
1063     if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
1064       // SMRD instructions have an 8-bit, dword offset on SI.
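      // For example, a byte offset of 1020 encodes as dword offset 255, while
      // 1024 (dword offset 256) does not fit.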
1065       if (!isUInt<8>(AM.BaseOffs / 4))
1066         return false;
1067     } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
1068       // On CI+, this can also be a 32-bit literal constant offset. If it fits
1069       // in 8-bits, it can use a smaller encoding.
1070       if (!isUInt<32>(AM.BaseOffs / 4))
1071         return false;
1072     } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
1073       // On VI, these use the SMEM format and the offset is 20-bit in bytes.
1074       if (!isUInt<20>(AM.BaseOffs))
1075         return false;
1076     } else
1077       llvm_unreachable("unhandled generation");
1078 
1079     if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1080       return true;
1081 
1082     if (AM.Scale == 1 && AM.HasBaseReg)
1083       return true;
1084 
1085     return false;
1086 
1087   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
1088     return isLegalMUBUFAddressingMode(AM);
1089   } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
1090              AS == AMDGPUAS::REGION_ADDRESS) {
1091     // Basic, single offset DS instructions allow a 16-bit unsigned immediate
1092     // field.
1093     // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
1094     // an 8-bit dword offset but we don't know the alignment here.
1095     if (!isUInt<16>(AM.BaseOffs))
1096       return false;
1097 
1098     if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1099       return true;
1100 
1101     if (AM.Scale == 1 && AM.HasBaseReg)
1102       return true;
1103 
1104     return false;
1105   } else if (AS == AMDGPUAS::FLAT_ADDRESS ||
1106              AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) {
1107     // For an unknown address space, this usually means that this is for some
1108     // reason being used for pure arithmetic, and not based on some addressing
1109     // computation. We don't have instructions that compute pointers with any
1110     // addressing modes, so treat them as having no offset like flat
1111     // instructions.
1112     return isLegalFlatAddressingMode(AM);
1113   } else {
1114     llvm_unreachable("unhandled address space");
1115   }
1116 }
1117 
1118 bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
1119                                         const SelectionDAG &DAG) const {
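  // Global and flat stores may be merged up to 128 bits (a dwordx4 access),
  // LDS up to 64 bits, and private memory up to the subtarget's maximum
  // private element size.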
1120   if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
1121     return (MemVT.getSizeInBits() <= 4 * 32);
1122   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
1123     unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
1124     return (MemVT.getSizeInBits() <= MaxPrivateBits);
1125   } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
1126     return (MemVT.getSizeInBits() <= 2 * 32);
1127   }
1128   return true;
1129 }
1130 
1131 bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1132                                                       unsigned AddrSpace,
1133                                                       unsigned Align,
1134                                                       bool *IsFast) const {
1135   if (IsFast)
1136     *IsFast = false;
1137 
1138   // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
1139   // which isn't a simple VT.
1140   // Until MVT is extended to handle this, simply check for the size and
1141   // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other ||
      (VT.getSizeInBits() > 1024 && VT.getStoreSize() > 16)) {
    return false;
  }
1146 
1147   if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1148       AddrSpace == AMDGPUAS::REGION_ADDRESS) {
1149     // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
1150     // aligned, 8 byte access in a single operation using ds_read2/write2_b32
1151     // with adjacent offsets.
1152     bool AlignedBy4 = (Align % 4 == 0);
1153     if (IsFast)
1154       *IsFast = AlignedBy4;
1155 
1156     return AlignedBy4;
1157   }
1158 
1159   // FIXME: We have to be conservative here and assume that flat operations
1160   // will access scratch.  If we had access to the IR function, then we
1161   // could determine if any private memory was used in the function.
1162   if (!Subtarget->hasUnalignedScratchAccess() &&
1163       (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
1164        AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
1165     bool AlignedBy4 = Align >= 4;
1166     if (IsFast)
1167       *IsFast = AlignedBy4;
1168 
1169     return AlignedBy4;
1170   }
1171 
1172   if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have a uniform constant load, it still requires using a slow
1174     // buffer instruction if unaligned.
1175     if (IsFast) {
1176       *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
1177                  AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
1178         (Align % 4 == 0) : true;
1179     }
1180 
1181     return true;
1182   }
1183 
1184   // Smaller than dword value must be aligned.
1185   if (VT.bitsLT(MVT::i32))
1186     return false;
1187 
1188   // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
1189   // byte-address are ignored, thus forcing Dword alignment.
1190   // This applies to private, global, and constant memory.
1191   if (IsFast)
1192     *IsFast = true;
1193 
1194   return VT.bitsGT(MVT::i32) && Align % 4 == 0;
1195 }
1196 
1197 EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
1198                                           unsigned SrcAlign, bool IsMemset,
1199                                           bool ZeroMemset,
1200                                           bool MemcpyStrSrc,
1201                                           MachineFunction &MF) const {
1202   // FIXME: Should account for address space here.
1203 
1204   // The default fallback uses the private pointer size as a guess for a type to
1205   // use. Make sure we switch these to 64-bit accesses.
1206 
1207   if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
1208     return MVT::v4i32;
1209 
1210   if (Size >= 8 && DstAlign >= 4)
1211     return MVT::v2i32;
1212 
1213   // Use the default.
1214   return MVT::Other;
1215 }
1216 
1217 static bool isFlatGlobalAddrSpace(unsigned AS) {
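  // Address spaces above MAX_AMDGPU_ADDRESS are unknown, target-specific
  // address spaces; they are grouped with the flat-compatible spaces here
  // (see isNoopAddrSpaceCast below).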
1218   return AS == AMDGPUAS::GLOBAL_ADDRESS ||
1219          AS == AMDGPUAS::FLAT_ADDRESS ||
1220          AS == AMDGPUAS::CONSTANT_ADDRESS ||
1221          AS > AMDGPUAS::MAX_AMDGPU_ADDRESS;
1222 }
1223 
1224 bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1225                                            unsigned DestAS) const {
1226   return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
1227 }
1228 
1229 bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
1230   const MemSDNode *MemNode = cast<MemSDNode>(N);
1231   const Value *Ptr = MemNode->getMemOperand()->getValue();
1232   const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
1233   return I && I->getMetadata("amdgpu.noclobber");
1234 }
1235 
1236 bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
1237                                             unsigned DestAS) const {
1238   // Flat -> private/local is a simple truncate.
1239   // Flat -> global is no-op
1240   if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
1241     return true;
1242 
1243   return isNoopAddrSpaceCast(SrcAS, DestAS);
1244 }
1245 
1246 bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
1247   const MemSDNode *MemNode = cast<MemSDNode>(N);
1248 
1249   return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
1250 }
1251 
1252 TargetLoweringBase::LegalizeTypeAction
1253 SITargetLowering::getPreferredVectorAction(MVT VT) const {
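  // Prefer splitting over widening for vectors of small elements: for example,
  // an illegal v8i16 is split in half (and further legalized from there)
  // rather than widened.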
1254   if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
1255     return TypeSplitVector;
1256 
1257   return TargetLoweringBase::getPreferredVectorAction(VT);
1258 }
1259 
1260 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
1261                                                          Type *Ty) const {
1262   // FIXME: Could be smarter if called for vector constants.
1263   return true;
1264 }
1265 
1266 bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
1267   if (Subtarget->has16BitInsts() && VT == MVT::i16) {
1268     switch (Op) {
1269     case ISD::LOAD:
1270     case ISD::STORE:
1271 
1272     // These operations are done with 32-bit instructions anyway.
1273     case ISD::AND:
1274     case ISD::OR:
1275     case ISD::XOR:
1276     case ISD::SELECT:
1277       // TODO: Extensions?
1278       return true;
1279     default:
1280       return false;
1281     }
1282   }
1283 
1284   // SimplifySetCC uses this function to determine whether or not it should
1285   // create setcc with i1 operands.  We don't have instructions for i1 setcc.
1286   if (VT == MVT::i1 && Op == ISD::SETCC)
1287     return false;
1288 
1289   return TargetLowering::isTypeDesirableForOp(Op, VT);
1290 }
1291 
1292 SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1293                                                    const SDLoc &SL,
1294                                                    SDValue Chain,
1295                                                    uint64_t Offset) const {
1296   const DataLayout &DL = DAG.getDataLayout();
1297   MachineFunction &MF = DAG.getMachineFunction();
1298   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1299 
1300   const ArgDescriptor *InputPtrReg;
1301   const TargetRegisterClass *RC;
1302 
1303   std::tie(InputPtrReg, RC)
1304     = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
1305 
1306   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
1307   MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
1308   SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
1309     MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1310 
1311   return DAG.getObjectPtrOffset(SL, BasePtr, Offset);
1312 }
1313 
1314 SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1315                                             const SDLoc &SL) const {
1316   uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
1317                                                FIRST_IMPLICIT);
1318   return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1319 }
1320 
1321 SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1322                                          const SDLoc &SL, SDValue Val,
1323                                          bool Signed,
1324                                          const ISD::InputArg *Arg) const {
1325   if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1326       VT.bitsLT(MemVT)) {
1327     unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1328     Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1329   }
1330 
1331   if (MemVT.isFloatingPoint())
1332     Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
1333   else if (Signed)
1334     Val = DAG.getSExtOrTrunc(Val, SL, VT);
1335   else
1336     Val = DAG.getZExtOrTrunc(Val, SL, VT);
1337 
1338   return Val;
1339 }
1340 
1341 SDValue SITargetLowering::lowerKernargMemParameter(
1342   SelectionDAG &DAG, EVT VT, EVT MemVT,
1343   const SDLoc &SL, SDValue Chain,
1344   uint64_t Offset, unsigned Align, bool Signed,
1345   const ISD::InputArg *Arg) const {
1346   Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
1347   PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
1348   MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
1349 
1350   // Try to avoid using an extload by loading earlier than the argument address,
1351   // and extracting the relevant bits. The load should hopefully be merged with
1352   // the previous argument.
1353   if (MemVT.getStoreSize() < 4 && Align < 4) {
1354     // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
1355     int64_t AlignDownOffset = alignDown(Offset, 4);
1356     int64_t OffsetDiff = Offset - AlignDownOffset;
1357 
1358     EVT IntVT = MemVT.changeTypeToInteger();
1359 
1360     // TODO: If we passed in the base kernel offset we could have a better
1361     // alignment than 4, but we don't really need it.
1362     SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
1363     SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4,
1364                                MachineMemOperand::MODereferenceable |
1365                                MachineMemOperand::MOInvariant);
1366 
1367     SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
1368     SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
1369 
1370     SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
1371     ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
1372     ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);
1373 
1375     return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
1376   }
1377 
1378   SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1379   SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
1380                              MachineMemOperand::MODereferenceable |
1381                              MachineMemOperand::MOInvariant);
1382 
1383   SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
1384   return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
1385 }
1386 
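// Lower an incoming argument passed on the stack. Byval arguments are simply
// given a fixed frame index; everything else becomes a (possibly extending)
// load from a fixed stack object.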
1387 SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1388                                               const SDLoc &SL, SDValue Chain,
1389                                               const ISD::InputArg &Arg) const {
1390   MachineFunction &MF = DAG.getMachineFunction();
1391   MachineFrameInfo &MFI = MF.getFrameInfo();
1392 
1393   if (Arg.Flags.isByVal()) {
1394     unsigned Size = Arg.Flags.getByValSize();
1395     int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1396     return DAG.getFrameIndex(FrameIdx, MVT::i32);
1397   }
1398 
1399   unsigned ArgOffset = VA.getLocMemOffset();
1400   unsigned ArgSize = VA.getValVT().getStoreSize();
1401 
1402   int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1403 
1404   // Create load nodes to retrieve arguments from the stack.
1405   SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1406   SDValue ArgValue;
1407 
1408   // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
1409   ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
1410   MVT MemVT = VA.getValVT();
1411 
1412   switch (VA.getLocInfo()) {
1413   default:
1414     break;
1415   case CCValAssign::BCvt:
1416     MemVT = VA.getLocVT();
1417     break;
1418   case CCValAssign::SExt:
1419     ExtType = ISD::SEXTLOAD;
1420     break;
1421   case CCValAssign::ZExt:
1422     ExtType = ISD::ZEXTLOAD;
1423     break;
1424   case CCValAssign::AExt:
1425     ExtType = ISD::EXTLOAD;
1426     break;
1427   }
1428 
1429   ArgValue = DAG.getExtLoad(
1430     ExtType, SL, VA.getLocVT(), Chain, FIN,
1431     MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
1432     MemVT);
1433   return ArgValue;
1434 }
1435 
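// Return a live-in copy of the register holding the requested preloaded value.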
1436 SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1437   const SIMachineFunctionInfo &MFI,
1438   EVT VT,
1439   AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
1440   const ArgDescriptor *Reg;
1441   const TargetRegisterClass *RC;
1442 
1443   std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1444   return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1445 }
1446 
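// Collect the incoming shader arguments to be lowered, skipping unused PS
// inputs and recording which PS input addresses are allocated and enabled.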
1447 static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
1448                                    CallingConv::ID CallConv,
1449                                    ArrayRef<ISD::InputArg> Ins,
1450                                    BitVector &Skipped,
1451                                    FunctionType *FType,
1452                                    SIMachineFunctionInfo *Info) {
1453   for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
1454     const ISD::InputArg *Arg = &Ins[I];
1455 
1456     assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
1457            "vector type argument should have been split");
1458 
1459     // First check if it's a PS input addr.
1460     if (CallConv == CallingConv::AMDGPU_PS &&
1461         !Arg->Flags.isInReg() && !Arg->Flags.isByVal() && PSInputNum <= 15) {
1462 
1463       bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);
1464 
1465       // Inconveniently only the first part of the split is marked as isSplit,
1466       // so skip to the end. We only want to increment PSInputNum once for the
1467       // entire split argument.
1468       if (Arg->Flags.isSplit()) {
1469         while (!Arg->Flags.isSplitEnd()) {
1470           assert(!Arg->VT.isVector() &&
1471                  "unexpected vector split in ps argument type");
1472           if (!SkipArg)
1473             Splits.push_back(*Arg);
1474           Arg = &Ins[++I];
1475         }
1476       }
1477 
1478       if (SkipArg) {
1479         // We can safely skip PS inputs.
1480         Skipped.set(Arg->getOrigArgIndex());
1481         ++PSInputNum;
1482         continue;
1483       }
1484 
1485       Info->markPSInputAllocated(PSInputNum);
1486       if (Arg->Used)
1487         Info->markPSInputEnabled(PSInputNum);
1488 
1489       ++PSInputNum;
1490     }
1491 
1492     Splits.push_back(*Arg);
1493   }
1494 }
1495 
1496 // Allocate special inputs passed in VGPRs.
1497 static void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1498                                            MachineFunction &MF,
1499                                            const SIRegisterInfo &TRI,
1500                                            SIMachineFunctionInfo &Info) {
1501   if (Info.hasWorkItemIDX()) {
1502     unsigned Reg = AMDGPU::VGPR0;
1503     MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1504 
1505     CCInfo.AllocateReg(Reg);
1506     Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
1507   }
1508 
1509   if (Info.hasWorkItemIDY()) {
1510     unsigned Reg = AMDGPU::VGPR1;
1511     MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1512 
1513     CCInfo.AllocateReg(Reg);
1514     Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
1515   }
1516 
1517   if (Info.hasWorkItemIDZ()) {
1518     unsigned Reg = AMDGPU::VGPR2;
1519     MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1520 
1521     CCInfo.AllocateReg(Reg);
1522     Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
1523   }
1524 }
1525 
1526 // Try to allocate a VGPR at the end of the argument list, or if no argument
1527 // VGPRs are left, allocate a stack slot.
1528 static ArgDescriptor allocateVGPR32Input(CCState &CCInfo) {
1529   ArrayRef<MCPhysReg> ArgVGPRs
1530     = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1531   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1532   if (RegIdx == ArgVGPRs.size()) {
1533     // Spill to stack required.
1534     int64_t Offset = CCInfo.AllocateStack(4, 4);
1535 
1536     return ArgDescriptor::createStack(Offset);
1537   }
1538 
1539   unsigned Reg = ArgVGPRs[RegIdx];
1540   Reg = CCInfo.AllocateReg(Reg);
1541   assert(Reg != AMDGPU::NoRegister);
1542 
1543   MachineFunction &MF = CCInfo.getMachineFunction();
1544   MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1545   return ArgDescriptor::createRegister(Reg);
1546 }
1547 
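// Allocate one of the first NumArgRegs registers in RC for an SGPR input,
// reporting a fatal error if they have all been used.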
1548 static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
1549                                              const TargetRegisterClass *RC,
1550                                              unsigned NumArgRegs) {
1551   ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), NumArgRegs);
1552   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1553   if (RegIdx == ArgSGPRs.size())
1554     report_fatal_error("ran out of SGPRs for arguments");
1555 
1556   unsigned Reg = ArgSGPRs[RegIdx];
1557   Reg = CCInfo.AllocateReg(Reg);
1558   assert(Reg != AMDGPU::NoRegister);
1559 
1560   MachineFunction &MF = CCInfo.getMachineFunction();
1561   MF.addLiveIn(Reg, RC);
1562   return ArgDescriptor::createRegister(Reg);
1563 }
1564 
1565 static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
1566   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1567 }
1568 
1569 static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
1570   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1571 }
1572 
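// Allocate VGPRs (or stack slots) for the workitem ID inputs of a callable,
// non-entry function.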
1573 static void allocateSpecialInputVGPRs(CCState &CCInfo,
1574                                       MachineFunction &MF,
1575                                       const SIRegisterInfo &TRI,
1576                                       SIMachineFunctionInfo &Info) {
1577   if (Info.hasWorkItemIDX())
1578     Info.setWorkItemIDX(allocateVGPR32Input(CCInfo));
1579 
1580   if (Info.hasWorkItemIDY())
1581     Info.setWorkItemIDY(allocateVGPR32Input(CCInfo));
1582 
1583   if (Info.hasWorkItemIDZ())
1584     Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo));
1585 }
1586 
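// Allocate argument SGPRs for the special inputs a callable function may use
// (dispatch/queue/kernarg pointers, dispatch ID, workgroup IDs, implicit
// argument pointer).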
1587 static void allocateSpecialInputSGPRs(CCState &CCInfo,
1588                                       MachineFunction &MF,
1589                                       const SIRegisterInfo &TRI,
1590                                       SIMachineFunctionInfo &Info) {
1591   auto &ArgInfo = Info.getArgInfo();
1592 
1593   // TODO: Unify handling with private memory pointers.
1594 
1595   if (Info.hasDispatchPtr())
1596     ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1597 
1598   if (Info.hasQueuePtr())
1599     ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1600 
1601   if (Info.hasKernargSegmentPtr())
1602     ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);
1603 
1604   if (Info.hasDispatchID())
1605     ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1606 
1607   // flat_scratch_init is not applicable for non-kernel functions.
1608 
1609   if (Info.hasWorkGroupIDX())
1610     ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1611 
1612   if (Info.hasWorkGroupIDY())
1613     ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1614 
1615   if (Info.hasWorkGroupIDZ())
1616     ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
1617 
1618   if (Info.hasImplicitArgPtr())
1619     ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
1620 }
1621 
1622 // Allocate special inputs passed in user SGPRs.
1623 static void allocateHSAUserSGPRs(CCState &CCInfo,
1624                                  MachineFunction &MF,
1625                                  const SIRegisterInfo &TRI,
1626                                  SIMachineFunctionInfo &Info) {
1627   if (Info.hasImplicitBufferPtr()) {
1628     unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1629     MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1630     CCInfo.AllocateReg(ImplicitBufferPtrReg);
1631   }
1632 
1633   // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1634   if (Info.hasPrivateSegmentBuffer()) {
1635     unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1636     MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1637     CCInfo.AllocateReg(PrivateSegmentBufferReg);
1638   }
1639 
1640   if (Info.hasDispatchPtr()) {
1641     unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1642     MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1643     CCInfo.AllocateReg(DispatchPtrReg);
1644   }
1645 
1646   if (Info.hasQueuePtr()) {
1647     unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1648     MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1649     CCInfo.AllocateReg(QueuePtrReg);
1650   }
1651 
1652   if (Info.hasKernargSegmentPtr()) {
1653     unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
1654     MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1655     CCInfo.AllocateReg(InputPtrReg);
1656   }
1657 
1658   if (Info.hasDispatchID()) {
1659     unsigned DispatchIDReg = Info.addDispatchID(TRI);
1660     MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1661     CCInfo.AllocateReg(DispatchIDReg);
1662   }
1663 
1664   if (Info.hasFlatScratchInit()) {
1665     unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1666     MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1667     CCInfo.AllocateReg(FlatScratchInitReg);
1668   }
1669 
1670   // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1671   // these from the dispatch pointer.
1672 }
1673 
1674 // Allocate special input registers that are initialized per-wave.
1675 static void allocateSystemSGPRs(CCState &CCInfo,
1676                                 MachineFunction &MF,
1677                                 SIMachineFunctionInfo &Info,
1678                                 CallingConv::ID CallConv,
1679                                 bool IsShader) {
1680   if (Info.hasWorkGroupIDX()) {
1681     unsigned Reg = Info.addWorkGroupIDX();
1682     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1683     CCInfo.AllocateReg(Reg);
1684   }
1685 
1686   if (Info.hasWorkGroupIDY()) {
1687     unsigned Reg = Info.addWorkGroupIDY();
1688     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1689     CCInfo.AllocateReg(Reg);
1690   }
1691 
1692   if (Info.hasWorkGroupIDZ()) {
1693     unsigned Reg = Info.addWorkGroupIDZ();
1694     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1695     CCInfo.AllocateReg(Reg);
1696   }
1697 
1698   if (Info.hasWorkGroupInfo()) {
1699     unsigned Reg = Info.addWorkGroupInfo();
1700     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1701     CCInfo.AllocateReg(Reg);
1702   }
1703 
1704   if (Info.hasPrivateSegmentWaveByteOffset()) {
1705     // Scratch wave offset passed in system SGPR.
1706     unsigned PrivateSegmentWaveByteOffsetReg;
1707 
1708     if (IsShader) {
1709       PrivateSegmentWaveByteOffsetReg =
1710         Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
1711 
1712       // This is true if the scratch wave byte offset doesn't have a fixed
1713       // location.
1714       if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1715         PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1716         Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1717       }
1718     } else
1719       PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1720 
1721     MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1722     CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1723   }
1724 }
1725 
1726 static void reservePrivateMemoryRegs(const TargetMachine &TM,
1727                                      MachineFunction &MF,
1728                                      const SIRegisterInfo &TRI,
1729                                      SIMachineFunctionInfo &Info) {
1730   // Now that we've figured out where the scratch register inputs are, see if
1731   // we should reserve the arguments and use them directly.
1732   MachineFrameInfo &MFI = MF.getFrameInfo();
1733   bool HasStackObjects = MFI.hasStackObjects();
1734 
1735   // Record that we know we have non-spill stack objects so we don't need to
1736   // check all stack objects later.
1737   if (HasStackObjects)
1738     Info.setHasNonSpillStackObjects(true);
1739 
1740   // Everything live out of a block is spilled with fast regalloc, so it's
1741   // almost certain that spilling will be required.
1742   if (TM.getOptLevel() == CodeGenOpt::None)
1743     HasStackObjects = true;
1744 
1745   // For now, assume stack access is needed in any callee function, so we need
1746   // to pass in the scratch registers.
1747   bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1748 
1749   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1750   if (ST.isAmdHsaOrMesa(MF.getFunction())) {
1751     if (RequiresStackAccess) {
1752       // If we have stack objects, we unquestionably need the private buffer
1753       // resource. For the Code Object V2 ABI, this will be the first 4 user
1754       // SGPR inputs. We can reserve those and use them directly.
1755 
1756       unsigned PrivateSegmentBufferReg = Info.getPreloadedReg(
1757         AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
1758       Info.setScratchRSrcReg(PrivateSegmentBufferReg);
1759 
1760       if (MFI.hasCalls()) {
1761         // If we have calls, we need to keep the frame register in a register
1762         // that won't be clobbered by a call, so ensure it is copied somewhere.
1763 
1764         // This is not a problem for the scratch wave offset, because the same
1765         // registers are reserved in all functions.
1766 
1767         // FIXME: Nothing is really ensuring this is a call preserved register,
1768         // it's just selected from the end so it happens to be.
1769         unsigned ReservedOffsetReg
1770           = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1771         Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1772       } else {
1773         unsigned PrivateSegmentWaveByteOffsetReg = Info.getPreloadedReg(
1774           AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
1775         Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
1776       }
1777     } else {
1778       unsigned ReservedBufferReg
1779         = TRI.reservedPrivateSegmentBufferReg(MF);
1780       unsigned ReservedOffsetReg
1781         = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1782 
1783       // We tentatively reserve the last registers (skipping the last two,
1784       // which may contain VCC). After register allocation, we'll replace
1785       // these with the registers immediately after those that were really
1786       // allocated. In the prologue, copies will be inserted from the argument
1787       // registers to these reserved registers.
1788       Info.setScratchRSrcReg(ReservedBufferReg);
1789       Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1790     }
1791   } else {
1792     unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
1793 
1794     // Without HSA, relocations are used for the scratch pointer and the
1795     // buffer resource setup is always inserted in the prologue. Scratch wave
1796     // offset is still in an input SGPR.
1797     Info.setScratchRSrcReg(ReservedBufferReg);
1798 
1799     if (HasStackObjects && !MFI.hasCalls()) {
1800       unsigned ScratchWaveOffsetReg = Info.getPreloadedReg(
1801         AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
1802       Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg);
1803     } else {
1804       unsigned ReservedOffsetReg
1805         = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1806       Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1807     }
1808   }
1809 }
1810 
1811 bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
1812   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1813   return !Info->isEntryFunction();
1814 }
1815 
1816 void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
1817 
1818 }
1819 
1820 void SITargetLowering::insertCopiesSplitCSR(
1821   MachineBasicBlock *Entry,
1822   const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1823   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1824 
1825   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1826   if (!IStart)
1827     return;
1828 
1829   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1830   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1831   MachineBasicBlock::iterator MBBI = Entry->begin();
1832   for (const MCPhysReg *I = IStart; *I; ++I) {
1833     const TargetRegisterClass *RC = nullptr;
1834     if (AMDGPU::SReg_64RegClass.contains(*I))
1835       RC = &AMDGPU::SGPR_64RegClass;
1836     else if (AMDGPU::SReg_32RegClass.contains(*I))
1837       RC = &AMDGPU::SGPR_32RegClass;
1838     else
1839       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
1840 
1841     unsigned NewVR = MRI->createVirtualRegister(RC);
1842     // Create copy from CSR to a virtual register.
1843     Entry->addLiveIn(*I);
1844     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
1845       .addReg(*I);
1846 
1847     // Insert the copy-back instructions right before the terminator.
1848     for (auto *Exit : Exits)
1849       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
1850               TII->get(TargetOpcode::COPY), *I)
1851         .addReg(NewVR);
1852   }
1853 }
1854 
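// Lower the incoming arguments. Kernel arguments are loaded from the kernarg
// segment, shader and callable-function arguments arrive in registers or on
// the stack, and the special input registers are allocated around them.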
1855 SDValue SITargetLowering::LowerFormalArguments(
1856     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1857     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1858     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1859   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1860 
1861   MachineFunction &MF = DAG.getMachineFunction();
1862   const Function &Fn = MF.getFunction();
1863   FunctionType *FType = MF.getFunction().getFunctionType();
1864   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1865 
1866   if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
1867     DiagnosticInfoUnsupported NoGraphicsHSA(
1868         Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
1869     DAG.getContext()->diagnose(NoGraphicsHSA);
1870     return DAG.getEntryNode();
1871   }
1872 
1873   SmallVector<ISD::InputArg, 16> Splits;
1874   SmallVector<CCValAssign, 16> ArgLocs;
1875   BitVector Skipped(Ins.size());
1876   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1877                  *DAG.getContext());
1878 
1879   bool IsShader = AMDGPU::isShader(CallConv);
1880   bool IsKernel = AMDGPU::isKernel(CallConv);
1881   bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
1882 
1883   if (!IsEntryFunc) {
1884     // 4 bytes are reserved at offset 0 for the emergency stack slot. Skip over
1885     // this when allocating argument fixed offsets.
1886     CCInfo.AllocateStack(4, 4);
1887   }
1888 
1889   if (IsShader) {
1890     processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
1891 
1892     // At least one interpolation mode must be enabled or else the GPU will
1893     // hang.
1894     //
1895     // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
1896     // set PSInputAddr, the user wants to enable some bits after compilation
1897     // based on run-time states. Since we can't know what the final PSInputEna
1898     // will look like, we shouldn't do anything here; the user should take
1899     // responsibility for the correct programming.
1900     //
1901     // Otherwise, the following restrictions apply:
1902     // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
1903     // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
1904     //   enabled too.
1905     if (CallConv == CallingConv::AMDGPU_PS) {
1906       if ((Info->getPSInputAddr() & 0x7F) == 0 ||
1907            ((Info->getPSInputAddr() & 0xF) == 0 &&
1908             Info->isPSInputAllocated(11))) {
1909         CCInfo.AllocateReg(AMDGPU::VGPR0);
1910         CCInfo.AllocateReg(AMDGPU::VGPR1);
1911         Info->markPSInputAllocated(0);
1912         Info->markPSInputEnabled(0);
1913       }
1914       if (Subtarget->isAmdPalOS()) {
1915         // For isAmdPalOS, the user does not enable some bits after compilation
1916         // based on run-time states; the register values being generated here are
1917         // the final ones set in hardware. Therefore we need to apply the
1918         // workaround to PSInputAddr and PSInputEnable together.  (The case where
1919         // a bit is set in PSInputAddr but not PSInputEnable is where the
1920         // frontend set up an input arg for a particular interpolation mode, but
1921         // nothing uses that input arg. Really we should have an earlier pass
1922         // that removes such an arg.)
1923         unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
1924         if ((PsInputBits & 0x7F) == 0 ||
1925             ((PsInputBits & 0xF) == 0 &&
1926              (PsInputBits >> 11 & 1)))
1927           Info->markPSInputEnabled(
1928               countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
1929       }
1930     }
1931 
1932     assert(!Info->hasDispatchPtr() &&
1933            !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
1934            !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
1935            !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
1936            !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
1937            !Info->hasWorkItemIDZ());
1938   } else if (IsKernel) {
1939     assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
1940   } else {
1941     Splits.append(Ins.begin(), Ins.end());
1942   }
1943 
1944   if (IsEntryFunc) {
1945     allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
1946     allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
1947   }
1948 
1949   if (IsKernel) {
1950     analyzeFormalArgumentsCompute(CCInfo, Ins);
1951   } else {
1952     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
1953     CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
1954   }
1955 
1956   SmallVector<SDValue, 16> Chains;
1957 
1958   // FIXME: This is the minimum kernel argument alignment. We should improve
1959   // this to the maximum alignment of the arguments.
1960   //
1961   // FIXME: Alignment of explicit arguments totally broken with non-0 explicit
1962   // kern arg offset.
1963   const unsigned KernelArgBaseAlign = 16;
1964 
1965   for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
1966     const ISD::InputArg &Arg = Ins[i];
1967     if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
1968       InVals.push_back(DAG.getUNDEF(Arg.VT));
1969       continue;
1970     }
1971 
1972     CCValAssign &VA = ArgLocs[ArgIdx++];
1973     MVT VT = VA.getLocVT();
1974 
1975     if (IsEntryFunc && VA.isMemLoc()) {
1976       VT = Ins[i].VT;
1977       EVT MemVT = VA.getLocVT();
1978 
1979       const uint64_t Offset = VA.getLocMemOffset();
1980       unsigned Align = MinAlign(KernelArgBaseAlign, Offset);
1981 
1982       SDValue Arg = lowerKernargMemParameter(
1983         DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]);
1984       Chains.push_back(Arg.getValue(1));
1985 
1986       auto *ParamTy =
1987         dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
1988       if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
1989           ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
1990                       ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) {
1991         // On SI, local pointers are just offsets into LDS, so they always
1992         // fit in 16 bits. On CI and newer they could potentially be real
1993         // pointers, so we can't guarantee their size.
1994         Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
1995                           DAG.getValueType(MVT::i16));
1996       }
1997 
1998       InVals.push_back(Arg);
1999       continue;
2000     } else if (!IsEntryFunc && VA.isMemLoc()) {
2001       SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
2002       InVals.push_back(Val);
2003       if (!Arg.Flags.isByVal())
2004         Chains.push_back(Val.getValue(1));
2005       continue;
2006     }
2007 
2008     assert(VA.isRegLoc() && "Parameter must be in a register!");
2009 
2010     unsigned Reg = VA.getLocReg();
2011     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2012     EVT ValVT = VA.getValVT();
2013 
2014     Reg = MF.addLiveIn(Reg, RC);
2015     SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
2016 
2017     if (Arg.Flags.isSRet() && !getSubtarget()->enableHugePrivateBuffer()) {
2018       // The return object should be reasonably addressable.
2019 
2020       // FIXME: This helps when the return is a real sret. If it is an
2021       // automatically inserted sret (i.e. CanLowerReturn returns false), an
2022       // extra copy is inserted in SelectionDAGBuilder which obscures this.
2023       unsigned NumBits = 32 - AssumeFrameIndexHighZeroBits;
2024       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2025         DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
2026     }
2027 
2028     // If this is an 8 or 16-bit value, it is really passed promoted
2029     // to 32 bits. Insert an assert[sz]ext to capture this, then
2030     // truncate to the right size.
2031     switch (VA.getLocInfo()) {
2032     case CCValAssign::Full:
2033       break;
2034     case CCValAssign::BCvt:
2035       Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
2036       break;
2037     case CCValAssign::SExt:
2038       Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
2039                         DAG.getValueType(ValVT));
2040       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2041       break;
2042     case CCValAssign::ZExt:
2043       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2044                         DAG.getValueType(ValVT));
2045       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2046       break;
2047     case CCValAssign::AExt:
2048       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2049       break;
2050     default:
2051       llvm_unreachable("Unknown loc info!");
2052     }
2053 
2054     InVals.push_back(Val);
2055   }
2056 
2057   if (!IsEntryFunc) {
2058     // Special inputs come after user arguments.
2059     allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
2060   }
2061 
2062   // Start adding system SGPRs.
2063   if (IsEntryFunc) {
2064     allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
2065   } else {
2066     CCInfo.AllocateReg(Info->getScratchRSrcReg());
2067     CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
2068     CCInfo.AllocateReg(Info->getFrameOffsetReg());
2069     allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
2070   }
2071 
2072   auto &ArgUsageInfo =
2073     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2074   ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
2075 
2076   unsigned StackArgSize = CCInfo.getNextStackOffset();
2077   Info->setBytesInStackArgArea(StackArgSize);
2078 
2079   return Chains.empty() ? Chain :
2080     DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
2081 }
2082 
2083 // TODO: If return values can't fit in registers, we should return as many as
2084 // possible in registers before passing on stack.
2085 bool SITargetLowering::CanLowerReturn(
2086   CallingConv::ID CallConv,
2087   MachineFunction &MF, bool IsVarArg,
2088   const SmallVectorImpl<ISD::OutputArg> &Outs,
2089   LLVMContext &Context) const {
2090   // Replacing returns with sret/stack usage doesn't make sense for shaders.
2091   // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
2092   // for shaders. Vector types should be explicitly handled by CC.
2093   if (AMDGPU::isEntryFunctionCC(CallConv))
2094     return true;
2095 
2096   SmallVector<CCValAssign, 16> RVLocs;
2097   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2098   return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
2099 }
2100 
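// Lower outgoing return values. Shaders that return void end the wave;
// otherwise shaders return to the epilog, and callable functions emit a normal
// return with the return address and any CSR-via-copy registers added as
// extra operands.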
2101 SDValue
2102 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2103                               bool isVarArg,
2104                               const SmallVectorImpl<ISD::OutputArg> &Outs,
2105                               const SmallVectorImpl<SDValue> &OutVals,
2106                               const SDLoc &DL, SelectionDAG &DAG) const {
2107   MachineFunction &MF = DAG.getMachineFunction();
2108   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2109 
2110   if (AMDGPU::isKernel(CallConv)) {
2111     return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
2112                                              OutVals, DL, DAG);
2113   }
2114 
2115   bool IsShader = AMDGPU::isShader(CallConv);
2116 
2117   Info->setIfReturnsVoid(Outs.empty());
2118   bool IsWaveEnd = Info->returnsVoid() && IsShader;
2119 
2120   // CCValAssign - represents the assignment of the return value to a location.
2121   SmallVector<CCValAssign, 48> RVLocs;
2122   SmallVector<ISD::OutputArg, 48> Splits;
2123 
2124   // CCState - Info about the registers and stack slots.
2125   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2126                  *DAG.getContext());
2127 
2128   // Analyze outgoing return values.
2129   CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2130 
2131   SDValue Flag;
2132   SmallVector<SDValue, 48> RetOps;
2133   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2134 
2135   // Add return address for callable functions.
2136   if (!Info->isEntryFunction()) {
2137     const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2138     SDValue ReturnAddrReg = CreateLiveInRegister(
2139       DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2140 
2141     // FIXME: Should be able to use a vreg here, but need a way to prevent it
2142     // from being allocated to a CSR.
2143 
2144     SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2145                                                 MVT::i64);
2146 
2147     Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
2148     Flag = Chain.getValue(1);
2149 
2150     RetOps.push_back(PhysReturnAddrReg);
2151   }
2152 
2153   // Copy the result values into the output registers.
2154   for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
2155        ++I, ++RealRVLocIdx) {
2156     CCValAssign &VA = RVLocs[I];
2157     assert(VA.isRegLoc() && "Can only return in registers!");
2158     // TODO: Partially return in registers if return values don't fit.
2159     SDValue Arg = OutVals[RealRVLocIdx];
2160 
2161     // Copied from other backends.
2162     switch (VA.getLocInfo()) {
2163     case CCValAssign::Full:
2164       break;
2165     case CCValAssign::BCvt:
2166       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2167       break;
2168     case CCValAssign::SExt:
2169       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2170       break;
2171     case CCValAssign::ZExt:
2172       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2173       break;
2174     case CCValAssign::AExt:
2175       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2176       break;
2177     default:
2178       llvm_unreachable("Unknown loc info!");
2179     }
2180 
2181     Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2182     Flag = Chain.getValue(1);
2183     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2184   }
2185 
2186   // FIXME: Does sret work properly?
2187   if (!Info->isEntryFunction()) {
2188     const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2189     const MCPhysReg *I =
2190       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2191     if (I) {
2192       for (; *I; ++I) {
2193         if (AMDGPU::SReg_64RegClass.contains(*I))
2194           RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2195         else if (AMDGPU::SReg_32RegClass.contains(*I))
2196           RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2197         else
2198           llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2199       }
2200     }
2201   }
2202 
2203   // Update chain and glue.
2204   RetOps[0] = Chain;
2205   if (Flag.getNode())
2206     RetOps.push_back(Flag);
2207 
2208   unsigned Opc = AMDGPUISD::ENDPGM;
2209   if (!IsWaveEnd)
2210     Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
2211   return DAG.getNode(Opc, DL, MVT::Other, RetOps);
2212 }
2213 
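// Copy the values returned by a call out of their assigned physical
// registers, applying any required assert-ext, truncation or bitcast.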
2214 SDValue SITargetLowering::LowerCallResult(
2215     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2216     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2217     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2218     SDValue ThisVal) const {
2219   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2220 
2221   // Assign locations to each value returned by this call.
2222   SmallVector<CCValAssign, 16> RVLocs;
2223   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2224                  *DAG.getContext());
2225   CCInfo.AnalyzeCallResult(Ins, RetCC);
2226 
2227   // Copy all of the result registers out of their specified physreg.
2228   for (unsigned i = 0; i != RVLocs.size(); ++i) {
2229     CCValAssign VA = RVLocs[i];
2230     SDValue Val;
2231 
2232     if (VA.isRegLoc()) {
2233       Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2234       Chain = Val.getValue(1);
2235       InFlag = Val.getValue(2);
2236     } else if (VA.isMemLoc()) {
2237       report_fatal_error("TODO: return values in memory");
2238     } else
2239       llvm_unreachable("unknown argument location type");
2240 
2241     switch (VA.getLocInfo()) {
2242     case CCValAssign::Full:
2243       break;
2244     case CCValAssign::BCvt:
2245       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2246       break;
2247     case CCValAssign::ZExt:
2248       Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2249                         DAG.getValueType(VA.getValVT()));
2250       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2251       break;
2252     case CCValAssign::SExt:
2253       Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2254                         DAG.getValueType(VA.getValVT()));
2255       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2256       break;
2257     case CCValAssign::AExt:
2258       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2259       break;
2260     default:
2261       llvm_unreachable("Unknown loc info!");
2262     }
2263 
2264     InVals.push_back(Val);
2265   }
2266 
2267   return Chain;
2268 }
2269 
2270 // Add code to pass the special inputs required by the features in use,
2271 // separate from the explicit user arguments present in the IR.
2272 void SITargetLowering::passSpecialInputs(
2273     CallLoweringInfo &CLI,
2274     CCState &CCInfo,
2275     const SIMachineFunctionInfo &Info,
2276     SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2277     SmallVectorImpl<SDValue> &MemOpChains,
2278     SDValue Chain) const {
2279   // If we don't have a call site, this was a call inserted by
2280   // legalization. These can never use special inputs.
2281   if (!CLI.CS)
2282     return;
2283 
2284   const Function *CalleeFunc = CLI.CS.getCalledFunction();
2285   assert(CalleeFunc);
2286 
2287   SelectionDAG &DAG = CLI.DAG;
2288   const SDLoc &DL = CLI.DL;
2289 
2290   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2291 
2292   auto &ArgUsageInfo =
2293     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2294   const AMDGPUFunctionArgInfo &CalleeArgInfo
2295     = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2296 
2297   const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2298 
2299   // TODO: Unify with private memory register handling. This is complicated by
2300   // the fact that at least in kernels, the input argument is not necessarily
2301   // in the same location as the input.
2302   AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
2303     AMDGPUFunctionArgInfo::DISPATCH_PTR,
2304     AMDGPUFunctionArgInfo::QUEUE_PTR,
2305     AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
2306     AMDGPUFunctionArgInfo::DISPATCH_ID,
2307     AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
2308     AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
2309     AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
2310     AMDGPUFunctionArgInfo::WORKITEM_ID_X,
2311     AMDGPUFunctionArgInfo::WORKITEM_ID_Y,
2312     AMDGPUFunctionArgInfo::WORKITEM_ID_Z,
2313     AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
2314   };
2315 
2316   for (auto InputID : InputRegs) {
2317     const ArgDescriptor *OutgoingArg;
2318     const TargetRegisterClass *ArgRC;
2319 
2320     std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2321     if (!OutgoingArg)
2322       continue;
2323 
2324     const ArgDescriptor *IncomingArg;
2325     const TargetRegisterClass *IncomingArgRC;
2326     std::tie(IncomingArg, IncomingArgRC)
2327       = CallerArgInfo.getPreloadedValue(InputID);
2328     assert(IncomingArgRC == ArgRC);
2329 
2330     // All special arguments are ints for now.
2331     EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
2332     SDValue InputReg;
2333 
2334     if (IncomingArg) {
2335       InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2336     } else {
2337       // The implicit arg ptr is special because it doesn't have a corresponding
2338       // input for kernels, and is computed from the kernarg segment pointer.
2339       assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2340       InputReg = getImplicitArgPtr(DAG, DL);
2341     }
2342 
2343     if (OutgoingArg->isRegister()) {
2344       RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2345     } else {
2346       unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4);
2347       SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2348                                               SpecialArgOffset);
2349       MemOpChains.push_back(ArgStore);
2350     }
2351   }
2352 }
2353 
2354 static bool canGuaranteeTCO(CallingConv::ID CC) {
2355   return CC == CallingConv::Fast;
2356 }
2357 
2358 /// Return true if we might ever do TCO for calls with this calling convention.
2359 static bool mayTailCallThisCC(CallingConv::ID CC) {
2360   switch (CC) {
2361   case CallingConv::C:
2362     return true;
2363   default:
2364     return canGuaranteeTCO(CC);
2365   }
2366 }
2367 
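// Decide whether a call can be lowered as a tail call: the calling
// conventions, preserved register sets and stack argument area of caller and
// callee must all be compatible.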
2368 bool SITargetLowering::isEligibleForTailCallOptimization(
2369     SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2370     const SmallVectorImpl<ISD::OutputArg> &Outs,
2371     const SmallVectorImpl<SDValue> &OutVals,
2372     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2373   if (!mayTailCallThisCC(CalleeCC))
2374     return false;
2375 
2376   MachineFunction &MF = DAG.getMachineFunction();
2377   const Function &CallerF = MF.getFunction();
2378   CallingConv::ID CallerCC = CallerF.getCallingConv();
2379   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2380   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2381 
2382   // Kernels aren't callable, and don't have a live-in return address, so it
2383   // doesn't make sense to do a tail call with entry functions.
2384   if (!CallerPreserved)
2385     return false;
2386 
2387   bool CCMatch = CallerCC == CalleeCC;
2388 
2389   if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
2390     if (canGuaranteeTCO(CalleeCC) && CCMatch)
2391       return true;
2392     return false;
2393   }
2394 
2395   // TODO: Can we handle var args?
2396   if (IsVarArg)
2397     return false;
2398 
2399   for (const Argument &Arg : CallerF.args()) {
2400     if (Arg.hasByValAttr())
2401       return false;
2402   }
2403 
2404   LLVMContext &Ctx = *DAG.getContext();
2405 
2406   // Check that the call results are passed in the same way.
2407   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2408                                   CCAssignFnForCall(CalleeCC, IsVarArg),
2409                                   CCAssignFnForCall(CallerCC, IsVarArg)))
2410     return false;
2411 
2412   // The callee has to preserve all registers the caller needs to preserve.
2413   if (!CCMatch) {
2414     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2415     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2416       return false;
2417   }
2418 
2419   // Nothing more to check if the callee is taking no arguments.
2420   if (Outs.empty())
2421     return true;
2422 
2423   SmallVector<CCValAssign, 16> ArgLocs;
2424   CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2425 
2426   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2427 
2428   const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2429   // If the stack arguments for this call do not fit into our own save area,
2430   // the call cannot be lowered as a tail call.
2431   // TODO: Is this really necessary?
2432   if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2433     return false;
2434 
2435   const MachineRegisterInfo &MRI = MF.getRegInfo();
2436   return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2437 }
2438 
2439 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2440   if (!CI->isTailCall())
2441     return false;
2442 
2443   const Function *ParentFn = CI->getParent()->getParent();
2444   if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2445     return false;
2446 
2447   auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
2448   return (Attr.getValueAsString() != "true");
2449 }
2450 
2451 // The wave scratch offset register is used as the global base pointer.
2452 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2453                                     SmallVectorImpl<SDValue> &InVals) const {
2454   SelectionDAG &DAG = CLI.DAG;
2455   const SDLoc &DL = CLI.DL;
2456   SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2457   SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2458   SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2459   SDValue Chain = CLI.Chain;
2460   SDValue Callee = CLI.Callee;
2461   bool &IsTailCall = CLI.IsTailCall;
2462   CallingConv::ID CallConv = CLI.CallConv;
2463   bool IsVarArg = CLI.IsVarArg;
2464   bool IsSibCall = false;
2465   bool IsThisReturn = false;
2466   MachineFunction &MF = DAG.getMachineFunction();
2467 
2468   if (IsVarArg) {
2469     return lowerUnhandledCall(CLI, InVals,
2470                               "unsupported call to variadic function ");
2471   }
2472 
2473   if (!CLI.CS.getInstruction())
2474     report_fatal_error("unsupported libcall legalization");
2475 
2476   if (!CLI.CS.getCalledFunction()) {
2477     return lowerUnhandledCall(CLI, InVals,
2478                               "unsupported indirect call to function ");
2479   }
2480 
2481   if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2482     return lowerUnhandledCall(CLI, InVals,
2483                               "unsupported required tail call to function ");
2484   }
2485 
2486   if (AMDGPU::isShader(MF.getFunction().getCallingConv())) {
2487     // Note the issue is with the CC of the calling function, not of the call
2488     // itself.
2489     return lowerUnhandledCall(CLI, InVals,
2490                           "unsupported call from graphics shader of function ");
2491   }
2492 
2494   if (IsTailCall) {
2495     IsTailCall = isEligibleForTailCallOptimization(
2496       Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2497     if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2498       report_fatal_error("failed to perform tail call elimination on a call "
2499                          "site marked musttail");
2500     }
2501 
2502     bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2503 
2504     // A sibling call is one where we're under the usual C ABI and not planning
2505     // to change that but can still do a tail call:
2506     if (!TailCallOpt && IsTailCall)
2507       IsSibCall = true;
2508 
2509     if (IsTailCall)
2510       ++NumTailCalls;
2511   }
2512 
2513   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2514 
2515   // Analyze operands of the call, assigning locations to each operand.
2516   SmallVector<CCValAssign, 16> ArgLocs;
2517   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2518   CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
2519 
2520   // The first 4 bytes are reserved for the callee's emergency stack slot.
2521   CCInfo.AllocateStack(4, 4);
2522 
2523   CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2524 
2525   // Get a count of how many bytes are to be pushed on the stack.
2526   unsigned NumBytes = CCInfo.getNextStackOffset();
2527 
2528   if (IsSibCall) {
2529     // Since we're not changing the ABI to make this a tail call, the memory
2530     // operands are already available in the caller's incoming argument space.
2531     NumBytes = 0;
2532   }
2533 
2534   // FPDiff is the byte offset of the call's argument area from the callee's.
2535   // Stores to callee stack arguments will be placed in FixedStackSlots offset
2536   // by this amount for a tail call. In a sibling call it must be 0 because the
2537   // caller will deallocate the entire stack and the callee still expects its
2538   // arguments to begin at SP+0. Completely unused for non-tail calls.
2539   int32_t FPDiff = 0;
2540   MachineFrameInfo &MFI = MF.getFrameInfo();
2541   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2542 
2543   SDValue CallerSavedFP;
2544 
2545   // Adjust the stack pointer for the new arguments...
2546   // These operations are automatically eliminated by the prolog/epilog pass
2547   if (!IsSibCall) {
2548     Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2549 
2550     unsigned OffsetReg = Info->getScratchWaveOffsetReg();
2551 
2552     // In the HSA case, this should be an identity copy.
2553     SDValue ScratchRSrcReg
2554       = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2555     RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
2556 
2557     // TODO: Don't hardcode these registers; get them from the callee function.
2558     SDValue ScratchWaveOffsetReg
2559       = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32);
2560     RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg);
2561 
2562     if (!Info->isEntryFunction()) {
2563       // Avoid clobbering this function's FP value. In the current convention
2564       // the callee will overwrite it, so save and restore it around the call.
2565       CallerSavedFP = DAG.getCopyFromReg(Chain, DL,
2566                                          Info->getFrameOffsetReg(), MVT::i32);
2567     }
2568   }
2569 
2570   SmallVector<SDValue, 8> MemOpChains;
2571   MVT PtrVT = MVT::i32;
2572 
2573   // Walk the register/memloc assignments, inserting copies/loads.
2574   for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2575        ++i, ++realArgIdx) {
2576     CCValAssign &VA = ArgLocs[i];
2577     SDValue Arg = OutVals[realArgIdx];
2578 
2579     // Promote the value if needed.
2580     switch (VA.getLocInfo()) {
2581     case CCValAssign::Full:
2582       break;
2583     case CCValAssign::BCvt:
2584       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2585       break;
2586     case CCValAssign::ZExt:
2587       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2588       break;
2589     case CCValAssign::SExt:
2590       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2591       break;
2592     case CCValAssign::AExt:
2593       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2594       break;
2595     case CCValAssign::FPExt:
2596       Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2597       break;
2598     default:
2599       llvm_unreachable("Unknown loc info!");
2600     }
2601 
2602     if (VA.isRegLoc()) {
2603       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2604     } else {
2605       assert(VA.isMemLoc());
2606 
2607       SDValue DstAddr;
2608       MachinePointerInfo DstInfo;
2609 
2610       unsigned LocMemOffset = VA.getLocMemOffset();
2611       int32_t Offset = LocMemOffset;
2612 
2613       SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT);
2614       unsigned Align = 0;
2615 
2616       if (IsTailCall) {
2617         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2618         unsigned OpSize = Flags.isByVal() ?
2619           Flags.getByValSize() : VA.getValVT().getStoreSize();
2620 
2621         // FIXME: We can have better than the minimum byval required alignment.
2622         Align = Flags.isByVal() ? Flags.getByValAlign() :
2623           MinAlign(Subtarget->getStackAlignment(), Offset);
2624 
2625         Offset = Offset + FPDiff;
2626         int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2627 
2628         DstAddr = DAG.getFrameIndex(FI, PtrVT);
2629         DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2630 
2631         // Make sure any stack arguments overlapping with where we're storing
2632         // are loaded before this eventual operation. Otherwise they'll be
2633         // clobbered.
2634 
2635         // FIXME: Why is this really necessary? This seems to just result in a
2636         // lot of code to copy the stack arguments and write them back to
2637         // the same locations, which are supposed to be immutable?
2638         Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2639       } else {
2640         DstAddr = PtrOff;
2641         DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
2642         Align = MinAlign(Subtarget->getStackAlignment(), LocMemOffset);
2643       }
2644 
2645       if (Outs[i].Flags.isByVal()) {
2646         SDValue SizeNode =
2647             DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2648         SDValue Cpy = DAG.getMemcpy(
2649             Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2650             /*isVol = */ false, /*AlwaysInline = */ true,
2651             /*isTailCall = */ false, DstInfo,
2652             MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
2653                 *DAG.getContext(), AMDGPUAS::PRIVATE_ADDRESS))));
2654 
2655         MemOpChains.push_back(Cpy);
2656       } else {
2657         SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Align);
2658         MemOpChains.push_back(Store);
2659       }
2660     }
2661   }
2662 
2663   // Copy special input registers after user input arguments.
2664   passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain);
2665 
2666   if (!MemOpChains.empty())
2667     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2668 
2669   // Build a sequence of copy-to-reg nodes chained together with token chain
2670   // and flag operands which copy the outgoing args into the appropriate regs.
2671   SDValue InFlag;
2672   for (auto &RegToPass : RegsToPass) {
2673     Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2674                              RegToPass.second, InFlag);
2675     InFlag = Chain.getValue(1);
2676   }
2677 
2679   SDValue PhysReturnAddrReg;
2680   if (IsTailCall) {
2681     // Since the return is being combined with the call, we need to pass on the
2682     // return address.
2683 
2684     const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2685     SDValue ReturnAddrReg = CreateLiveInRegister(
2686       DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2687 
2688     PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2689                                         MVT::i64);
2690     Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2691     InFlag = Chain.getValue(1);
2692   }
2693 
2694   // We don't usually want to end the call-sequence here because we would tidy
2695   // the frame up *after* the call. However, in the ABI-changing tail-call case
2696   // we've carefully laid out the parameters so that when SP is reset they'll
2697   // be in the correct location.
2698   if (IsTailCall && !IsSibCall) {
2699     Chain = DAG.getCALLSEQ_END(Chain,
2700                                DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2701                                DAG.getTargetConstant(0, DL, MVT::i32),
2702                                InFlag, DL);
2703     InFlag = Chain.getValue(1);
2704   }
2705 
2706   std::vector<SDValue> Ops;
2707   Ops.push_back(Chain);
2708   Ops.push_back(Callee);
2709   // Add a redundant copy of the callee global which will not be legalized, as
2710   // we need direct access to the callee later.
2711   GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Callee);
2712   const GlobalValue *GV = GSD->getGlobal();
2713   Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64));
2714 
2715   if (IsTailCall) {
2716     // Each tail call may have to adjust the stack by a different amount, so
2717     // this information must travel along with the operation for eventual
2718     // consumption by emitEpilogue.
2719     Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
2720 
2721     Ops.push_back(PhysReturnAddrReg);
2722   }
2723 
2724   // Add argument registers to the end of the list so that they are known live
2725   // into the call.
2726   for (auto &RegToPass : RegsToPass) {
2727     Ops.push_back(DAG.getRegister(RegToPass.first,
2728                                   RegToPass.second.getValueType()));
2729   }
2730 
2731   // Add a register mask operand representing the call-preserved registers.
2732 
2733   auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
2734   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2735   assert(Mask && "Missing call preserved mask for calling convention");
2736   Ops.push_back(DAG.getRegisterMask(Mask));
2737 
2738   if (InFlag.getNode())
2739     Ops.push_back(InFlag);
2740 
2741   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2742 
  // If we're doing a tail call, use a TC_RETURN here rather than an
  // actual call instruction.
2745   if (IsTailCall) {
2746     MFI.setHasTailCall();
2747     return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
2748   }
2749 
2750   // Returns a chain and a flag for retval copy to use.
2751   SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2752   Chain = Call.getValue(0);
2753   InFlag = Call.getValue(1);
2754 
2755   if (CallerSavedFP) {
2756     SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32);
2757     Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag);
2758     InFlag = Chain.getValue(1);
2759   }
2760 
2761   uint64_t CalleePopBytes = NumBytes;
2762   Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
2763                              DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2764                              InFlag, DL);
2765   if (!Ins.empty())
2766     InFlag = Chain.getValue(1);
2767 
2768   // Handle result values, copying them out of physregs into vregs that we
2769   // return.
2770   return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2771                          InVals, IsThisReturn,
2772                          IsThisReturn ? OutVals[0] : SDValue());
2773 }
2774 
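// Resolve named registers for the llvm.read_register / llvm.write_register
// intrinsics. For example (illustrative IR; the metadata node just wraps the
// register name):
//   %exec = call i64 @llvm.read_register.i64(metadata !0)
//   !0 = !{!"exec"}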
2775 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2776                                              SelectionDAG &DAG) const {
2777   unsigned Reg = StringSwitch<unsigned>(RegName)
2778     .Case("m0", AMDGPU::M0)
2779     .Case("exec", AMDGPU::EXEC)
2780     .Case("exec_lo", AMDGPU::EXEC_LO)
2781     .Case("exec_hi", AMDGPU::EXEC_HI)
2782     .Case("flat_scratch", AMDGPU::FLAT_SCR)
2783     .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2784     .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2785     .Default(AMDGPU::NoRegister);
2786 
  if (Reg == AMDGPU::NoRegister) {
    report_fatal_error(Twine("invalid register name \""
                             + StringRef(RegName) + "\"."));
  }
2792 
2793   if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
2794       Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
    report_fatal_error(Twine("invalid register \""
                             + StringRef(RegName) + "\" for subtarget."));
2797   }
2798 
2799   switch (Reg) {
2800   case AMDGPU::M0:
2801   case AMDGPU::EXEC_LO:
2802   case AMDGPU::EXEC_HI:
2803   case AMDGPU::FLAT_SCR_LO:
2804   case AMDGPU::FLAT_SCR_HI:
2805     if (VT.getSizeInBits() == 32)
2806       return Reg;
2807     break;
2808   case AMDGPU::EXEC:
2809   case AMDGPU::FLAT_SCR:
2810     if (VT.getSizeInBits() == 64)
2811       return Reg;
2812     break;
2813   default:
2814     llvm_unreachable("missing register type checking");
2815   }
2816 
2817   report_fatal_error(Twine("invalid type for register \""
2818                            + StringRef(RegName) + "\"."));
2819 }
2820 
2821 // If kill is not the last instruction, split the block so kill is always a
2822 // proper terminator.
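// Illustratively, a block of the form
//   bb:
//     ...
//     SI_KILL_*_PSEUDO ...
//     <rest>
// becomes
//   bb:
//     ...
//     <kill terminator>
//   bb.split:
//     <rest>
// where bb.split inherits bb's successors.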
2823 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
2824                                                     MachineBasicBlock *BB) const {
2825   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
2826 
2827   MachineBasicBlock::iterator SplitPoint(&MI);
2828   ++SplitPoint;
2829 
2830   if (SplitPoint == BB->end()) {
2831     // Don't bother with a new block.
2832     MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
2833     return BB;
2834   }
2835 
2836   MachineFunction *MF = BB->getParent();
2837   MachineBasicBlock *SplitBB
2838     = MF->CreateMachineBasicBlock(BB->getBasicBlock());
2839 
2840   MF->insert(++MachineFunction::iterator(BB), SplitBB);
2841   SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
2842 
2843   SplitBB->transferSuccessorsAndUpdatePHIs(BB);
2844   BB->addSuccessor(SplitBB);
2845 
2846   MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
2847   return SplitBB;
2848 }
2849 
2850 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
2851 // wavefront. If the value is uniform and just happens to be in a VGPR, this
2852 // will only do one iteration. In the worst case, this will loop 64 times.
2853 //
2854 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
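//
// A rough sketch of the emitted loop (movrel path, offset 0; register names
// are placeholders):
//   loop:
//     v_readfirstlane_b32 s_idx, v_idx          ; pick one lane's index
//     v_cmp_eq_u32_e64    s_cond, s_idx, v_idx  ; lanes with the same index
//     s_and_saveexec_b64  s_saved, s_cond       ; restrict exec to those lanes
//     s_mov_b32           m0, s_idx
//     ; the indirect access itself is inserted by the caller after this loop
//     s_xor_b64           exec, exec, s_saved   ; clear the lanes just handled
//     s_cbranch_execnz    loop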
2855 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
2856   const SIInstrInfo *TII,
2857   MachineRegisterInfo &MRI,
2858   MachineBasicBlock &OrigBB,
2859   MachineBasicBlock &LoopBB,
2860   const DebugLoc &DL,
2861   const MachineOperand &IdxReg,
2862   unsigned InitReg,
2863   unsigned ResultReg,
2864   unsigned PhiReg,
2865   unsigned InitSaveExecReg,
2866   int Offset,
2867   bool UseGPRIdxMode,
2868   bool IsIndirectSrc) {
2869   MachineBasicBlock::iterator I = LoopBB.begin();
2870 
2871   unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2872   unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2873   unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2874   unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2875 
2876   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
2877     .addReg(InitReg)
2878     .addMBB(&OrigBB)
2879     .addReg(ResultReg)
2880     .addMBB(&LoopBB);
2881 
2882   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
2883     .addReg(InitSaveExecReg)
2884     .addMBB(&OrigBB)
2885     .addReg(NewExec)
2886     .addMBB(&LoopBB);
2887 
2888   // Read the next variant <- also loop target.
2889   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
2890     .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
2891 
  // Compare the index value just read against the per-lane index values.
2893   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
2894     .addReg(CurrentIdxReg)
2895     .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
2896 
2897   // Update EXEC, save the original EXEC value to VCC.
2898   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
2899     .addReg(CondReg, RegState::Kill);
2900 
2901   MRI.setSimpleHint(NewExec, CondReg);
2902 
2903   if (UseGPRIdxMode) {
2904     unsigned IdxReg;
2905     if (Offset == 0) {
2906       IdxReg = CurrentIdxReg;
2907     } else {
2908       IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2909       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
2910         .addReg(CurrentIdxReg, RegState::Kill)
2911         .addImm(Offset);
2912     }
2913     unsigned IdxMode = IsIndirectSrc ?
2914       AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
2915     MachineInstr *SetOn =
2916       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2917       .addReg(IdxReg, RegState::Kill)
2918       .addImm(IdxMode);
2919     SetOn->getOperand(3).setIsUndef();
2920   } else {
    // Move the index (plus any constant offset) from the SGPR into M0.
2922     if (Offset == 0) {
2923       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2924         .addReg(CurrentIdxReg, RegState::Kill);
2925     } else {
2926       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
2927         .addReg(CurrentIdxReg, RegState::Kill)
2928         .addImm(Offset);
2929     }
2930   }
2931 
2932   // Update EXEC, switch all done bits to 0 and all todo bits to 1.
2933   MachineInstr *InsertPt =
2934     BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC)
2935     .addReg(AMDGPU::EXEC)
2936     .addReg(NewExec);
2937 
2938   // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
2939   // s_cbranch_scc0?
2940 
2941   // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
2942   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
2943     .addMBB(&LoopBB);
2944 
2945   return InsertPt->getIterator();
2946 }
2947 
// This has slightly sub-optimal register allocation when the source vector is
// killed by the read. The register allocator does not understand that the kill
// is per-workitem, so the source is kept alive for the whole loop and we end up
// not reusing a subregister from it, using one more VGPR than necessary. The
// extra VGPR was avoided back when this was expanded after register allocation.
2953 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
2954                                                   MachineBasicBlock &MBB,
2955                                                   MachineInstr &MI,
2956                                                   unsigned InitResultReg,
2957                                                   unsigned PhiReg,
2958                                                   int Offset,
2959                                                   bool UseGPRIdxMode,
2960                                                   bool IsIndirectSrc) {
2961   MachineFunction *MF = MBB.getParent();
2962   MachineRegisterInfo &MRI = MF->getRegInfo();
2963   const DebugLoc &DL = MI.getDebugLoc();
2964   MachineBasicBlock::iterator I(&MI);
2965 
2966   unsigned DstReg = MI.getOperand(0).getReg();
2967   unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
2968   unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
2969 
2970   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
2971 
2972   // Save the EXEC mask
2973   BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
2974     .addReg(AMDGPU::EXEC);
2975 
2976   // To insert the loop we need to split the block. Move everything after this
2977   // point to a new block, and insert a new empty block between the two.
2978   MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
2979   MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
2980   MachineFunction::iterator MBBI(MBB);
2981   ++MBBI;
2982 
2983   MF->insert(MBBI, LoopBB);
2984   MF->insert(MBBI, RemainderBB);
2985 
2986   LoopBB->addSuccessor(LoopBB);
2987   LoopBB->addSuccessor(RemainderBB);
2988 
2989   // Move the rest of the block into a new block.
2990   RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
2991   RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
2992 
2993   MBB.addSuccessor(LoopBB);
2994 
2995   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2996 
2997   auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
2998                                       InitResultReg, DstReg, PhiReg, TmpExec,
2999                                       Offset, UseGPRIdxMode, IsIndirectSrc);
3000 
3001   MachineBasicBlock::iterator First = RemainderBB->begin();
3002   BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
3003     .addReg(SaveExec);
3004 
3005   return InsPt;
3006 }
3007 
3008 // Returns subreg index, offset
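// For example, with a 128-bit (4 x 32-bit) super register class:
//   Offset 2 -> (AMDGPU::sub2, 0)  - statically in range, folded into a subreg
//   Offset 7 -> (AMDGPU::sub0, 7)  - out of range, keep the dynamic offset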
3009 static std::pair<unsigned, int>
3010 computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
3011                             const TargetRegisterClass *SuperRC,
3012                             unsigned VecReg,
3013                             int Offset) {
3014   int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
3015 
3016   // Skip out of bounds offsets, or else we would end up using an undefined
3017   // register.
3018   if (Offset >= NumElts || Offset < 0)
3019     return std::make_pair(AMDGPU::sub0, Offset);
3020 
3021   return std::make_pair(AMDGPU::sub0 + Offset, 0);
3022 }
3023 
3024 // Return true if the index is an SGPR and was set.
3025 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
3026                                  MachineRegisterInfo &MRI,
3027                                  MachineInstr &MI,
3028                                  int Offset,
3029                                  bool UseGPRIdxMode,
3030                                  bool IsIndirectSrc) {
3031   MachineBasicBlock *MBB = MI.getParent();
3032   const DebugLoc &DL = MI.getDebugLoc();
3033   MachineBasicBlock::iterator I(&MI);
3034 
3035   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3036   const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
3037 
3038   assert(Idx->getReg() != AMDGPU::NoRegister);
3039 
3040   if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
3041     return false;
3042 
3043   if (UseGPRIdxMode) {
3044     unsigned IdxMode = IsIndirectSrc ?
3045       AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
3046     if (Offset == 0) {
3047       MachineInstr *SetOn =
3048           BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3049               .add(*Idx)
3050               .addImm(IdxMode);
3051 
3052       SetOn->getOperand(3).setIsUndef();
3053     } else {
3054       unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3055       BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
3056           .add(*Idx)
3057           .addImm(Offset);
3058       MachineInstr *SetOn =
3059         BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3060         .addReg(Tmp, RegState::Kill)
3061         .addImm(IdxMode);
3062 
3063       SetOn->getOperand(3).setIsUndef();
3064     }
3065 
3066     return true;
3067   }
3068 
3069   if (Offset == 0) {
3070     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3071       .add(*Idx);
3072   } else {
3073     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
3074       .add(*Idx)
3075       .addImm(Offset);
3076   }
3077 
3078   return true;
3079 }
3080 
3081 // Control flow needs to be inserted if indexing with a VGPR.
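// If the index is already in an SGPR (setM0ToIndexFromSGPR succeeds), a single
// inline movrel / GPR-index-mode read suffices. Otherwise, loadM0FromVGPR
// builds the waterfall loop above and the read is emitted inside it.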
3082 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
3083                                           MachineBasicBlock &MBB,
3084                                           const GCNSubtarget &ST) {
3085   const SIInstrInfo *TII = ST.getInstrInfo();
3086   const SIRegisterInfo &TRI = TII->getRegisterInfo();
3087   MachineFunction *MF = MBB.getParent();
3088   MachineRegisterInfo &MRI = MF->getRegInfo();
3089 
3090   unsigned Dst = MI.getOperand(0).getReg();
3091   unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
3092   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3093 
3094   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
3095 
3096   unsigned SubReg;
3097   std::tie(SubReg, Offset)
3098     = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
3099 
3100   bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
3101 
3102   if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
3103     MachineBasicBlock::iterator I(&MI);
3104     const DebugLoc &DL = MI.getDebugLoc();
3105 
3106     if (UseGPRIdxMode) {
3107       // TODO: Look at the uses to avoid the copy. This may require rescheduling
3108       // to avoid interfering with other uses, so probably requires a new
3109       // optimization pass.
3110       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3111         .addReg(SrcReg, RegState::Undef, SubReg)
3112         .addReg(SrcReg, RegState::Implicit)
3113         .addReg(AMDGPU::M0, RegState::Implicit);
3114       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3115     } else {
3116       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3117         .addReg(SrcReg, RegState::Undef, SubReg)
3118         .addReg(SrcReg, RegState::Implicit);
3119     }
3120 
3121     MI.eraseFromParent();
3122 
3123     return &MBB;
3124   }
3125 
3126   const DebugLoc &DL = MI.getDebugLoc();
3127   MachineBasicBlock::iterator I(&MI);
3128 
3129   unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3130   unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3131 
3132   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
3133 
3134   auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
3135                               Offset, UseGPRIdxMode, true);
3136   MachineBasicBlock *LoopBB = InsPt->getParent();
3137 
3138   if (UseGPRIdxMode) {
3139     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3140       .addReg(SrcReg, RegState::Undef, SubReg)
3141       .addReg(SrcReg, RegState::Implicit)
3142       .addReg(AMDGPU::M0, RegState::Implicit);
3143     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3144   } else {
3145     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3146       .addReg(SrcReg, RegState::Undef, SubReg)
3147       .addReg(SrcReg, RegState::Implicit);
3148   }
3149 
3150   MI.eraseFromParent();
3151 
3152   return LoopBB;
3153 }
3154 
3155 static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
3156                                  const TargetRegisterClass *VecRC) {
3157   switch (TRI.getRegSizeInBits(*VecRC)) {
3158   case 32: // 4 bytes
3159     return AMDGPU::V_MOVRELD_B32_V1;
3160   case 64: // 8 bytes
3161     return AMDGPU::V_MOVRELD_B32_V2;
3162   case 128: // 16 bytes
3163     return AMDGPU::V_MOVRELD_B32_V4;
3164   case 256: // 32 bytes
3165     return AMDGPU::V_MOVRELD_B32_V8;
3166   case 512: // 64 bytes
3167     return AMDGPU::V_MOVRELD_B32_V16;
3168   default:
3169     llvm_unreachable("unsupported size for MOVRELD pseudos");
3170   }
3171 }
3172 
3173 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3174                                           MachineBasicBlock &MBB,
3175                                           const GCNSubtarget &ST) {
3176   const SIInstrInfo *TII = ST.getInstrInfo();
3177   const SIRegisterInfo &TRI = TII->getRegisterInfo();
3178   MachineFunction *MF = MBB.getParent();
3179   MachineRegisterInfo &MRI = MF->getRegInfo();
3180 
3181   unsigned Dst = MI.getOperand(0).getReg();
3182   const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3183   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3184   const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3185   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3186   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3187 
3188   // This can be an immediate, but will be folded later.
3189   assert(Val->getReg());
3190 
3191   unsigned SubReg;
3192   std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3193                                                          SrcVec->getReg(),
3194                                                          Offset);
3195   bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
3196 
3197   if (Idx->getReg() == AMDGPU::NoRegister) {
3198     MachineBasicBlock::iterator I(&MI);
3199     const DebugLoc &DL = MI.getDebugLoc();
3200 
3201     assert(Offset == 0);
3202 
3203     BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
3204         .add(*SrcVec)
3205         .add(*Val)
3206         .addImm(SubReg);
3207 
3208     MI.eraseFromParent();
3209     return &MBB;
3210   }
3211 
3212   if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
3213     MachineBasicBlock::iterator I(&MI);
3214     const DebugLoc &DL = MI.getDebugLoc();
3215 
3216     if (UseGPRIdxMode) {
3217       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3218           .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
3219           .add(*Val)
3220           .addReg(Dst, RegState::ImplicitDefine)
3221           .addReg(SrcVec->getReg(), RegState::Implicit)
3222           .addReg(AMDGPU::M0, RegState::Implicit);
3223 
3224       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3225     } else {
3226       const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3227 
3228       BuildMI(MBB, I, DL, MovRelDesc)
3229           .addReg(Dst, RegState::Define)
3230           .addReg(SrcVec->getReg())
3231           .add(*Val)
3232           .addImm(SubReg - AMDGPU::sub0);
3233     }
3234 
3235     MI.eraseFromParent();
3236     return &MBB;
3237   }
3238 
3239   if (Val->isReg())
3240     MRI.clearKillFlags(Val->getReg());
3241 
3242   const DebugLoc &DL = MI.getDebugLoc();
3243 
3244   unsigned PhiReg = MRI.createVirtualRegister(VecRC);
3245 
3246   auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
3247                               Offset, UseGPRIdxMode, false);
3248   MachineBasicBlock *LoopBB = InsPt->getParent();
3249 
3250   if (UseGPRIdxMode) {
3251     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3252         .addReg(PhiReg, RegState::Undef, SubReg) // vdst
3253         .add(*Val)                               // src0
3254         .addReg(Dst, RegState::ImplicitDefine)
3255         .addReg(PhiReg, RegState::Implicit)
3256         .addReg(AMDGPU::M0, RegState::Implicit);
3257     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3258   } else {
3259     const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3260 
3261     BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
3262         .addReg(Dst, RegState::Define)
3263         .addReg(PhiReg)
3264         .add(*Val)
3265         .addImm(SubReg - AMDGPU::sub0);
3266   }
3267 
3268   MI.eraseFromParent();
3269 
3270   return LoopBB;
3271 }
3272 
3273 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3274   MachineInstr &MI, MachineBasicBlock *BB) const {
3275 
3276   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3277   MachineFunction *MF = BB->getParent();
3278   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3279 
3280   if (TII->isMIMG(MI)) {
3281     if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3282       report_fatal_error("missing mem operand from MIMG instruction");
3283     }
    // MIMG instructions that access memory are required to carry a memoperand
    // so that they aren't treated as ordered memory instructions.
    return BB;
3288   }
3289 
3290   switch (MI.getOpcode()) {
3291   case AMDGPU::S_ADD_U64_PSEUDO:
3292   case AMDGPU::S_SUB_U64_PSEUDO: {
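    // Expand the 64-bit scalar add/sub into a 32-bit op on the low half
    // followed by a carry-propagating op on the high half, roughly:
    //   s_add_u32  dst.lo, src0.lo, src1.lo
    //   s_addc_u32 dst.hi, src0.hi, src1.hi
    // (s_sub_u32 / s_subb_u32 for the SUB pseudo), then recombine the halves
    // with a REG_SEQUENCE.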
3293     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3294     const DebugLoc &DL = MI.getDebugLoc();
3295 
3296     MachineOperand &Dest = MI.getOperand(0);
3297     MachineOperand &Src0 = MI.getOperand(1);
3298     MachineOperand &Src1 = MI.getOperand(2);
3299 
3300     unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3301     unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3302 
3303     MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3304      Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3305      &AMDGPU::SReg_32_XM0RegClass);
3306     MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3307       Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3308       &AMDGPU::SReg_32_XM0RegClass);
3309 
3310     MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3311       Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3312       &AMDGPU::SReg_32_XM0RegClass);
3313     MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3314       Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3315       &AMDGPU::SReg_32_XM0RegClass);
3316 
3317     bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3318 
3319     unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3320     unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3321     BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3322       .add(Src0Sub0)
3323       .add(Src1Sub0);
3324     BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3325       .add(Src0Sub1)
3326       .add(Src1Sub1);
3327     BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3328       .addReg(DestSub0)
3329       .addImm(AMDGPU::sub0)
3330       .addReg(DestSub1)
3331       .addImm(AMDGPU::sub1);
3332     MI.eraseFromParent();
3333     return BB;
3334   }
3335   case AMDGPU::SI_INIT_M0: {
3336     BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
3337             TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3338         .add(MI.getOperand(0));
3339     MI.eraseFromParent();
3340     return BB;
3341   }
3342   case AMDGPU::SI_INIT_EXEC:
3343     // This should be before all vector instructions.
3344     BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3345             AMDGPU::EXEC)
3346         .addImm(MI.getOperand(0).getImm());
3347     MI.eraseFromParent();
3348     return BB;
3349 
3350   case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3351     // Extract the thread count from an SGPR input and set EXEC accordingly.
3352     // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3353     //
3354     // S_BFE_U32 count, input, {shift, 7}
3355     // S_BFM_B64 exec, count, 0
3356     // S_CMP_EQ_U32 count, 64
3357     // S_CMOV_B64 exec, -1
3358     MachineInstr *FirstMI = &*BB->begin();
3359     MachineRegisterInfo &MRI = MF->getRegInfo();
3360     unsigned InputReg = MI.getOperand(0).getReg();
3361     unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3362     bool Found = false;
3363 
3364     // Move the COPY of the input reg to the beginning, so that we can use it.
3365     for (auto I = BB->begin(); I != &MI; I++) {
3366       if (I->getOpcode() != TargetOpcode::COPY ||
3367           I->getOperand(0).getReg() != InputReg)
3368         continue;
3369 
3370       if (I == FirstMI) {
3371         FirstMI = &*++BB->begin();
3372       } else {
3373         I->removeFromParent();
3374         BB->insert(FirstMI, &*I);
3375       }
3376       Found = true;
3377       break;
3378     }
3379     assert(Found);
3380     (void)Found;
3381 
3382     // This should be before all vector instructions.
3383     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3384         .addReg(InputReg)
3385         .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
3386     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
3387             AMDGPU::EXEC)
3388         .addReg(CountReg)
3389         .addImm(0);
3390     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3391         .addReg(CountReg, RegState::Kill)
3392         .addImm(64);
3393     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
3394             AMDGPU::EXEC)
3395         .addImm(-1);
3396     MI.eraseFromParent();
3397     return BB;
3398   }
3399 
3400   case AMDGPU::GET_GROUPSTATICSIZE: {
3401     DebugLoc DL = MI.getDebugLoc();
3402     BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
3403         .add(MI.getOperand(0))
3404         .addImm(MFI->getLDSSize());
3405     MI.eraseFromParent();
3406     return BB;
3407   }
3408   case AMDGPU::SI_INDIRECT_SRC_V1:
3409   case AMDGPU::SI_INDIRECT_SRC_V2:
3410   case AMDGPU::SI_INDIRECT_SRC_V4:
3411   case AMDGPU::SI_INDIRECT_SRC_V8:
3412   case AMDGPU::SI_INDIRECT_SRC_V16:
3413     return emitIndirectSrc(MI, *BB, *getSubtarget());
3414   case AMDGPU::SI_INDIRECT_DST_V1:
3415   case AMDGPU::SI_INDIRECT_DST_V2:
3416   case AMDGPU::SI_INDIRECT_DST_V4:
3417   case AMDGPU::SI_INDIRECT_DST_V8:
3418   case AMDGPU::SI_INDIRECT_DST_V16:
3419     return emitIndirectDst(MI, *BB, *getSubtarget());
3420   case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3421   case AMDGPU::SI_KILL_I1_PSEUDO:
3422     return splitKillBlock(MI, BB);
3423   case AMDGPU::V_CNDMASK_B64_PSEUDO: {
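    // Expand the 64-bit select into two 32-bit v_cndmask_b32s operating on the
    // sub0 and sub1 halves with a shared copy of the condition, roughly:
    //   v_cndmask_b32_e64 dst.lo, src0.lo, src1.lo, cond
    //   v_cndmask_b32_e64 dst.hi, src0.hi, src1.hi, cond
    // and recombine the halves with a REG_SEQUENCE.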
3424     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3425 
3426     unsigned Dst = MI.getOperand(0).getReg();
3427     unsigned Src0 = MI.getOperand(1).getReg();
3428     unsigned Src1 = MI.getOperand(2).getReg();
3429     const DebugLoc &DL = MI.getDebugLoc();
3430     unsigned SrcCond = MI.getOperand(3).getReg();
3431 
3432     unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3433     unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3434     unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
3435 
3436     BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3437       .addReg(SrcCond);
3438     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3439       .addReg(Src0, 0, AMDGPU::sub0)
3440       .addReg(Src1, 0, AMDGPU::sub0)
3441       .addReg(SrcCondCopy);
3442     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3443       .addReg(Src0, 0, AMDGPU::sub1)
3444       .addReg(Src1, 0, AMDGPU::sub1)
3445       .addReg(SrcCondCopy);
3446 
3447     BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3448       .addReg(DstLo)
3449       .addImm(AMDGPU::sub0)
3450       .addReg(DstHi)
3451       .addImm(AMDGPU::sub1);
3452     MI.eraseFromParent();
3453     return BB;
3454   }
3455   case AMDGPU::SI_BR_UNDEF: {
3456     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3457     const DebugLoc &DL = MI.getDebugLoc();
3458     MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3459                            .add(MI.getOperand(0));
3460     Br->getOperand(1).setIsUndef(true); // read undef SCC
3461     MI.eraseFromParent();
3462     return BB;
3463   }
3464   case AMDGPU::ADJCALLSTACKUP:
3465   case AMDGPU::ADJCALLSTACKDOWN: {
3466     const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3467     MachineInstrBuilder MIB(*MF, &MI);
3468 
    // Add an implicit use of the frame offset reg to prevent the restore copy
    // inserted after the call from being reordered after stack operations in
    // the caller's frame.
3472     MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
3473         .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit)
3474         .addReg(Info->getFrameOffsetReg(), RegState::Implicit);
3475     return BB;
3476   }
3477   case AMDGPU::SI_CALL_ISEL: {
3478     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3479     const DebugLoc &DL = MI.getDebugLoc();
3480 
3481     unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
3482 
3483     MachineInstrBuilder MIB;
3484     MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg);
3485 
3486     for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
3487       MIB.add(MI.getOperand(I));
3488 
3489     MIB.cloneMemRefs(MI);
3490     MI.eraseFromParent();
3491     return BB;
3492   }
3493   default:
3494     return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
3495   }
3496 }
3497 
3498 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3499   return isTypeLegal(VT.getScalarType());
3500 }
3501 
3502 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3503   // This currently forces unfolding various combinations of fsub into fma with
3504   // free fneg'd operands. As long as we have fast FMA (controlled by
3505   // isFMAFasterThanFMulAndFAdd), we should perform these.
3506 
3507   // When fma is quarter rate, for f64 where add / sub are at best half rate,
3508   // most of these combines appear to be cycle neutral but save on instruction
3509   // count / code size.
3510   return true;
3511 }
3512 
3513 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3514                                          EVT VT) const {
3515   if (!VT.isVector()) {
3516     return MVT::i1;
3517   }
3518   return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
3519 }
3520 
3521 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3522   // TODO: Should i16 be used always if legal? For now it would force VALU
3523   // shifts.
3524   return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
3525 }
3526 
// Answering this is somewhat tricky and depends on the specific device, since
// devices differ in their rates for fma and for f64 operations in general.
3529 //
3530 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3531 // regardless of which device (although the number of cycles differs between
3532 // devices), so it is always profitable for f64.
3533 //
3534 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3535 // only on full rate devices. Normally, we should prefer selecting v_mad_f32
3536 // which we can always do even without fused FP ops since it returns the same
3537 // result as the separate operations and since it is always full
3538 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
3539 // however does not support denormals, so we do report fma as faster if we have
3540 // a fast fma device and require denormals.
3541 //
3542 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3543   VT = VT.getScalarType();
3544 
3545   switch (VT.getSimpleVT().SimpleTy) {
3546   case MVT::f32: {
    // FMA is as fast as a separate multiply and add on some subtargets.
    // However, full rate f32 mad is always available and returns the same
    // result as the separate operations, so we prefer it over fma. Since mad
    // does not handle denormals, only report fma as faster when denormals are
    // required.
3551     if (Subtarget->hasFP32Denormals())
3552       return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
3553 
3554     // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
3555     return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
3556   }
3557   case MVT::f64:
3558     return true;
3559   case MVT::f16:
3560     return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
3561   default:
3562     break;
3563   }
3564 
3565   return false;
3566 }
3567 
3568 //===----------------------------------------------------------------------===//
3569 // Custom DAG Lowering Operations
3570 //===----------------------------------------------------------------------===//
3571 
3572 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3573 // wider vector type is legal.
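// For instance, an fneg of v4f16 is rewritten as two fnegs on the v2f16 halves
// whose results are reassembled with CONCAT_VECTORS.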
3574 SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
3575                                              SelectionDAG &DAG) const {
3576   unsigned Opc = Op.getOpcode();
3577   EVT VT = Op.getValueType();
3578   assert(VT == MVT::v4f16);
3579 
3580   SDValue Lo, Hi;
3581   std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3582 
3583   SDLoc SL(Op);
3584   SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
3585                              Op->getFlags());
3586   SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
3587                              Op->getFlags());
3588 
3589   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3590 }
3591 
3592 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3593 // wider vector type is legal.
3594 SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
3595                                               SelectionDAG &DAG) const {
3596   unsigned Opc = Op.getOpcode();
3597   EVT VT = Op.getValueType();
3598   assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3599 
3600   SDValue Lo0, Hi0;
3601   std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3602   SDValue Lo1, Hi1;
3603   std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3604 
3605   SDLoc SL(Op);
3606 
3607   SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
3608                              Op->getFlags());
3609   SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
3610                              Op->getFlags());
3611 
3612   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3613 }
3614 
3615 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3616   switch (Op.getOpcode()) {
3617   default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
3618   case ISD::BRCOND: return LowerBRCOND(Op, DAG);
3619   case ISD::LOAD: {
3620     SDValue Result = LowerLOAD(Op, DAG);
3621     assert((!Result.getNode() ||
3622             Result.getNode()->getNumValues() == 2) &&
3623            "Load should return a value and a chain");
3624     return Result;
3625   }
3626 
3627   case ISD::FSIN:
3628   case ISD::FCOS:
3629     return LowerTrig(Op, DAG);
3630   case ISD::SELECT: return LowerSELECT(Op, DAG);
3631   case ISD::FDIV: return LowerFDIV(Op, DAG);
3632   case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
3633   case ISD::STORE: return LowerSTORE(Op, DAG);
3634   case ISD::GlobalAddress: {
3635     MachineFunction &MF = DAG.getMachineFunction();
3636     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3637     return LowerGlobalAddress(MFI, Op, DAG);
3638   }
3639   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3640   case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
3641   case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
3642   case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
3643   case ISD::INSERT_VECTOR_ELT:
3644     return lowerINSERT_VECTOR_ELT(Op, DAG);
3645   case ISD::EXTRACT_VECTOR_ELT:
3646     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3647   case ISD::BUILD_VECTOR:
3648     return lowerBUILD_VECTOR(Op, DAG);
3649   case ISD::FP_ROUND:
3650     return lowerFP_ROUND(Op, DAG);
3651   case ISD::TRAP:
3652     return lowerTRAP(Op, DAG);
3653   case ISD::DEBUGTRAP:
3654     return lowerDEBUGTRAP(Op, DAG);
3655   case ISD::FABS:
3656   case ISD::FNEG:
3657   case ISD::FCANONICALIZE:
3658     return splitUnaryVectorOp(Op, DAG);
3659   case ISD::FMINNUM:
3660   case ISD::FMAXNUM:
3661     return lowerFMINNUM_FMAXNUM(Op, DAG);
3662   case ISD::SHL:
3663   case ISD::SRA:
3664   case ISD::SRL:
3665   case ISD::ADD:
3666   case ISD::SUB:
3667   case ISD::MUL:
3668   case ISD::SMIN:
3669   case ISD::SMAX:
3670   case ISD::UMIN:
3671   case ISD::UMAX:
3672   case ISD::FADD:
3673   case ISD::FMUL:
3674   case ISD::FMINNUM_IEEE:
3675   case ISD::FMAXNUM_IEEE:
3676     return splitBinaryVectorOp(Op, DAG);
3677   }
3678   return SDValue();
3679 }
3680 
3681 static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
3682                                        const SDLoc &DL,
3683                                        SelectionDAG &DAG, bool Unpacked) {
3684   if (!LoadVT.isVector())
3685     return Result;
3686 
3687   if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
3688     // Truncate to v2i16/v4i16.
3689     EVT IntLoadVT = LoadVT.changeTypeToInteger();
3690 
    // Work around the legalizer not scalarizing truncate after vector op
    // legalization by not creating an intermediate vector trunc.
3693     SmallVector<SDValue, 4> Elts;
3694     DAG.ExtractVectorElements(Result, Elts);
3695     for (SDValue &Elt : Elts)
3696       Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
3697 
3698     Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
3699 
3700     // Bitcast to original type (v2f16/v4f16).
3701     return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
3702   }
3703 
3704   // Cast back to the original packed type.
3705   return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
3706 }
3707 
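// On subtargets with unpacked D16 memory instructions, a d16 load of
// v2f16/v4f16 actually produces v2i32/v4i32 with one half-precision value per
// 32-bit element. Rewrite the memory node to that wider type and then truncate
// and bitcast back to the original packed type (adjustLoadValueTypeImpl above).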
3708 SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
3709                                               MemSDNode *M,
3710                                               SelectionDAG &DAG,
3711                                               ArrayRef<SDValue> Ops,
3712                                               bool IsIntrinsic) const {
3713   SDLoc DL(M);
3714 
3715   bool Unpacked = Subtarget->hasUnpackedD16VMem();
3716   EVT LoadVT = M->getValueType(0);
3717 
  EVT EquivLoadVT = LoadVT;
  if (Unpacked && LoadVT.isVector()) {
    EquivLoadVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                   LoadVT.getVectorNumElements());
  }
3724 
3725   // Change from v4f16/v2f16 to EquivLoadVT.
3726   SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
3727 
3728   SDValue Load
3729     = DAG.getMemIntrinsicNode(
3730       IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
3731       VTList, Ops, M->getMemoryVT(),
3732       M->getMemOperand());
3733   if (!Unpacked) // Just adjusted the opcode.
3734     return Load;
3735 
3736   SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);
3737 
3738   return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
3739 }
3740 
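// Lower llvm.amdgcn.icmp, which compares its first two operands with the
// integer predicate given as the third operand and returns the resulting lane
// mask. Illustrative use (predicate 32 is ICMP_EQ; the operand type is
// overloaded):
//   %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 32)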
3741 static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
3742                                   SDNode *N, SelectionDAG &DAG) {
3743   EVT VT = N->getValueType(0);
3744   const auto *CD = dyn_cast<ConstantSDNode>(N->getOperand(3));
3745   if (!CD)
3746     return DAG.getUNDEF(VT);
3747 
3748   int CondCode = CD->getSExtValue();
3749   if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
3750       CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
3751     return DAG.getUNDEF(VT);
3752 
  ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);

3756   SDValue LHS = N->getOperand(1);
3757   SDValue RHS = N->getOperand(2);
3758 
3759   SDLoc DL(N);
3760 
3761   EVT CmpVT = LHS.getValueType();
3762   if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
3763     unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
3764       ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3765     LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
3766     RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
3767   }
3768 
3769   ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
3770 
3771   return DAG.getNode(AMDGPUISD::SETCC, DL, VT, LHS, RHS,
3772                      DAG.getCondCode(CCOpcode));
3773 }
3774 
3775 static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
3776                                   SDNode *N, SelectionDAG &DAG) {
3777   EVT VT = N->getValueType(0);
3778   const auto *CD = dyn_cast<ConstantSDNode>(N->getOperand(3));
3779   if (!CD)
3780     return DAG.getUNDEF(VT);
3781 
3782   int CondCode = CD->getSExtValue();
3783   if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
3784       CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) {
3785     return DAG.getUNDEF(VT);
3786   }
3787 
3788   SDValue Src0 = N->getOperand(1);
3789   SDValue Src1 = N->getOperand(2);
3790   EVT CmpVT = Src0.getValueType();
3791   SDLoc SL(N);
3792 
3793   if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
3794     Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
3795     Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
3796   }
3797 
3798   FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
3799   ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
3800   return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src0,
3801                      Src1, DAG.getCondCode(CCOpcode));
3802 }
3803 
3804 void SITargetLowering::ReplaceNodeResults(SDNode *N,
3805                                           SmallVectorImpl<SDValue> &Results,
3806                                           SelectionDAG &DAG) const {
3807   switch (N->getOpcode()) {
3808   case ISD::INSERT_VECTOR_ELT: {
3809     if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
3810       Results.push_back(Res);
3811     return;
3812   }
3813   case ISD::EXTRACT_VECTOR_ELT: {
3814     if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
3815       Results.push_back(Res);
3816     return;
3817   }
3818   case ISD::INTRINSIC_WO_CHAIN: {
3819     unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3820     switch (IID) {
3821     case Intrinsic::amdgcn_cvt_pkrtz: {
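      // cvt_pkrtz packs two f32 operands into a v2f16 using round-toward-zero.
      // Emit the node with an i32 result and bitcast to v2f16, since the
      // packed result type is what is being custom-replaced here.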
3822       SDValue Src0 = N->getOperand(1);
3823       SDValue Src1 = N->getOperand(2);
3824       SDLoc SL(N);
3825       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
3826                                 Src0, Src1);
3827       Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
3828       return;
3829     }
3830     case Intrinsic::amdgcn_cvt_pknorm_i16:
3831     case Intrinsic::amdgcn_cvt_pknorm_u16:
3832     case Intrinsic::amdgcn_cvt_pk_i16:
3833     case Intrinsic::amdgcn_cvt_pk_u16: {
3834       SDValue Src0 = N->getOperand(1);
3835       SDValue Src1 = N->getOperand(2);
3836       SDLoc SL(N);
3837       unsigned Opcode;
3838 
3839       if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
3840         Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
3841       else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
3842         Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
3843       else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
3844         Opcode = AMDGPUISD::CVT_PK_I16_I32;
3845       else
3846         Opcode = AMDGPUISD::CVT_PK_U16_U32;
3847 
3848       EVT VT = N->getValueType(0);
3849       if (isTypeLegal(VT))
3850         Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
3851       else {
3852         SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
3853         Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
3854       }
3855       return;
3856     }
3857     }
3858     break;
3859   }
3860   case ISD::INTRINSIC_W_CHAIN: {
3861     if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
3862       Results.push_back(Res);
3863       Results.push_back(Res.getValue(1));
3864       return;
3865     }
3866 
3867     break;
3868   }
3869   case ISD::SELECT: {
3870     SDLoc SL(N);
3871     EVT VT = N->getValueType(0);
3872     EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3873     SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
3874     SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
3875 
3876     EVT SelectVT = NewVT;
3877     if (NewVT.bitsLT(MVT::i32)) {
3878       LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
3879       RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
3880       SelectVT = MVT::i32;
3881     }
3882 
3883     SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
3884                                     N->getOperand(0), LHS, RHS);
3885 
3886     if (NewVT != SelectVT)
3887       NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
3888     Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
3889     return;
3890   }
3891   case ISD::FNEG: {
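    // fneg of v2f16 just flips the sign bit of each 16-bit half: bitcast to
    // i32, xor with 0x80008000, and bitcast back. (The FABS case below clears
    // the sign bits with 0x7fff7fff instead.)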
3892     if (N->getValueType(0) != MVT::v2f16)
3893       break;
3894 
3895     SDLoc SL(N);
3896     SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
3897 
3898     SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
3899                              BC,
3900                              DAG.getConstant(0x80008000, SL, MVT::i32));
3901     Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
3902     return;
3903   }
3904   case ISD::FABS: {
3905     if (N->getValueType(0) != MVT::v2f16)
3906       break;
3907 
3908     SDLoc SL(N);
3909     SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
3910 
3911     SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
3912                              BC,
3913                              DAG.getConstant(0x7fff7fff, SL, MVT::i32));
3914     Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
3915     return;
3916   }
3917   default:
3918     break;
3919   }
3920 }
3921 
3922 /// Helper function for LowerBRCOND
static SDNode *findUser(SDValue Value, unsigned Opcode) {
  SDNode *Parent = Value.getNode();
  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
       I != E; ++I) {
    if (I.getUse().get() != Value)
3930       continue;
3931 
3932     if (I->getOpcode() == Opcode)
3933       return *I;
3934   }
3935   return nullptr;
3936 }
3937 
3938 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
3939   if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
3940     switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
3941     case Intrinsic::amdgcn_if:
3942       return AMDGPUISD::IF;
3943     case Intrinsic::amdgcn_else:
3944       return AMDGPUISD::ELSE;
3945     case Intrinsic::amdgcn_loop:
3946       return AMDGPUISD::LOOP;
3947     case Intrinsic::amdgcn_end_cf:
3948       llvm_unreachable("should not occur");
3949     default:
3950       return 0;
3951     }
3952   }
3953 
3954   // break, if_break, else_break are all only used as inputs to loop, not
3955   // directly as branch conditions.
3956   return 0;
3957 }
3958 
3959 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
3960   const Triple &TT = getTargetMachine().getTargetTriple();
3961   return (GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
3962           GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
3963          AMDGPU::shouldEmitConstantsToTextSection(TT);
3964 }
3965 
3966 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
3967   // FIXME: Either avoid relying on address space here or change the default
3968   // address space for functions to avoid the explicit check.
3969   return (GV->getValueType()->isFunctionTy() ||
3970           GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
3971           GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
3972           GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
3973          !shouldEmitFixup(GV) &&
3974          !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3975 }
3976 
3977 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
3978   return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
3979 }
3980 
/// This transforms the control flow intrinsics to get the branch destination
/// as the last parameter, and also switches the branch target with BR if
/// needed.
3983 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
3984                                       SelectionDAG &DAG) const {
3985   SDLoc DL(BRCOND);
3986 
3987   SDNode *Intr = BRCOND.getOperand(1).getNode();
3988   SDValue Target = BRCOND.getOperand(2);
3989   SDNode *BR = nullptr;
3990   SDNode *SetCC = nullptr;
3991 
3992   if (Intr->getOpcode() == ISD::SETCC) {
3993     // As long as we negate the condition everything is fine
3994     SetCC = Intr;
3995     Intr = SetCC->getOperand(0).getNode();
3996 
3997   } else {
3998     // Get the target from BR if we don't negate the condition
3999     BR = findUser(BRCOND, ISD::BR);
4000     Target = BR->getOperand(1);
4001   }
4002 
4003   // FIXME: This changes the types of the intrinsics instead of introducing new
4004   // nodes with the correct types.
4005   // e.g. llvm.amdgcn.loop
4006 
4007   // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
4008   // =>     t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
4009 
4010   unsigned CFNode = isCFIntrinsic(Intr);
4011   if (CFNode == 0) {
4012     // This is a uniform branch so we don't need to legalize.
4013     return BRCOND;
4014   }
4015 
4016   bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
4017                    Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
4018 
4019   assert(!SetCC ||
4020         (SetCC->getConstantOperandVal(1) == 1 &&
4021          cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
4022                                                              ISD::SETNE));
4023 
  // Operands of the new intrinsic call.
4025   SmallVector<SDValue, 4> Ops;
4026   if (HaveChain)
4027     Ops.push_back(BRCOND.getOperand(0));
4028 
4029   Ops.append(Intr->op_begin() + (HaveChain ?  2 : 1), Intr->op_end());
4030   Ops.push_back(Target);
4031 
4032   ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
4033 
  // Build the new intrinsic call.
4035   SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
4036 
4037   if (!HaveChain) {
4038     SDValue Ops[] =  {
4039       SDValue(Result, 0),
4040       BRCOND.getOperand(0)
4041     };
4042 
4043     Result = DAG.getMergeValues(Ops, DL).getNode();
4044   }
4045 
4046   if (BR) {
4047     // Give the branch instruction our target
4048     SDValue Ops[] = {
4049       BR->getOperand(0),
4050       BRCOND.getOperand(2)
4051     };
4052     SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
4053     DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
4054     BR = NewBR.getNode();
4055   }
4056 
4057   SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
4058 
4059   // Copy the intrinsic results to registers
4060   for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
4061     SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
4062     if (!CopyToReg)
4063       continue;
4064 
4065     Chain = DAG.getCopyToReg(
4066       Chain, DL,
4067       CopyToReg->getOperand(1),
4068       SDValue(Result, i - 1),
4069       SDValue());
4070 
4071     DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
4072   }
4073 
4074   // Remove the old intrinsic from the chain
4075   DAG.ReplaceAllUsesOfValueWith(
4076     SDValue(Intr, Intr->getNumValues() - 1),
4077     Intr->getOperand(0));
4078 
4079   return Chain;
4080 }
4081 
4082 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
4083                                             SDValue Op,
4084                                             const SDLoc &DL,
4085                                             EVT VT) const {
4086   return Op.getValueType().bitsLE(VT) ?
4087       DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
4088       DAG.getNode(ISD::FTRUNC, DL, VT, Op);
4089 }
4090 
4091 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
4092   assert(Op.getValueType() == MVT::f16 &&
4093          "Do not know how to custom lower FP_ROUND for non-f16 type");
4094 
4095   SDValue Src = Op.getOperand(0);
4096   EVT SrcVT = Src.getValueType();
4097   if (SrcVT != MVT::f64)
4098     return Op;
4099 
4100   SDLoc DL(Op);
4101 
4102   SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
4103   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
4104   return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
4105 }
4106 
4107 SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
4108                                                SelectionDAG &DAG) const {
4109   EVT VT = Op.getValueType();
4110   bool IsIEEEMode = Subtarget->enableIEEEBit(DAG.getMachineFunction());
4111 
  // FIXME: Assert during selection that this is only selected for
  // ieee_mode. Currently a combine can produce the ieee version for non-ieee
  // mode functions, but this happens to be OK since it's only done in cases
  // where it is known there is no sNaN.
4116   if (IsIEEEMode)
4117     return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);
4118 
4119   if (VT == MVT::v4f16)
4120     return splitBinaryVectorOp(Op, DAG);
4121   return Op;
4122 }
4123 
4124 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
4125   SDLoc SL(Op);
4126   SDValue Chain = Op.getOperand(0);
4127 
4128   if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4129       !Subtarget->isTrapHandlerEnabled())
4130     return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
4131 
4132   MachineFunction &MF = DAG.getMachineFunction();
4133   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4134   unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4135   assert(UserSGPR != AMDGPU::NoRegister);
4136   SDValue QueuePtr = CreateLiveInRegister(
4137     DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
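  // Copy the queue pointer into SGPR0/SGPR1 and glue the copy to the trap
  // node; the HSA trap handler reads the queue pointer from those registers.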
4138   SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
4139   SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
4140                                    QueuePtr, SDValue());
4141   SDValue Ops[] = {
4142     ToReg,
4143     DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16),
4144     SGPR01,
4145     ToReg.getValue(1)
4146   };
4147   return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4148 }
4149 
4150 SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
4151   SDLoc SL(Op);
4152   SDValue Chain = Op.getOperand(0);
4153   MachineFunction &MF = DAG.getMachineFunction();
4154 
4155   if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4156       !Subtarget->isTrapHandlerEnabled()) {
4157     DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
4158                                      "debugtrap handler not supported",
4159                                      Op.getDebugLoc(),
4160                                      DS_Warning);
4161     LLVMContext &Ctx = MF.getFunction().getContext();
4162     Ctx.diagnose(NoTrap);
4163     return Chain;
4164   }
4165 
4166   SDValue Ops[] = {
4167     Chain,
4168     DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16)
4169   };
4170   return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4171 }
4172 
4173 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
4174                                              SelectionDAG &DAG) const {
4175   // FIXME: Use inline constants (src_{shared, private}_base) instead.
4176   if (Subtarget->hasApertureRegs()) {
4177     unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
4178         AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
4179         AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
4180     unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
4181         AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
4182         AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
4183     unsigned Encoding =
4184         AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
4185         Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
4186         WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
4187 
4188     SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
4189     SDValue ApertureReg = SDValue(
4190         DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
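    // S_GETREG_B32 returns the aperture field right-justified; shift it left
    // by the field width (WidthM1 + 1) to rebuild the high 32 bits of the
    // segment base address.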
4191     SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
4192     return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
4193   }
4194 
4195   MachineFunction &MF = DAG.getMachineFunction();
4196   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4197   unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4198   assert(UserSGPR != AMDGPU::NoRegister);
4199 
4200   SDValue QueuePtr = CreateLiveInRegister(
4201     DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4202 
4203   // Offset into amd_queue_t for group_segment_aperture_base_hi /
4204   // private_segment_aperture_base_hi.
4205   uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
4206 
4207   SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
4208 
4209   // TODO: Use custom target PseudoSourceValue.
  // TODO: We should use the value from the IR intrinsic call, but it might
  // not be available here, and it is unclear how to get it.
4212   Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
4213                                               AMDGPUAS::CONSTANT_ADDRESS));
4214 
4215   MachinePointerInfo PtrInfo(V, StructOffset);
4216   return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
4217                      MinAlign(64, StructOffset),
4218                      MachineMemOperand::MODereferenceable |
4219                          MachineMemOperand::MOInvariant);
4220 }
4221 
4222 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
4223                                              SelectionDAG &DAG) const {
4224   SDLoc SL(Op);
4225   const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
4226 
4227   SDValue Src = ASC->getOperand(0);
4228   SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
4229 
4230   const AMDGPUTargetMachine &TM =
4231     static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
4232 
4233   // flat -> local/private
4234   if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
4235     unsigned DestAS = ASC->getDestAddressSpace();
4236 
4237     if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
4238         DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
4239       unsigned NullVal = TM.getNullPointerValue(DestAS);
4240       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4241       SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
4242       SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
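      // The select below maps a flat null pointer to the segment null value
      // and otherwise keeps the truncated 32-bit pointer.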
4243 
4244       return DAG.getNode(ISD::SELECT, SL, MVT::i32,
4245                          NonNull, Ptr, SegmentNullPtr);
4246     }
4247   }
4248 
4249   // local/private -> flat
4250   if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
4251     unsigned SrcAS = ASC->getSrcAddressSpace();
4252 
4253     if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
4254         SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
4255       unsigned NullVal = TM.getNullPointerValue(SrcAS);
4256       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4257 
4258       SDValue NonNull
4259         = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
4260 
4261       SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
4262       SDValue CvtPtr
4263         = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
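      // The 32-bit segment offset forms the low dword and the aperture base
      // the high dword of the 64-bit flat pointer.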
4264 
4265       return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
4266                          DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
4267                          FlatNullPtr);
4268     }
4269   }
4270 
4271   // global <-> flat are no-ops and never emitted.
4272 
4273   const MachineFunction &MF = DAG.getMachineFunction();
4274   DiagnosticInfoUnsupported InvalidAddrSpaceCast(
4275     MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
4276   DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4277 
4278   return DAG.getUNDEF(ASC->getValueType(0));
4279 }
4280 
4281 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4282                                                  SelectionDAG &DAG) const {
4283   SDValue Vec = Op.getOperand(0);
4284   SDValue InsVal = Op.getOperand(1);
4285   SDValue Idx = Op.getOperand(2);
4286   EVT VecVT = Vec.getValueType();
4287   EVT EltVT = VecVT.getVectorElementType();
4288   unsigned VecSize = VecVT.getSizeInBits();
  unsigned EltSize = EltVT.getSizeInBits();

  assert(VecSize <= 64);
4293 
4294   unsigned NumElts = VecVT.getVectorNumElements();
4295   SDLoc SL(Op);
4296   auto KIdx = dyn_cast<ConstantSDNode>(Idx);
4297 
4298   if (NumElts == 4 && EltSize == 16 && KIdx) {
4299     SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
4300 
4301     SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4302                                  DAG.getConstant(0, SL, MVT::i32));
4303     SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4304                                  DAG.getConstant(1, SL, MVT::i32));
4305 
4306     SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
4307     SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
4308 
4309     unsigned Idx = KIdx->getZExtValue();
4310     bool InsertLo = Idx < 2;
4311     SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
4312       InsertLo ? LoVec : HiVec,
4313       DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
4314       DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
4315 
4316     InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
4317 
4318     SDValue Concat = InsertLo ?
4319       DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
4320       DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
4321 
4322     return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
4323   }
4324 
4325   if (isa<ConstantSDNode>(Idx))
4326     return SDValue();
4327 
4328   MVT IntVT = MVT::getIntegerVT(VecSize);
4329 
4330   // Avoid stack access for dynamic indexing.
4331   // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
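  //
  // For example, inserting into a dynamic lane of a v4i16: ScaledIdx is the
  // lane index times 16, BFM becomes 0xffff shifted into that lane, the
  // splatted insert value is ANDed with BFM, the original vector is ANDed
  // with ~BFM, and the two halves are ORed together.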
4332 
4333   // Create a congruent vector with the target value in each element so that
4334   // the required element can be masked and ORed into the target vector.
4335   SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT,
4336                                DAG.getSplatBuildVector(VecVT, SL, InsVal));
4337 
4338   assert(isPowerOf2_32(EltSize));
4339   SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4340 
4341   // Convert vector index to bit-index.
4342   SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
4343 
4344   SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4345   SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
4346                             DAG.getConstant(0xffff, SL, IntVT),
4347                             ScaledIdx);
4348 
4349   SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);
4350   SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT,
4351                             DAG.getNOT(SL, BFM, IntVT), BCVec);
4352 
4353   SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS);
4354   return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI);
4355 }
4356 
4357 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4358                                                   SelectionDAG &DAG) const {
4359   SDLoc SL(Op);
4360 
4361   EVT ResultVT = Op.getValueType();
4362   SDValue Vec = Op.getOperand(0);
4363   SDValue Idx = Op.getOperand(1);
4364   EVT VecVT = Vec.getValueType();
4365   unsigned VecSize = VecVT.getSizeInBits();
4366   EVT EltVT = VecVT.getVectorElementType();
4367   assert(VecSize <= 64);
4368 
4369   DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
4370 
4371   // Make sure we do any optimizations that will make it easier to fold
4372   // source modifiers before obscuring it with bit operations.
4373 
4374   // XXX - Why doesn't this get called when vector_shuffle is expanded?
4375   if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
4376     return Combined;
4377 
4378   unsigned EltSize = EltVT.getSizeInBits();
4379   assert(isPowerOf2_32(EltSize));
4380 
4381   MVT IntVT = MVT::getIntegerVT(VecSize);
4382   SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4383 
4384   // Convert vector index to bit-index (* EltSize)
4385   SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
4386 
4387   SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4388   SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx);
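  // For example, extracting lane 3 of a v4f16 shifts the 64-bit bitcast right
  // by 48 bits; the low 16 bits of that result are then returned as f16
  // below.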
4389 
4390   if (ResultVT == MVT::f16) {
4391     SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt);
4392     return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4393   }
4394 
4395   return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT);
4396 }
4397 
4398 SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
4399                                             SelectionDAG &DAG) const {
4400   SDLoc SL(Op);
4401   EVT VT = Op.getValueType();
4402 
4403   if (VT == MVT::v4i16 || VT == MVT::v4f16) {
4404     EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2);
4405 
4406     // Turn into pair of packed build_vectors.
4407     // TODO: Special case for constants that can be materialized with s_mov_b64.
4408     SDValue Lo = DAG.getBuildVector(HalfVT, SL,
4409                                     { Op.getOperand(0), Op.getOperand(1) });
4410     SDValue Hi = DAG.getBuildVector(HalfVT, SL,
4411                                     { Op.getOperand(2), Op.getOperand(3) });
4412 
4413     SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo);
4414     SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi);
4415 
4416     SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi });
4417     return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
4418   }
4419 
4420   assert(VT == MVT::v2f16 || VT == MVT::v2i16);
4421   assert(!Subtarget->hasVOP3PInsts() && "this should be legal");
4422 
4423   SDValue Lo = Op.getOperand(0);
4424   SDValue Hi = Op.getOperand(1);
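  // The general case packs the two 16-bit elements into an i32 as
  // lo | (hi << 16) and bitcasts it back to the packed type; undef halves are
  // special-cased below to avoid materializing unnecessary bits.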
4425 
4426   // Avoid adding defined bits with the zero_extend.
4427   if (Hi.isUndef()) {
4428     Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4429     SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo);
4430     return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo);
4431   }
4432 
4433   Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi);
4434   Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi);
4435 
4436   SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi,
4437                               DAG.getConstant(16, SL, MVT::i32));
4438   if (Lo.isUndef())
4439     return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi);
4440 
4441   Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4442   Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo);
4443 
4444   SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi);
4445   return DAG.getNode(ISD::BITCAST, SL, VT, Or);
4446 }
4447 
4448 bool
4449 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4450   // We can fold offsets for anything that doesn't require a GOT relocation.
4451   return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
4452           GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4453           GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4454          !shouldEmitGOTReloc(GA->getGlobal());
4455 }
4456 
4457 static SDValue
4458 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
4459                         const SDLoc &DL, unsigned Offset, EVT PtrVT,
4460                         unsigned GAFlags = SIInstrInfo::MO_NONE) {
4461   // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
4462   // lowered to the following code sequence:
4463   //
4464   // For constant address space:
4465   //   s_getpc_b64 s[0:1]
4466   //   s_add_u32 s0, s0, $symbol
4467   //   s_addc_u32 s1, s1, 0
4468   //
4469   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
4470   //   a fixup or relocation is emitted to replace $symbol with a literal
4471   //   constant, which is a pc-relative offset from the encoding of the $symbol
4472   //   operand to the global variable.
4473   //
4474   // For global address space:
4475   //   s_getpc_b64 s[0:1]
4476   //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
4477   //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
4478   //
4479   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
4480   //   fixups or relocations are emitted to replace $symbol@*@lo and
4481   //   $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
4482   //   which is a 64-bit pc-relative offset from the encoding of the $symbol
4483   //   operand to the global variable.
4484   //
4485   // What we want here is an offset from the value returned by s_getpc
4486   // (which is the address of the s_add_u32 instruction) to the global
4487   // variable, but since the encoding of $symbol starts 4 bytes after the start
4488   // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
4489   // small. This requires us to add 4 to the global variable offset in order to
4490   // compute the correct address.
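  //
  // The high half uses GAFlags + 1, which is expected to be the matching @hi
  // relocation flag when a relocation is requested.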
4491   SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4492                                              GAFlags);
4493   SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4494                                              GAFlags == SIInstrInfo::MO_NONE ?
4495                                              GAFlags : GAFlags + 1);
4496   return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
4497 }
4498 
4499 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
4500                                              SDValue Op,
4501                                              SelectionDAG &DAG) const {
4502   GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
4503   const GlobalValue *GV = GSD->getGlobal();
4504   if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
4505       GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS ||
4506       GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS)
4507     return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
4508 
4509   SDLoc DL(GSD);
4510   EVT PtrVT = Op.getValueType();
4511 
4512   // FIXME: Should not make address space based decisions here.
4513   if (shouldEmitFixup(GV))
4514     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
4515   else if (shouldEmitPCReloc(GV))
4516     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
4517                                    SIInstrInfo::MO_REL32);
4518 
4519   SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
4520                                             SIInstrInfo::MO_GOTPCREL32);
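  // The GOT entry holds the absolute 64-bit address of the global, so load it
  // through the pc-relative GOT address computed above.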
4521 
4522   Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
4523   PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
4524   const DataLayout &DataLayout = DAG.getDataLayout();
4525   unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
4526   MachinePointerInfo PtrInfo
4527     = MachinePointerInfo::getGOT(DAG.getMachineFunction());
4528 
4529   return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
4530                      MachineMemOperand::MODereferenceable |
4531                          MachineMemOperand::MOInvariant);
4532 }
4533 
4534 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
4535                                    const SDLoc &DL, SDValue V) const {
4536   // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
4537   // the destination register.
4538   //
4539   // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
4540   // so we will end up with redundant moves to m0.
4541   //
4542   // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
4543 
4544   // A Null SDValue creates a glue result.
4545   SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
4546                                   V, Chain);
4547   return SDValue(M0, 0);
4548 }
4549 
4550 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
4551                                                  SDValue Op,
4552                                                  MVT VT,
4553                                                  unsigned Offset) const {
4554   SDLoc SL(Op);
4555   SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
4556                                            DAG.getEntryNode(), Offset, 4, false);
  // The local size values will have the high 16 bits as zero.
4558   return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
4559                      DAG.getValueType(VT));
4560 }
4561 
4562 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4563                                         EVT VT) {
4564   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
4565                                       "non-hsa intrinsic with hsa target",
4566                                       DL.getDebugLoc());
4567   DAG.getContext()->diagnose(BadIntrin);
4568   return DAG.getUNDEF(VT);
4569 }
4570 
4571 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4572                                          EVT VT) {
4573   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
4574                                       "intrinsic not supported on subtarget",
4575                                       DL.getDebugLoc());
4576   DAG.getContext()->diagnose(BadIntrin);
4577   return DAG.getUNDEF(VT);
4578 }
4579 
4580 static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
4581                                     ArrayRef<SDValue> Elts) {
4582   assert(!Elts.empty());
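  // Round the element count up to 1, 2, 4, 8 or 16 f32 dwords; the tail is
  // padded with undef below.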
4583   MVT Type;
4584   unsigned NumElts;
4585 
4586   if (Elts.size() == 1) {
4587     Type = MVT::f32;
4588     NumElts = 1;
4589   } else if (Elts.size() == 2) {
4590     Type = MVT::v2f32;
4591     NumElts = 2;
4592   } else if (Elts.size() <= 4) {
4593     Type = MVT::v4f32;
4594     NumElts = 4;
4595   } else if (Elts.size() <= 8) {
4596     Type = MVT::v8f32;
4597     NumElts = 8;
4598   } else {
4599     assert(Elts.size() <= 16);
4600     Type = MVT::v16f32;
4601     NumElts = 16;
4602   }
4603 
4604   SmallVector<SDValue, 16> VecElts(NumElts);
4605   for (unsigned i = 0; i < Elts.size(); ++i) {
4606     SDValue Elt = Elts[i];
4607     if (Elt.getValueType() != MVT::f32)
4608       Elt = DAG.getBitcast(MVT::f32, Elt);
4609     VecElts[i] = Elt;
4610   }
4611   for (unsigned i = Elts.size(); i < NumElts; ++i)
4612     VecElts[i] = DAG.getUNDEF(MVT::f32);
4613 
4614   if (NumElts == 1)
4615     return VecElts[0];
4616   return DAG.getBuildVector(Type, DL, VecElts);
4617 }
4618 
4619 static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG,
4620                              SDValue *GLC, SDValue *SLC) {
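  // The cache policy is a bitfield: bit 0 is glc and bit 1 is slc. Any bit
  // not consumed here must be clear, otherwise the policy is rejected.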
4621   auto CachePolicyConst = dyn_cast<ConstantSDNode>(CachePolicy.getNode());
4622   if (!CachePolicyConst)
4623     return false;
4624 
4625   uint64_t Value = CachePolicyConst->getZExtValue();
4626   SDLoc DL(CachePolicy);
4627   if (GLC) {
4628     *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
4629     Value &= ~(uint64_t)0x1;
4630   }
4631   if (SLC) {
4632     *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
4633     Value &= ~(uint64_t)0x2;
4634   }
4635 
4636   return Value == 0;
4637 }
4638 
// Reconstruct the required return value for an image load intrinsic.
// This is more complicated due to the optional use of TexFailCtrl, which means
// the required return type is an aggregate.
4642 static SDValue constructRetValue(SelectionDAG &DAG,
4643                                  MachineSDNode *Result,
4644                                  ArrayRef<EVT> ResultTypes,
4645                                  bool IsTexFail, bool Unpacked, bool IsD16,
4646                                  int DMaskPop, int NumVDataDwords,
4647                                  const SDLoc &DL, LLVMContext &Context) {
  // Determine the required return type. This is the same regardless of the
  // IsTexFail flag.
4649   EVT ReqRetVT = ResultTypes[0];
4650   EVT ReqRetEltVT = ReqRetVT.isVector() ? ReqRetVT.getVectorElementType() : ReqRetVT;
4651   int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1;
4652   EVT AdjEltVT = Unpacked && IsD16 ? MVT::i32 : ReqRetEltVT;
4653   EVT AdjVT = Unpacked ? ReqRetNumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, ReqRetNumElts)
4654                                            : AdjEltVT
4655                        : ReqRetVT;
4656 
  // Extract the data part of the result and bitcast it to the same type as
  // the required return type.
4659   int NumElts;
4660   if (IsD16 && !Unpacked)
4661     NumElts = NumVDataDwords << 1;
4662   else
4663     NumElts = NumVDataDwords;
4664 
4665   EVT CastVT = NumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, NumElts)
4666                            : AdjEltVT;
4667 
4668   // Special case for v8f16. Rather than add support for this, use v4i32 to
4669   // extract the data elements
4670   bool V8F16Special = false;
4671   if (CastVT == MVT::v8f16) {
4672     CastVT = MVT::v4i32;
4673     DMaskPop >>= 1;
4674     ReqRetNumElts >>= 1;
4675     V8F16Special = true;
4676     AdjVT = MVT::v2i32;
4677   }
4678 
4679   SDValue N = SDValue(Result, 0);
4680   SDValue CastRes = DAG.getNode(ISD::BITCAST, DL, CastVT, N);
4681 
  // Split the result into its individual elements.
4683   SmallVector<SDValue, 4> BVElts;
4684 
4685   if (CastVT.isVector()) {
4686     DAG.ExtractVectorElements(CastRes, BVElts, 0, DMaskPop);
4687   } else {
4688     BVElts.push_back(CastRes);
4689   }
4690   int ExtraElts = ReqRetNumElts - DMaskPop;
  while (ExtraElts--)
4692     BVElts.push_back(DAG.getUNDEF(AdjEltVT));
4693 
4694   SDValue PreTFCRes;
4695   if (ReqRetNumElts > 1) {
4696     SDValue NewVec = DAG.getBuildVector(AdjVT, DL, BVElts);
4697     if (IsD16 && Unpacked)
4698       PreTFCRes = adjustLoadValueTypeImpl(NewVec, ReqRetVT, DL, DAG, Unpacked);
4699     else
4700       PreTFCRes = NewVec;
4701   } else {
4702     PreTFCRes = BVElts[0];
4703   }
4704 
4705   if (V8F16Special)
4706     PreTFCRes = DAG.getNode(ISD::BITCAST, DL, MVT::v4f16, PreTFCRes);
4707 
4708   if (!IsTexFail) {
4709     if (Result->getNumValues() > 1)
4710       return DAG.getMergeValues({PreTFCRes, SDValue(Result, 1)}, DL);
4711     else
4712       return PreTFCRes;
4713   }
4714 
4715   // Extract the TexFail result and insert into aggregate return
4716   SmallVector<SDValue, 1> TFCElt;
4717   DAG.ExtractVectorElements(N, TFCElt, DMaskPop, 1);
4718   SDValue TFCRes = DAG.getNode(ISD::BITCAST, DL, ResultTypes[1], TFCElt[0]);
4719   return DAG.getMergeValues({PreTFCRes, TFCRes, SDValue(Result, 1)}, DL);
4720 }
4721 
4722 static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE,
4723                          SDValue *LWE, bool &IsTexFail) {
4724   auto TexFailCtrlConst = dyn_cast<ConstantSDNode>(TexFailCtrl.getNode());
4725   if (!TexFailCtrlConst)
4726     return false;
4727 
4728   uint64_t Value = TexFailCtrlConst->getZExtValue();
4729   if (Value) {
4730     IsTexFail = true;
4731   }
4732 
4733   SDLoc DL(TexFailCtrlConst);
4734   *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
4735   Value &= ~(uint64_t)0x1;
4736   *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
4737   Value &= ~(uint64_t)0x2;
4738 
4739   return Value == 0;
4740 }
4741 
4742 SDValue SITargetLowering::lowerImage(SDValue Op,
4743                                      const AMDGPU::ImageDimIntrinsicInfo *Intr,
4744                                      SelectionDAG &DAG) const {
4745   SDLoc DL(Op);
4746   MachineFunction &MF = DAG.getMachineFunction();
4747   const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>();
4748   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
4749       AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
4750   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
4751   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
4752       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
4753   unsigned IntrOpcode = Intr->BaseOpcode;
4754 
4755   SmallVector<EVT, 3> ResultTypes(Op->value_begin(), Op->value_end());
4756   SmallVector<EVT, 3> OrigResultTypes(Op->value_begin(), Op->value_end());
4757   bool IsD16 = false;
4758   bool IsA16 = false;
4759   SDValue VData;
4760   int NumVDataDwords;
4761   bool AdjustRetType = false;
4762 
4763   unsigned AddrIdx; // Index of first address argument
4764   unsigned DMask;
4765   unsigned DMaskLanes = 0;
4766 
4767   if (BaseOpcode->Atomic) {
4768     VData = Op.getOperand(2);
4769 
4770     bool Is64Bit = VData.getValueType() == MVT::i64;
4771     if (BaseOpcode->AtomicX2) {
4772       SDValue VData2 = Op.getOperand(3);
4773       VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL,
4774                                  {VData, VData2});
4775       if (Is64Bit)
4776         VData = DAG.getBitcast(MVT::v4i32, VData);
4777 
4778       ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32;
4779       DMask = Is64Bit ? 0xf : 0x3;
4780       NumVDataDwords = Is64Bit ? 4 : 2;
4781       AddrIdx = 4;
4782     } else {
4783       DMask = Is64Bit ? 0x3 : 0x1;
4784       NumVDataDwords = Is64Bit ? 2 : 1;
4785       AddrIdx = 3;
4786     }
4787   } else {
4788     unsigned DMaskIdx = BaseOpcode->Store ? 3 : isa<MemSDNode>(Op) ? 2 : 1;
4789     auto DMaskConst = dyn_cast<ConstantSDNode>(Op.getOperand(DMaskIdx));
4790     if (!DMaskConst)
4791       return Op;
4792     DMask = DMaskConst->getZExtValue();
4793     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
4794 
4795     if (BaseOpcode->Store) {
4796       VData = Op.getOperand(2);
4797 
4798       MVT StoreVT = VData.getSimpleValueType();
4799       if (StoreVT.getScalarType() == MVT::f16) {
4800         if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS ||
4801             !BaseOpcode->HasD16)
4802           return Op; // D16 is unsupported for this instruction
4803 
4804         IsD16 = true;
4805         VData = handleD16VData(VData, DAG);
4806       }
4807 
4808       NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32;
4809     } else {
      // Work out the number of dwords based on the dmask popcount, the
      // underlying type, and whether packing is supported.
4812       MVT LoadVT = ResultTypes[0].getSimpleVT();
4813       if (LoadVT.getScalarType() == MVT::f16) {
4814         if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS ||
4815             !BaseOpcode->HasD16)
4816           return Op; // D16 is unsupported for this instruction
4817 
4818         IsD16 = true;
4819       }
4820 
4821       // Confirm that the return type is large enough for the dmask specified
4822       if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) ||
4823           (!LoadVT.isVector() && DMaskLanes > 1))
4824           return Op;
4825 
4826       if (IsD16 && !Subtarget->hasUnpackedD16VMem())
4827         NumVDataDwords = (DMaskLanes + 1) / 2;
4828       else
4829         NumVDataDwords = DMaskLanes;
4830 
4831       AdjustRetType = true;
4832     }
4833 
4834     AddrIdx = DMaskIdx + 1;
4835   }
4836 
4837   unsigned NumGradients = BaseOpcode->Gradients ? DimInfo->NumGradients : 0;
4838   unsigned NumCoords = BaseOpcode->Coordinates ? DimInfo->NumCoords : 0;
4839   unsigned NumLCM = BaseOpcode->LodOrClampOrMip ? 1 : 0;
4840   unsigned NumVAddrs = BaseOpcode->NumExtraArgs + NumGradients +
4841                        NumCoords + NumLCM;
4842   unsigned NumMIVAddrs = NumVAddrs;
4843 
4844   SmallVector<SDValue, 4> VAddrs;
4845 
4846   // Optimize _L to _LZ when _L is zero
4847   if (LZMappingInfo) {
4848     if (auto ConstantLod =
4849          dyn_cast<ConstantFPSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
4850       if (ConstantLod->isZero() || ConstantLod->isNegative()) {
4851         IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
4852         NumMIVAddrs--;               // remove 'lod'
4853       }
4854     }
4855   }
4856 
  // Check for 16-bit addresses and pack them if so.
4858   unsigned DimIdx = AddrIdx + BaseOpcode->NumExtraArgs;
4859   MVT VAddrVT = Op.getOperand(DimIdx).getSimpleValueType();
4860   const MVT VAddrScalarVT = VAddrVT.getScalarType();
4861   if (((VAddrScalarVT == MVT::f16) || (VAddrScalarVT == MVT::i16)) &&
4862       ST->hasFeature(AMDGPU::FeatureR128A16)) {
4863     IsA16 = true;
4864     const MVT VectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16;
4865     for (unsigned i = AddrIdx; i < (AddrIdx + NumMIVAddrs); ++i) {
4866       SDValue AddrLo, AddrHi;
4867       // Push back extra arguments.
4868       if (i < DimIdx) {
4869         AddrLo = Op.getOperand(i);
4870       } else {
4871         AddrLo = Op.getOperand(i);
4872         // Dz/dh, dz/dv and the last odd coord are packed with undef. Also,
4873         // in 1D, derivatives dx/dh and dx/dv are packed with undef.
4874         if (((i + 1) >= (AddrIdx + NumMIVAddrs)) ||
4875             ((NumGradients / 2) % 2 == 1 &&
4876             (i == DimIdx + (NumGradients / 2) - 1 ||
4877              i == DimIdx + NumGradients - 1))) {
4878           AddrHi = DAG.getUNDEF(MVT::f16);
4879         } else {
4880           AddrHi = Op.getOperand(i + 1);
4881           i++;
4882         }
4883         AddrLo = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VectorVT,
4884                              {AddrLo, AddrHi});
4885         AddrLo = DAG.getBitcast(MVT::i32, AddrLo);
4886       }
4887       VAddrs.push_back(AddrLo);
4888     }
4889   } else {
4890     for (unsigned i = 0; i < NumMIVAddrs; ++i)
4891       VAddrs.push_back(Op.getOperand(AddrIdx + i));
4892   }
4893 
4894   SDValue VAddr = getBuildDwordsVector(DAG, DL, VAddrs);
4895 
4896   SDValue True = DAG.getTargetConstant(1, DL, MVT::i1);
4897   SDValue False = DAG.getTargetConstant(0, DL, MVT::i1);
4898   unsigned CtrlIdx; // Index of texfailctrl argument
4899   SDValue Unorm;
4900   if (!BaseOpcode->Sampler) {
4901     Unorm = True;
4902     CtrlIdx = AddrIdx + NumVAddrs + 1;
4903   } else {
4904     auto UnormConst =
4905         dyn_cast<ConstantSDNode>(Op.getOperand(AddrIdx + NumVAddrs + 2));
4906     if (!UnormConst)
4907       return Op;
4908 
4909     Unorm = UnormConst->getZExtValue() ? True : False;
4910     CtrlIdx = AddrIdx + NumVAddrs + 3;
4911   }
4912 
4913   SDValue TFE;
4914   SDValue LWE;
4915   SDValue TexFail = Op.getOperand(CtrlIdx);
4916   bool IsTexFail = false;
4917   if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail))
4918     return Op;
4919 
4920   if (IsTexFail) {
4921     if (!DMaskLanes) {
      // Expecting to get an error flag since TFC is on and dmask is 0.
      // Force dmask to be at least 1, otherwise the instruction will fail.
4924       DMask = 0x1;
4925       DMaskLanes = 1;
4926       NumVDataDwords = 1;
4927     }
4928     NumVDataDwords += 1;
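    // The extra dword carries the TFE/LWE status; constructRetValue later
    // splits it back out into the aggregate return value.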
4929     AdjustRetType = true;
4930   }
4931 
  // Something earlier tagged that the return type needs adjusting. This
  // happens if the instruction is a load or has set TexFailCtrl flags.
  if (AdjustRetType) {
    // NumVDataDwords reflects the true number of dwords required in the
    // return type.
4936     if (DMaskLanes == 0 && !BaseOpcode->Store) {
4937       // This is a no-op load. This can be eliminated
4938       SDValue Undef = DAG.getUNDEF(Op.getValueType());
4939       if (isa<MemSDNode>(Op))
4940         return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
4941       return Undef;
4942     }
4943 
4944     // Have to use a power of 2 number of dwords
4945     NumVDataDwords = 1 << Log2_32_Ceil(NumVDataDwords);
4946 
4947     EVT NewVT = NumVDataDwords > 1 ?
4948                   EVT::getVectorVT(*DAG.getContext(), MVT::f32, NumVDataDwords)
4949                 : MVT::f32;
4950 
4951     ResultTypes[0] = NewVT;
4952     if (ResultTypes.size() == 3) {
      // The original result was an aggregate type used for TexFailCtrl
      // results. The actual instruction returns as a vector type, which has
      // now been created. Remove the aggregate result.
4956       ResultTypes.erase(&ResultTypes[1]);
4957     }
4958   }
4959 
4960   SDValue GLC;
4961   SDValue SLC;
4962   if (BaseOpcode->Atomic) {
4963     GLC = True; // TODO no-return optimization
4964     if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, nullptr, &SLC))
4965       return Op;
4966   } else {
4967     if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, &GLC, &SLC))
4968       return Op;
4969   }
4970 
4971   SmallVector<SDValue, 14> Ops;
4972   if (BaseOpcode->Store || BaseOpcode->Atomic)
4973     Ops.push_back(VData); // vdata
4974   Ops.push_back(VAddr);
4975   Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs)); // rsrc
4976   if (BaseOpcode->Sampler)
4977     Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs + 1)); // sampler
4978   Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32));
4979   Ops.push_back(Unorm);
4980   Ops.push_back(GLC);
4981   Ops.push_back(SLC);
4982   Ops.push_back(IsA16 &&  // a16 or r128
4983                 ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False);
4984   Ops.push_back(TFE); // tfe
4985   Ops.push_back(LWE); // lwe
4986   Ops.push_back(DimInfo->DA ? True : False);
4987   if (BaseOpcode->HasD16)
4988     Ops.push_back(IsD16 ? True : False);
4989   if (isa<MemSDNode>(Op))
4990     Ops.push_back(Op.getOperand(0)); // chain
4991 
4992   int NumVAddrDwords = VAddr.getValueType().getSizeInBits() / 32;
4993   int Opcode = -1;
4994 
4995   if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
4996     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
4997                                    NumVDataDwords, NumVAddrDwords);
4998   if (Opcode == -1)
4999     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
5000                                    NumVDataDwords, NumVAddrDwords);
5001   assert(Opcode != -1);
5002 
5003   MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops);
5004   if (auto MemOp = dyn_cast<MemSDNode>(Op)) {
5005     MachineMemOperand *MemRef = MemOp->getMemOperand();
5006     DAG.setNodeMemRefs(NewNode, {MemRef});
5007   }
5008 
5009   if (BaseOpcode->AtomicX2) {
5010     SmallVector<SDValue, 1> Elt;
5011     DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1);
5012     return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL);
5013   } else if (!BaseOpcode->Store) {
5014     return constructRetValue(DAG, NewNode,
5015                              OrigResultTypes, IsTexFail,
5016                              Subtarget->hasUnpackedD16VMem(), IsD16,
5017                              DMaskLanes, NumVDataDwords, DL,
5018                              *DAG.getContext());
5019   }
5020 
5021   return SDValue(NewNode, 0);
5022 }
5023 
5024 SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
5025                                        SDValue Offset, SDValue GLC,
5026                                        SelectionDAG &DAG) const {
5027   MachineFunction &MF = DAG.getMachineFunction();
5028   MachineMemOperand *MMO = MF.getMachineMemOperand(
5029       MachinePointerInfo(),
5030       MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
5031           MachineMemOperand::MOInvariant,
5032       VT.getStoreSize(), VT.getStoreSize());
5033 
5034   if (!Offset->isDivergent()) {
5035     SDValue Ops[] = {
5036         Rsrc,
5037         Offset, // Offset
5038         GLC     // glc
5039     };
5040     return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL,
5041                                    DAG.getVTList(VT), Ops, VT, MMO);
5042   }
5043 
5044   // We have a divergent offset. Emit a MUBUF buffer load instead. We can
5045   // assume that the buffer is unswizzled.
5046   SmallVector<SDValue, 4> Loads;
5047   unsigned NumLoads = 1;
5048   MVT LoadVT = VT.getSimpleVT();
5049   unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1;
5050   assert((LoadVT.getScalarType() == MVT::i32 ||
5051           LoadVT.getScalarType() == MVT::f32) &&
5052          isPowerOf2_32(NumElts));
5053 
5054   if (NumElts == 8 || NumElts == 16) {
5055     NumLoads = NumElts == 16 ? 4 : 2;
5056     LoadVT = MVT::v4i32;
5057   }
5058 
5059   SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue});
5060   unsigned CachePolicy = cast<ConstantSDNode>(GLC)->getZExtValue();
5061   SDValue Ops[] = {
5062       DAG.getEntryNode(),                         // Chain
5063       Rsrc,                                       // rsrc
5064       DAG.getConstant(0, DL, MVT::i32),           // vindex
5065       {},                                         // voffset
5066       {},                                         // soffset
5067       {},                                         // offset
5068       DAG.getConstant(CachePolicy, DL, MVT::i32), // cachepolicy
5069       DAG.getConstant(0, DL, MVT::i1),            // idxen
5070   };
5071 
5072   // Use the alignment to ensure that the required offsets will fit into the
5073   // immediate offsets.
5074   setBufferOffsets(Offset, DAG, &Ops[3], NumLoads > 1 ? 16 * NumLoads : 4);
5075 
5076   uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
5077   for (unsigned i = 0; i < NumLoads; ++i) {
5078     Ops[5] = DAG.getConstant(InstOffset + 16 * i, DL, MVT::i32);
5079     Loads.push_back(DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList,
5080                                             Ops, LoadVT, MMO));
5081   }
5082 
5083   if (VT == MVT::v8i32 || VT == MVT::v16i32)
5084     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads);
5085 
5086   return Loads[0];
5087 }
5088 
5089 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
5090                                                   SelectionDAG &DAG) const {
5091   MachineFunction &MF = DAG.getMachineFunction();
5092   auto MFI = MF.getInfo<SIMachineFunctionInfo>();
5093 
5094   EVT VT = Op.getValueType();
5095   SDLoc DL(Op);
5096   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5097 
5098   // TODO: Should this propagate fast-math-flags?
5099 
5100   switch (IntrinsicID) {
5101   case Intrinsic::amdgcn_implicit_buffer_ptr: {
5102     if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction()))
5103       return emitNonHSAIntrinsicError(DAG, DL, VT);
5104     return getPreloadedValue(DAG, *MFI, VT,
5105                              AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
5106   }
5107   case Intrinsic::amdgcn_dispatch_ptr:
5108   case Intrinsic::amdgcn_queue_ptr: {
5109     if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) {
5110       DiagnosticInfoUnsupported BadIntrin(
5111           MF.getFunction(), "unsupported hsa intrinsic without hsa target",
5112           DL.getDebugLoc());
5113       DAG.getContext()->diagnose(BadIntrin);
5114       return DAG.getUNDEF(VT);
5115     }
5116 
5117     auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
5118       AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
5119     return getPreloadedValue(DAG, *MFI, VT, RegID);
5120   }
5121   case Intrinsic::amdgcn_implicitarg_ptr: {
5122     if (MFI->isEntryFunction())
5123       return getImplicitArgPtr(DAG, DL);
5124     return getPreloadedValue(DAG, *MFI, VT,
5125                              AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
5126   }
5127   case Intrinsic::amdgcn_kernarg_segment_ptr: {
5128     return getPreloadedValue(DAG, *MFI, VT,
5129                              AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
5130   }
5131   case Intrinsic::amdgcn_dispatch_id: {
5132     return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
5133   }
5134   case Intrinsic::amdgcn_rcp:
5135     return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
5136   case Intrinsic::amdgcn_rsq:
5137     return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
5138   case Intrinsic::amdgcn_rsq_legacy:
5139     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5140       return emitRemovedIntrinsicError(DAG, DL, VT);
5141 
5142     return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
5143   case Intrinsic::amdgcn_rcp_legacy:
5144     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5145       return emitRemovedIntrinsicError(DAG, DL, VT);
5146     return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
5147   case Intrinsic::amdgcn_rsq_clamp: {
5148     if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
5149       return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
5150 
5151     Type *Type = VT.getTypeForEVT(*DAG.getContext());
5152     APFloat Max = APFloat::getLargest(Type->getFltSemantics());
5153     APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
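    // RSQ_CLAMP is unavailable here, so emulate it by clamping the plain RSQ
    // result to +/- the largest finite value.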
5154 
5155     SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
5156     SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
5157                               DAG.getConstantFP(Max, DL, VT));
5158     return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
5159                        DAG.getConstantFP(Min, DL, VT));
5160   }
5161   case Intrinsic::r600_read_ngroups_x:
5162     if (Subtarget->isAmdHsaOS())
5163       return emitNonHSAIntrinsicError(DAG, DL, VT);
5164 
5165     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5166                                     SI::KernelInputOffsets::NGROUPS_X, 4, false);
5167   case Intrinsic::r600_read_ngroups_y:
5168     if (Subtarget->isAmdHsaOS())
5169       return emitNonHSAIntrinsicError(DAG, DL, VT);
5170 
5171     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5172                                     SI::KernelInputOffsets::NGROUPS_Y, 4, false);
5173   case Intrinsic::r600_read_ngroups_z:
5174     if (Subtarget->isAmdHsaOS())
5175       return emitNonHSAIntrinsicError(DAG, DL, VT);
5176 
5177     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5178                                     SI::KernelInputOffsets::NGROUPS_Z, 4, false);
5179   case Intrinsic::r600_read_global_size_x:
5180     if (Subtarget->isAmdHsaOS())
5181       return emitNonHSAIntrinsicError(DAG, DL, VT);
5182 
5183     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5184                                     SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false);
5185   case Intrinsic::r600_read_global_size_y:
5186     if (Subtarget->isAmdHsaOS())
5187       return emitNonHSAIntrinsicError(DAG, DL, VT);
5188 
5189     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5190                                     SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false);
5191   case Intrinsic::r600_read_global_size_z:
5192     if (Subtarget->isAmdHsaOS())
5193       return emitNonHSAIntrinsicError(DAG, DL, VT);
5194 
5195     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5196                                     SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false);
5197   case Intrinsic::r600_read_local_size_x:
5198     if (Subtarget->isAmdHsaOS())
5199       return emitNonHSAIntrinsicError(DAG, DL, VT);
5200 
5201     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5202                                   SI::KernelInputOffsets::LOCAL_SIZE_X);
5203   case Intrinsic::r600_read_local_size_y:
5204     if (Subtarget->isAmdHsaOS())
5205       return emitNonHSAIntrinsicError(DAG, DL, VT);
5206 
5207     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5208                                   SI::KernelInputOffsets::LOCAL_SIZE_Y);
5209   case Intrinsic::r600_read_local_size_z:
5210     if (Subtarget->isAmdHsaOS())
5211       return emitNonHSAIntrinsicError(DAG, DL, VT);
5212 
5213     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5214                                   SI::KernelInputOffsets::LOCAL_SIZE_Z);
5215   case Intrinsic::amdgcn_workgroup_id_x:
5216   case Intrinsic::r600_read_tgid_x:
5217     return getPreloadedValue(DAG, *MFI, VT,
5218                              AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
5219   case Intrinsic::amdgcn_workgroup_id_y:
5220   case Intrinsic::r600_read_tgid_y:
5221     return getPreloadedValue(DAG, *MFI, VT,
5222                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
5223   case Intrinsic::amdgcn_workgroup_id_z:
5224   case Intrinsic::r600_read_tgid_z:
5225     return getPreloadedValue(DAG, *MFI, VT,
5226                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
5227   case Intrinsic::amdgcn_workitem_id_x:
5228   case Intrinsic::r600_read_tidig_x:
5229     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5230                           SDLoc(DAG.getEntryNode()),
5231                           MFI->getArgInfo().WorkItemIDX);
5232   case Intrinsic::amdgcn_workitem_id_y:
5233   case Intrinsic::r600_read_tidig_y:
5234     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5235                           SDLoc(DAG.getEntryNode()),
5236                           MFI->getArgInfo().WorkItemIDY);
5237   case Intrinsic::amdgcn_workitem_id_z:
5238   case Intrinsic::r600_read_tidig_z:
5239     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5240                           SDLoc(DAG.getEntryNode()),
5241                           MFI->getArgInfo().WorkItemIDZ);
5242   case Intrinsic::amdgcn_s_buffer_load: {
5243     unsigned Cache = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
5244     return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2),
5245                         DAG.getTargetConstant(Cache & 1, DL, MVT::i1), DAG);
5246   }
5247   case Intrinsic::amdgcn_fdiv_fast:
5248     return lowerFDIV_FAST(Op, DAG);
5249   case Intrinsic::amdgcn_interp_mov: {
5250     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
5251     SDValue Glue = M0.getValue(1);
5252     return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
5253                        Op.getOperand(2), Op.getOperand(3), Glue);
5254   }
5255   case Intrinsic::amdgcn_interp_p1: {
5256     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
5257     SDValue Glue = M0.getValue(1);
5258     return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
5259                        Op.getOperand(2), Op.getOperand(3), Glue);
5260   }
5261   case Intrinsic::amdgcn_interp_p2: {
5262     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
5263     SDValue Glue = SDValue(M0.getNode(), 1);
5264     return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
5265                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
5266                        Glue);
5267   }
5268   case Intrinsic::amdgcn_interp_p1_f16: {
5269     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
5270     SDValue Glue = M0.getValue(1);
5271     if (getSubtarget()->getLDSBankCount() == 16) {
5272       // 16 bank LDS
5273       SDValue S = DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32,
5274                               DAG.getConstant(2, DL, MVT::i32), // P0
5275                               Op.getOperand(2), // Attrchan
5276                               Op.getOperand(3), // Attr
5277                               Glue);
5278       SDValue Ops[] = {
5279         Op.getOperand(1), // Src0
5280         Op.getOperand(2), // Attrchan
5281         Op.getOperand(3), // Attr
5282         DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
5283         S, // Src2 - holds two f16 values selected by high
5284         DAG.getConstant(0, DL, MVT::i32), // $src2_modifiers
5285         Op.getOperand(4), // high
5286         DAG.getConstant(0, DL, MVT::i1), // $clamp
5287         DAG.getConstant(0, DL, MVT::i32) // $omod
5288       };
5289       return DAG.getNode(AMDGPUISD::INTERP_P1LV_F16, DL, MVT::f32, Ops);
5290     } else {
5291       // 32 bank LDS
5292       SDValue Ops[] = {
5293         Op.getOperand(1), // Src0
5294         Op.getOperand(2), // Attrchan
5295         Op.getOperand(3), // Attr
5296         DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
5297         Op.getOperand(4), // high
5298         DAG.getConstant(0, DL, MVT::i1), // $clamp
5299         DAG.getConstant(0, DL, MVT::i32), // $omod
5300         Glue
5301       };
5302       return DAG.getNode(AMDGPUISD::INTERP_P1LL_F16, DL, MVT::f32, Ops);
5303     }
5304   }
5305   case Intrinsic::amdgcn_interp_p2_f16: {
5306     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(6));
5307     SDValue Glue = SDValue(M0.getNode(), 1);
5308     SDValue Ops[] = {
5309       Op.getOperand(2), // Src0
5310       Op.getOperand(3), // Attrchan
5311       Op.getOperand(4), // Attr
5312       DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
5313       Op.getOperand(1), // Src2
5314       DAG.getConstant(0, DL, MVT::i32), // $src2_modifiers
5315       Op.getOperand(5), // high
5316       DAG.getConstant(0, DL, MVT::i1), // $clamp
5317       Glue
5318     };
5319     return DAG.getNode(AMDGPUISD::INTERP_P2_F16, DL, MVT::f16, Ops);
5320   }
5321   case Intrinsic::amdgcn_sin:
5322     return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
5323 
5324   case Intrinsic::amdgcn_cos:
5325     return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
5326 
5327   case Intrinsic::amdgcn_log_clamp: {
5328     if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
5329       return SDValue();
5330 
5331     DiagnosticInfoUnsupported BadIntrin(
5332       MF.getFunction(), "intrinsic not supported on subtarget",
5333       DL.getDebugLoc());
    DAG.getContext()->diagnose(BadIntrin);
    return DAG.getUNDEF(VT);
5336   }
5337   case Intrinsic::amdgcn_ldexp:
5338     return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
5339                        Op.getOperand(1), Op.getOperand(2));
5340 
5341   case Intrinsic::amdgcn_fract:
5342     return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
5343 
5344   case Intrinsic::amdgcn_class:
5345     return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
5346                        Op.getOperand(1), Op.getOperand(2));
5347   case Intrinsic::amdgcn_div_fmas:
5348     return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
5349                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5350                        Op.getOperand(4));
5351 
5352   case Intrinsic::amdgcn_div_fixup:
5353     return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
5354                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5355 
5356   case Intrinsic::amdgcn_trig_preop:
5357     return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
5358                        Op.getOperand(1), Op.getOperand(2));
5359   case Intrinsic::amdgcn_div_scale: {
    // The 3rd parameter is required to be a constant.
5361     const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
5362     if (!Param)
5363       return DAG.getMergeValues({ DAG.getUNDEF(VT), DAG.getUNDEF(MVT::i1) }, DL);
5364 
    // Translate to the operands expected by the machine instruction. The
    // first operand must be the same as either the numerator or the
    // denominator, as selected by the constant third parameter.
5367     SDValue Numerator = Op.getOperand(1);
5368     SDValue Denominator = Op.getOperand(2);
5369 
5370     // Note this order is opposite of the machine instruction's operations,
5371     // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
5372     // intrinsic has the numerator as the first operand to match a normal
5373     // division operation.
5374 
5375     SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
5376 
5377     return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
5378                        Denominator, Numerator);
5379   }
5380   case Intrinsic::amdgcn_icmp: {
5381     // There is a Pat that handles this variant, so return it as-is.
5382     if (Op.getOperand(1).getValueType() == MVT::i1 &&
5383         Op.getConstantOperandVal(2) == 0 &&
5384         Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE)
5385       return Op;
5386     return lowerICMPIntrinsic(*this, Op.getNode(), DAG);
5387   }
5388   case Intrinsic::amdgcn_fcmp: {
5389     return lowerFCMPIntrinsic(*this, Op.getNode(), DAG);
5390   }
5391   case Intrinsic::amdgcn_fmed3:
5392     return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
5393                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5394   case Intrinsic::amdgcn_fdot2:
5395     return DAG.getNode(AMDGPUISD::FDOT2, DL, VT,
5396                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5397                        Op.getOperand(4));
5398   case Intrinsic::amdgcn_fmul_legacy:
5399     return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
5400                        Op.getOperand(1), Op.getOperand(2));
5401   case Intrinsic::amdgcn_sffbh:
5402     return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
5403   case Intrinsic::amdgcn_sbfe:
5404     return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
5405                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5406   case Intrinsic::amdgcn_ubfe:
5407     return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
5408                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5409   case Intrinsic::amdgcn_cvt_pkrtz:
5410   case Intrinsic::amdgcn_cvt_pknorm_i16:
5411   case Intrinsic::amdgcn_cvt_pknorm_u16:
5412   case Intrinsic::amdgcn_cvt_pk_i16:
5413   case Intrinsic::amdgcn_cvt_pk_u16: {
5414     // FIXME: Stop adding cast if v2f16/v2i16 are legal.
5415     EVT VT = Op.getValueType();
5416     unsigned Opcode;
5417 
5418     if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
5419       Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
5420     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
5421       Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
5422     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
5423       Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
5424     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
5425       Opcode = AMDGPUISD::CVT_PK_I16_I32;
5426     else
5427       Opcode = AMDGPUISD::CVT_PK_U16_U32;
5428 
5429     if (isTypeLegal(VT))
5430       return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2));
5431 
5432     SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
5433                                Op.getOperand(1), Op.getOperand(2));
5434     return DAG.getNode(ISD::BITCAST, DL, VT, Node);
5435   }
5436   case Intrinsic::amdgcn_wqm: {
5437     SDValue Src = Op.getOperand(1);
5438     return SDValue(DAG.getMachineNode(AMDGPU::WQM, DL, Src.getValueType(), Src),
5439                    0);
5440   }
5441   case Intrinsic::amdgcn_wwm: {
5442     SDValue Src = Op.getOperand(1);
5443     return SDValue(DAG.getMachineNode(AMDGPU::WWM, DL, Src.getValueType(), Src),
5444                    0);
5445   }
5446   case Intrinsic::amdgcn_fmad_ftz:
5447     return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1),
5448                        Op.getOperand(2), Op.getOperand(3));
5449   default:
5450     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
5451             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
5452       return lowerImage(Op, ImageDimIntr, DAG);
5453 
5454     return Op;
5455   }
5456 }
5457 
5458 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
5459                                                  SelectionDAG &DAG) const {
5460   unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
5461   SDLoc DL(Op);
5462 
5463   switch (IntrID) {
5464   case Intrinsic::amdgcn_ds_ordered_add:
5465   case Intrinsic::amdgcn_ds_ordered_swap: {
5466     MemSDNode *M = cast<MemSDNode>(Op);
5467     SDValue Chain = M->getOperand(0);
5468     SDValue M0 = M->getOperand(2);
5469     SDValue Value = M->getOperand(3);
5470     unsigned OrderedCountIndex = M->getConstantOperandVal(7);
5471     unsigned WaveRelease = M->getConstantOperandVal(8);
5472     unsigned WaveDone = M->getConstantOperandVal(9);
5473     unsigned ShaderType;
5474     unsigned Instruction;
5475 
5476     switch (IntrID) {
5477     case Intrinsic::amdgcn_ds_ordered_add:
5478       Instruction = 0;
5479       break;
5480     case Intrinsic::amdgcn_ds_ordered_swap:
5481       Instruction = 1;
5482       break;
5483     }
5484 
5485     if (WaveDone && !WaveRelease)
5486       report_fatal_error("ds_ordered_count: wave_done requires wave_release");
5487 
5488     switch (DAG.getMachineFunction().getFunction().getCallingConv()) {
5489     case CallingConv::AMDGPU_CS:
5490     case CallingConv::AMDGPU_KERNEL:
5491       ShaderType = 0;
5492       break;
5493     case CallingConv::AMDGPU_PS:
5494       ShaderType = 1;
5495       break;
5496     case CallingConv::AMDGPU_VS:
5497       ShaderType = 2;
5498       break;
5499     case CallingConv::AMDGPU_GS:
5500       ShaderType = 3;
5501       break;
5502     default:
5503       report_fatal_error("ds_ordered_count unsupported for this calling conv");
5504     }
5505 
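         // Pack the DS_ORDERED_COUNT offset operand: the low 8 bits hold the
         // ordered count index scaled by 4; the byte above it holds wave_release
         // in bit 0, wave_done in bit 1, the shader type in bits 3:2 and the
         // instruction selector (add = 0, swap = 1) in bit 4.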
5506     unsigned Offset0 = OrderedCountIndex << 2;
5507     unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
5508                        (Instruction << 4);
5509     unsigned Offset = Offset0 | (Offset1 << 8);
5510 
5511     SDValue Ops[] = {
5512       Chain,
5513       Value,
5514       DAG.getTargetConstant(Offset, DL, MVT::i16),
5515       copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue
5516     };
5517     return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL,
5518                                    M->getVTList(), Ops, M->getMemoryVT(),
5519                                    M->getMemOperand());
5520   }
5521   case Intrinsic::amdgcn_ds_fadd: {
5522     MemSDNode *M = cast<MemSDNode>(Op);
5523     unsigned Opc = ISD::ATOMIC_LOAD_FADD;
5529 
5530     return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(),
5531                          M->getOperand(0), M->getOperand(2), M->getOperand(3),
5532                          M->getMemOperand());
5533   }
5534   case Intrinsic::amdgcn_atomic_inc:
5535   case Intrinsic::amdgcn_atomic_dec:
5536   case Intrinsic::amdgcn_ds_fmin:
5537   case Intrinsic::amdgcn_ds_fmax: {
5538     MemSDNode *M = cast<MemSDNode>(Op);
5539     unsigned Opc;
5540     switch (IntrID) {
5541     case Intrinsic::amdgcn_atomic_inc:
5542       Opc = AMDGPUISD::ATOMIC_INC;
5543       break;
5544     case Intrinsic::amdgcn_atomic_dec:
5545       Opc = AMDGPUISD::ATOMIC_DEC;
5546       break;
5547     case Intrinsic::amdgcn_ds_fmin:
5548       Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
5549       break;
5550     case Intrinsic::amdgcn_ds_fmax:
5551       Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
5552       break;
5553     default:
5554       llvm_unreachable("Unknown intrinsic!");
5555     }
5556     SDValue Ops[] = {
5557       M->getOperand(0), // Chain
5558       M->getOperand(2), // Ptr
5559       M->getOperand(3)  // Value
5560     };
5561 
5562     return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
5563                                    M->getMemoryVT(), M->getMemOperand());
5564   }
5565   case Intrinsic::amdgcn_buffer_load:
5566   case Intrinsic::amdgcn_buffer_load_format: {
5567     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
5568     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
5569     unsigned IdxEn = 1;
5570     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
5571       IdxEn = Idx->getZExtValue() != 0;
5572     SDValue Ops[] = {
5573       Op.getOperand(0), // Chain
5574       Op.getOperand(2), // rsrc
5575       Op.getOperand(3), // vindex
5576       SDValue(),        // voffset -- will be set by setBufferOffsets
5577       SDValue(),        // soffset -- will be set by setBufferOffsets
5578       SDValue(),        // offset -- will be set by setBufferOffsets
5579       DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
5580       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
5581     };
5582 
5583     setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]);
5584     unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
5585         AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
5586 
5587     EVT VT = Op.getValueType();
5588     EVT IntVT = VT.changeTypeToInteger();
5589     auto *M = cast<MemSDNode>(Op);
5590     EVT LoadVT = Op.getValueType();
5591 
5592     if (LoadVT.getScalarType() == MVT::f16)
5593       return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
5594                                  M, DAG, Ops);
5595     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
5596                                    M->getMemOperand());
5597   }
5598   case Intrinsic::amdgcn_raw_buffer_load:
5599   case Intrinsic::amdgcn_raw_buffer_load_format: {
5600     auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
5601     SDValue Ops[] = {
5602       Op.getOperand(0), // Chain
5603       Op.getOperand(2), // rsrc
5604       DAG.getConstant(0, DL, MVT::i32), // vindex
5605       Offsets.first,    // voffset
5606       Op.getOperand(4), // soffset
5607       Offsets.second,   // offset
5608       Op.getOperand(5), // cachepolicy
5609       DAG.getConstant(0, DL, MVT::i1), // idxen
5610     };
5611 
5612     unsigned Opc = (IntrID == Intrinsic::amdgcn_raw_buffer_load) ?
5613         AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
5614 
5615     EVT VT = Op.getValueType();
5616     EVT IntVT = VT.changeTypeToInteger();
5617     auto *M = cast<MemSDNode>(Op);
5618     EVT LoadVT = Op.getValueType();
5619 
5620     if (LoadVT.getScalarType() == MVT::f16)
5621       return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
5622                                  M, DAG, Ops);
5623     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
5624                                    M->getMemOperand());
5625   }
5626   case Intrinsic::amdgcn_struct_buffer_load:
5627   case Intrinsic::amdgcn_struct_buffer_load_format: {
5628     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
5629     SDValue Ops[] = {
5630       Op.getOperand(0), // Chain
5631       Op.getOperand(2), // rsrc
5632       Op.getOperand(3), // vindex
5633       Offsets.first,    // voffset
5634       Op.getOperand(5), // soffset
5635       Offsets.second,   // offset
5636       Op.getOperand(6), // cachepolicy
5637       DAG.getConstant(1, DL, MVT::i1), // idxen
5638     };
5639 
5640     unsigned Opc = (IntrID == Intrinsic::amdgcn_struct_buffer_load) ?
5641         AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
5642 
5643     EVT VT = Op.getValueType();
5644     EVT IntVT = VT.changeTypeToInteger();
5645     auto *M = cast<MemSDNode>(Op);
5646     EVT LoadVT = Op.getValueType();
5647 
5648     if (LoadVT.getScalarType() == MVT::f16)
5649       return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
5650                                  M, DAG, Ops);
5651     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
5652                                    M->getMemOperand());
5653   }
5654   case Intrinsic::amdgcn_tbuffer_load: {
5655     MemSDNode *M = cast<MemSDNode>(Op);
5656     EVT LoadVT = Op.getValueType();
5657 
5658     unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
5659     unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
5660     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
5661     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
5662     unsigned IdxEn = 1;
5663     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
5664       IdxEn = Idx->getZExtValue() != 0;
5665     SDValue Ops[] = {
5666       Op.getOperand(0),  // Chain
5667       Op.getOperand(2),  // rsrc
5668       Op.getOperand(3),  // vindex
5669       Op.getOperand(4),  // voffset
5670       Op.getOperand(5),  // soffset
5671       Op.getOperand(6),  // offset
5672       DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
5673       DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
5674       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
5675     };
5676 
5677     if (LoadVT.getScalarType() == MVT::f16)
5678       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
5679                                  M, DAG, Ops);
5680     return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
5681                                    Op->getVTList(), Ops, LoadVT,
5682                                    M->getMemOperand());
5683   }
5684   case Intrinsic::amdgcn_raw_tbuffer_load: {
5685     MemSDNode *M = cast<MemSDNode>(Op);
5686     EVT LoadVT = Op.getValueType();
5687     auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
5688 
5689     SDValue Ops[] = {
5690       Op.getOperand(0),  // Chain
5691       Op.getOperand(2),  // rsrc
5692       DAG.getConstant(0, DL, MVT::i32), // vindex
5693       Offsets.first,     // voffset
5694       Op.getOperand(4),  // soffset
5695       Offsets.second,    // offset
5696       Op.getOperand(5),  // format
5697       Op.getOperand(6),  // cachepolicy
5698       DAG.getConstant(0, DL, MVT::i1), // idxen
5699     };
5700 
5701     if (LoadVT.getScalarType() == MVT::f16)
5702       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
5703                                  M, DAG, Ops);
5704     return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
5705                                    Op->getVTList(), Ops, LoadVT,
5706                                    M->getMemOperand());
5707   }
5708   case Intrinsic::amdgcn_struct_tbuffer_load: {
5709     MemSDNode *M = cast<MemSDNode>(Op);
5710     EVT LoadVT = Op.getValueType();
5711     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
5712 
5713     SDValue Ops[] = {
5714       Op.getOperand(0),  // Chain
5715       Op.getOperand(2),  // rsrc
5716       Op.getOperand(3),  // vindex
5717       Offsets.first,     // voffset
5718       Op.getOperand(5),  // soffset
5719       Offsets.second,    // offset
5720       Op.getOperand(6),  // format
5721       Op.getOperand(7),  // cachepolicy
5722       DAG.getConstant(1, DL, MVT::i1), // idxen
5723     };
5724 
5725     if (LoadVT.getScalarType() == MVT::f16)
5726       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
5727                                  M, DAG, Ops);
5728     return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
5729                                    Op->getVTList(), Ops, LoadVT,
5730                                    M->getMemOperand());
5731   }
5732   case Intrinsic::amdgcn_buffer_atomic_swap:
5733   case Intrinsic::amdgcn_buffer_atomic_add:
5734   case Intrinsic::amdgcn_buffer_atomic_sub:
5735   case Intrinsic::amdgcn_buffer_atomic_smin:
5736   case Intrinsic::amdgcn_buffer_atomic_umin:
5737   case Intrinsic::amdgcn_buffer_atomic_smax:
5738   case Intrinsic::amdgcn_buffer_atomic_umax:
5739   case Intrinsic::amdgcn_buffer_atomic_and:
5740   case Intrinsic::amdgcn_buffer_atomic_or:
5741   case Intrinsic::amdgcn_buffer_atomic_xor: {
5742     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
5743     unsigned IdxEn = 1;
5744     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
5745       IdxEn = Idx->getZExtValue() != 0;
5746     SDValue Ops[] = {
5747       Op.getOperand(0), // Chain
5748       Op.getOperand(2), // vdata
5749       Op.getOperand(3), // rsrc
5750       Op.getOperand(4), // vindex
5751       SDValue(),        // voffset -- will be set by setBufferOffsets
5752       SDValue(),        // soffset -- will be set by setBufferOffsets
5753       SDValue(),        // offset -- will be set by setBufferOffsets
5754       DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
5755       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
5756     };
5757     setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
5758     EVT VT = Op.getValueType();
5759 
5760     auto *M = cast<MemSDNode>(Op);
5761     unsigned Opcode = 0;
5762 
5763     switch (IntrID) {
5764     case Intrinsic::amdgcn_buffer_atomic_swap:
5765       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
5766       break;
5767     case Intrinsic::amdgcn_buffer_atomic_add:
5768       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
5769       break;
5770     case Intrinsic::amdgcn_buffer_atomic_sub:
5771       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
5772       break;
5773     case Intrinsic::amdgcn_buffer_atomic_smin:
5774       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
5775       break;
5776     case Intrinsic::amdgcn_buffer_atomic_umin:
5777       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
5778       break;
5779     case Intrinsic::amdgcn_buffer_atomic_smax:
5780       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
5781       break;
5782     case Intrinsic::amdgcn_buffer_atomic_umax:
5783       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
5784       break;
5785     case Intrinsic::amdgcn_buffer_atomic_and:
5786       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
5787       break;
5788     case Intrinsic::amdgcn_buffer_atomic_or:
5789       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
5790       break;
5791     case Intrinsic::amdgcn_buffer_atomic_xor:
5792       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
5793       break;
5794     default:
5795       llvm_unreachable("unhandled atomic opcode");
5796     }
5797 
5798     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
5799                                    M->getMemOperand());
5800   }
5801   case Intrinsic::amdgcn_raw_buffer_atomic_swap:
5802   case Intrinsic::amdgcn_raw_buffer_atomic_add:
5803   case Intrinsic::amdgcn_raw_buffer_atomic_sub:
5804   case Intrinsic::amdgcn_raw_buffer_atomic_smin:
5805   case Intrinsic::amdgcn_raw_buffer_atomic_umin:
5806   case Intrinsic::amdgcn_raw_buffer_atomic_smax:
5807   case Intrinsic::amdgcn_raw_buffer_atomic_umax:
5808   case Intrinsic::amdgcn_raw_buffer_atomic_and:
5809   case Intrinsic::amdgcn_raw_buffer_atomic_or:
5810   case Intrinsic::amdgcn_raw_buffer_atomic_xor: {
5811     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
5812     SDValue Ops[] = {
5813       Op.getOperand(0), // Chain
5814       Op.getOperand(2), // vdata
5815       Op.getOperand(3), // rsrc
5816       DAG.getConstant(0, DL, MVT::i32), // vindex
5817       Offsets.first,    // voffset
5818       Op.getOperand(5), // soffset
5819       Offsets.second,   // offset
5820       Op.getOperand(6), // cachepolicy
5821       DAG.getConstant(0, DL, MVT::i1), // idxen
5822     };
5823     EVT VT = Op.getValueType();
5824 
5825     auto *M = cast<MemSDNode>(Op);
5826     unsigned Opcode = 0;
5827 
5828     switch (IntrID) {
5829     case Intrinsic::amdgcn_raw_buffer_atomic_swap:
5830       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
5831       break;
5832     case Intrinsic::amdgcn_raw_buffer_atomic_add:
5833       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
5834       break;
5835     case Intrinsic::amdgcn_raw_buffer_atomic_sub:
5836       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
5837       break;
5838     case Intrinsic::amdgcn_raw_buffer_atomic_smin:
5839       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
5840       break;
5841     case Intrinsic::amdgcn_raw_buffer_atomic_umin:
5842       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
5843       break;
5844     case Intrinsic::amdgcn_raw_buffer_atomic_smax:
5845       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
5846       break;
5847     case Intrinsic::amdgcn_raw_buffer_atomic_umax:
5848       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
5849       break;
5850     case Intrinsic::amdgcn_raw_buffer_atomic_and:
5851       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
5852       break;
5853     case Intrinsic::amdgcn_raw_buffer_atomic_or:
5854       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
5855       break;
5856     case Intrinsic::amdgcn_raw_buffer_atomic_xor:
5857       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
5858       break;
5859     default:
5860       llvm_unreachable("unhandled atomic opcode");
5861     }
5862 
5863     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
5864                                    M->getMemOperand());
5865   }
5866   case Intrinsic::amdgcn_struct_buffer_atomic_swap:
5867   case Intrinsic::amdgcn_struct_buffer_atomic_add:
5868   case Intrinsic::amdgcn_struct_buffer_atomic_sub:
5869   case Intrinsic::amdgcn_struct_buffer_atomic_smin:
5870   case Intrinsic::amdgcn_struct_buffer_atomic_umin:
5871   case Intrinsic::amdgcn_struct_buffer_atomic_smax:
5872   case Intrinsic::amdgcn_struct_buffer_atomic_umax:
5873   case Intrinsic::amdgcn_struct_buffer_atomic_and:
5874   case Intrinsic::amdgcn_struct_buffer_atomic_or:
5875   case Intrinsic::amdgcn_struct_buffer_atomic_xor: {
5876     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
5877     SDValue Ops[] = {
5878       Op.getOperand(0), // Chain
5879       Op.getOperand(2), // vdata
5880       Op.getOperand(3), // rsrc
5881       Op.getOperand(4), // vindex
5882       Offsets.first,    // voffset
5883       Op.getOperand(6), // soffset
5884       Offsets.second,   // offset
5885       Op.getOperand(7), // cachepolicy
5886       DAG.getConstant(1, DL, MVT::i1), // idxen
5887     };
5888     EVT VT = Op.getValueType();
5889 
5890     auto *M = cast<MemSDNode>(Op);
5891     unsigned Opcode = 0;
5892 
5893     switch (IntrID) {
5894     case Intrinsic::amdgcn_struct_buffer_atomic_swap:
5895       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
5896       break;
5897     case Intrinsic::amdgcn_struct_buffer_atomic_add:
5898       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
5899       break;
5900     case Intrinsic::amdgcn_struct_buffer_atomic_sub:
5901       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
5902       break;
5903     case Intrinsic::amdgcn_struct_buffer_atomic_smin:
5904       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
5905       break;
5906     case Intrinsic::amdgcn_struct_buffer_atomic_umin:
5907       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
5908       break;
5909     case Intrinsic::amdgcn_struct_buffer_atomic_smax:
5910       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
5911       break;
5912     case Intrinsic::amdgcn_struct_buffer_atomic_umax:
5913       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
5914       break;
5915     case Intrinsic::amdgcn_struct_buffer_atomic_and:
5916       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
5917       break;
5918     case Intrinsic::amdgcn_struct_buffer_atomic_or:
5919       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
5920       break;
5921     case Intrinsic::amdgcn_struct_buffer_atomic_xor:
5922       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
5923       break;
5924     default:
5925       llvm_unreachable("unhandled atomic opcode");
5926     }
5927 
5928     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
5929                                    M->getMemOperand());
5930   }
5931   case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
5932     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
5933     unsigned IdxEn = 1;
5934     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(5)))
5935       IdxEn = Idx->getZExtValue() != 0;
5936     SDValue Ops[] = {
5937       Op.getOperand(0), // Chain
5938       Op.getOperand(2), // src
5939       Op.getOperand(3), // cmp
5940       Op.getOperand(4), // rsrc
5941       Op.getOperand(5), // vindex
5942       SDValue(),        // voffset -- will be set by setBufferOffsets
5943       SDValue(),        // soffset -- will be set by setBufferOffsets
5944       SDValue(),        // offset -- will be set by setBufferOffsets
5945       DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
5946       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
5947     };
5948     setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]);
5949     EVT VT = Op.getValueType();
5950     auto *M = cast<MemSDNode>(Op);
5951 
5952     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
5953                                    Op->getVTList(), Ops, VT, M->getMemOperand());
5954   }
5955   case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: {
5956     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
5957     SDValue Ops[] = {
5958       Op.getOperand(0), // Chain
5959       Op.getOperand(2), // src
5960       Op.getOperand(3), // cmp
5961       Op.getOperand(4), // rsrc
5962       DAG.getConstant(0, DL, MVT::i32), // vindex
5963       Offsets.first,    // voffset
5964       Op.getOperand(6), // soffset
5965       Offsets.second,   // offset
5966       Op.getOperand(7), // cachepolicy
5967       DAG.getConstant(0, DL, MVT::i1), // idxen
5968     };
5969     EVT VT = Op.getValueType();
5970     auto *M = cast<MemSDNode>(Op);
5971 
5972     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
5973                                    Op->getVTList(), Ops, VT, M->getMemOperand());
5974   }
5975   case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: {
5976     auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG);
5977     SDValue Ops[] = {
5978       Op.getOperand(0), // Chain
5979       Op.getOperand(2), // src
5980       Op.getOperand(3), // cmp
5981       Op.getOperand(4), // rsrc
5982       Op.getOperand(5), // vindex
5983       Offsets.first,    // voffset
5984       Op.getOperand(7), // soffset
5985       Offsets.second,   // offset
5986       Op.getOperand(8), // cachepolicy
5987       DAG.getConstant(1, DL, MVT::i1), // idxen
5988     };
5989     EVT VT = Op.getValueType();
5990     auto *M = cast<MemSDNode>(Op);
5991 
5992     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
5993                                    Op->getVTList(), Ops, VT, M->getMemOperand());
5994   }
5995 
5996   default:
5997     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
5998             AMDGPU::getImageDimIntrinsicInfo(IntrID))
5999       return lowerImage(Op, ImageDimIntr, DAG);
6000 
6001     return SDValue();
6002   }
6003 }
6004 
6005 SDValue SITargetLowering::handleD16VData(SDValue VData,
6006                                          SelectionDAG &DAG) const {
6007   EVT StoreVT = VData.getValueType();
6008 
6009   // No change for f16 and legal vector D16 types.
6010   if (!StoreVT.isVector())
6011     return VData;
6012 
6013   SDLoc DL(VData);
6014   assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16");
6015 
6016   if (Subtarget->hasUnpackedD16VMem()) {
6017     // We need to unpack the packed data to store.
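         // With unpacked D16 VMEM each 16-bit element occupies the low half of
         // its own 32-bit lane, so e.g. a v2f16 store value becomes two
         // zero-extended i32 elements.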
6018     EVT IntStoreVT = StoreVT.changeTypeToInteger();
6019     SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
6020 
6021     EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
6022                                         StoreVT.getVectorNumElements());
6023     SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
6024     return DAG.UnrollVectorOp(ZExt.getNode());
6025   }
6026 
6027   assert(isTypeLegal(StoreVT));
6028   return VData;
6029 }
6030 
6031 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
6032                                               SelectionDAG &DAG) const {
6033   SDLoc DL(Op);
6034   SDValue Chain = Op.getOperand(0);
6035   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
6036   MachineFunction &MF = DAG.getMachineFunction();
6037 
6038   switch (IntrinsicID) {
6039   case Intrinsic::amdgcn_exp: {
6040     const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
6041     const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
6042     const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
6043     const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));
6044 
6045     const SDValue Ops[] = {
6046       Chain,
6047       DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
6048       DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
6049       Op.getOperand(4), // src0
6050       Op.getOperand(5), // src1
6051       Op.getOperand(6), // src2
6052       Op.getOperand(7), // src3
6053       DAG.getTargetConstant(0, DL, MVT::i1), // compr
6054       DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
6055     };
6056 
6057     unsigned Opc = Done->isNullValue() ?
6058       AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
6059     return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
6060   }
6061   case Intrinsic::amdgcn_exp_compr: {
6062     const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
6063     const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
6064     SDValue Src0 = Op.getOperand(4);
6065     SDValue Src1 = Op.getOperand(5);
6066     const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
6067     const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));
6068 
6069     SDValue Undef = DAG.getUNDEF(MVT::f32);
6070     const SDValue Ops[] = {
6071       Chain,
6072       DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
6073       DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
6074       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
6075       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
6076       Undef, // src2
6077       Undef, // src3
6078       DAG.getTargetConstant(1, DL, MVT::i1), // compr
6079       DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
6080     };
6081 
6082     unsigned Opc = Done->isNullValue() ?
6083       AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
6084     return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
6085   }
6086   case Intrinsic::amdgcn_s_sendmsg:
6087   case Intrinsic::amdgcn_s_sendmsghalt: {
6088     unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ?
6089       AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
6090     Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
6091     SDValue Glue = Chain.getValue(1);
6092     return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
6093                        Op.getOperand(2), Glue);
6094   }
6095   case Intrinsic::amdgcn_init_exec: {
6096     return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
6097                        Op.getOperand(2));
6098   }
6099   case Intrinsic::amdgcn_init_exec_from_input: {
6100     return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
6101                        Op.getOperand(2), Op.getOperand(3));
6102   }
6103   case Intrinsic::amdgcn_s_barrier: {
6104     if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
6105       const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
6106       unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
6107       if (WGSize <= ST.getWavefrontSize())
6108         return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
6109                                           Op.getOperand(0)), 0);
6110     }
6111     return SDValue();
6112   }
6113   case Intrinsic::amdgcn_tbuffer_store: {
6114     SDValue VData = Op.getOperand(2);
6115     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6116     if (IsD16)
6117       VData = handleD16VData(VData, DAG);
6118     unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
6119     unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
6120     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
6121     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue();
6122     unsigned IdxEn = 1;
6123     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6124       IdxEn = Idx->getZExtValue() != 0;
6125     SDValue Ops[] = {
6126       Chain,
6127       VData,             // vdata
6128       Op.getOperand(3),  // rsrc
6129       Op.getOperand(4),  // vindex
6130       Op.getOperand(5),  // voffset
6131       Op.getOperand(6),  // soffset
6132       Op.getOperand(7),  // offset
6133       DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
6134       DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6135       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6136     };
6137     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6138                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6139     MemSDNode *M = cast<MemSDNode>(Op);
6140     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6141                                    M->getMemoryVT(), M->getMemOperand());
6142   }
6143 
6144   case Intrinsic::amdgcn_struct_tbuffer_store: {
6145     SDValue VData = Op.getOperand(2);
6146     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6147     if (IsD16)
6148       VData = handleD16VData(VData, DAG);
6149     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6150     SDValue Ops[] = {
6151       Chain,
6152       VData,             // vdata
6153       Op.getOperand(3),  // rsrc
6154       Op.getOperand(4),  // vindex
6155       Offsets.first,     // voffset
6156       Op.getOperand(6),  // soffset
6157       Offsets.second,    // offset
6158       Op.getOperand(7),  // format
6159       Op.getOperand(8),  // cachepolicy
6160       DAG.getConstant(1, DL, MVT::i1), // idxen
6161     };
6162     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6163                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6164     MemSDNode *M = cast<MemSDNode>(Op);
6165     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6166                                    M->getMemoryVT(), M->getMemOperand());
6167   }
6168 
6169   case Intrinsic::amdgcn_raw_tbuffer_store: {
6170     SDValue VData = Op.getOperand(2);
6171     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6172     if (IsD16)
6173       VData = handleD16VData(VData, DAG);
6174     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6175     SDValue Ops[] = {
6176       Chain,
6177       VData,             // vdata
6178       Op.getOperand(3),  // rsrc
6179       DAG.getConstant(0, DL, MVT::i32), // vindex
6180       Offsets.first,     // voffset
6181       Op.getOperand(5),  // soffset
6182       Offsets.second,    // offset
6183       Op.getOperand(6),  // format
6184       Op.getOperand(7),  // cachepolicy
6185       DAG.getConstant(0, DL, MVT::i1), // idxen
6186     };
6187     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6188                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6189     MemSDNode *M = cast<MemSDNode>(Op);
6190     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6191                                    M->getMemoryVT(), M->getMemOperand());
6192   }
6193 
6194   case Intrinsic::amdgcn_buffer_store:
6195   case Intrinsic::amdgcn_buffer_store_format: {
6196     SDValue VData = Op.getOperand(2);
6197     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6198     if (IsD16)
6199       VData = handleD16VData(VData, DAG);
6200     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6201     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6202     unsigned IdxEn = 1;
6203     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6204       IdxEn = Idx->getZExtValue() != 0;
6205     SDValue Ops[] = {
6206       Chain,
6207       VData,
6208       Op.getOperand(3), // rsrc
6209       Op.getOperand(4), // vindex
6210       SDValue(), // voffset -- will be set by setBufferOffsets
6211       SDValue(), // soffset -- will be set by setBufferOffsets
6212       SDValue(), // offset -- will be set by setBufferOffsets
6213       DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6214       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6215     };
6216     setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
6217     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
6218                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6219     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6220     MemSDNode *M = cast<MemSDNode>(Op);
6221     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6222                                    M->getMemoryVT(), M->getMemOperand());
6223   }
6224 
6225   case Intrinsic::amdgcn_raw_buffer_store:
6226   case Intrinsic::amdgcn_raw_buffer_store_format: {
6227     SDValue VData = Op.getOperand(2);
6228     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6229     if (IsD16)
6230       VData = handleD16VData(VData, DAG);
6231     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6232     SDValue Ops[] = {
6233       Chain,
6234       VData,
6235       Op.getOperand(3), // rsrc
6236       DAG.getConstant(0, DL, MVT::i32), // vindex
6237       Offsets.first,    // voffset
6238       Op.getOperand(5), // soffset
6239       Offsets.second,   // offset
6240       Op.getOperand(6), // cachepolicy
6241       DAG.getConstant(0, DL, MVT::i1), // idxen
6242     };
6243     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_raw_buffer_store ?
6244                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6245     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6246     MemSDNode *M = cast<MemSDNode>(Op);
6247     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6248                                    M->getMemoryVT(), M->getMemOperand());
6249   }
6250 
6251   case Intrinsic::amdgcn_struct_buffer_store:
6252   case Intrinsic::amdgcn_struct_buffer_store_format: {
6253     SDValue VData = Op.getOperand(2);
6254     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6255     if (IsD16)
6256       VData = handleD16VData(VData, DAG);
6257     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6258     SDValue Ops[] = {
6259       Chain,
6260       VData,
6261       Op.getOperand(3), // rsrc
6262       Op.getOperand(4), // vindex
6263       Offsets.first,    // voffset
6264       Op.getOperand(6), // soffset
6265       Offsets.second,   // offset
6266       Op.getOperand(7), // cachepolicy
6267       DAG.getConstant(1, DL, MVT::i1), // idxen
6268     };
6269     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ?
6270                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6271     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6272     MemSDNode *M = cast<MemSDNode>(Op);
6273     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6274                                    M->getMemoryVT(), M->getMemOperand());
6275   }
6276 
6277   default: {
6278     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6279             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
6280       return lowerImage(Op, ImageDimIntr, DAG);
6281 
6282     return Op;
6283   }
6284   }
6285 }
6286 
6287 // The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args:
6288 // offset (the offset that is included in bounds checking and swizzling, to be
6289 // split between the instruction's voffset and immoffset fields) and soffset
6290 // (the offset that is excluded from bounds checking and swizzling, to go in
6291 // the instruction's soffset field).  This function takes the first kind of
6292 // offset and figures out how to split it between voffset and immoffset.
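     // For example, a combined offset of 4100 is returned as a voffset of 4096 and
     // an immoffset of 4, while a combined offset of 100 is returned as a voffset
     // of 0 and an immoffset of 100.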
6293 std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets(
6294     SDValue Offset, SelectionDAG &DAG) const {
6295   SDLoc DL(Offset);
6296   const unsigned MaxImm = 4095;
6297   SDValue N0 = Offset;
6298   ConstantSDNode *C1 = nullptr;
6299 
6300   if ((C1 = dyn_cast<ConstantSDNode>(N0)))
6301     N0 = SDValue();
6302   else if (DAG.isBaseWithConstantOffset(N0)) {
6303     C1 = cast<ConstantSDNode>(N0.getOperand(1));
6304     N0 = N0.getOperand(0);
6305   }
6306 
6307   if (C1) {
6308     unsigned ImmOffset = C1->getZExtValue();
6309     // If the immediate value is too big for the immoffset field, keep only its
6310     // low 12 bits in the immoffset field and move the remaining multiple of
6311     // 4096 into the value that is copied/added for the voffset field, so that
6312     // the copy/add stands more chance of being CSEd with the one for another
6313     // similar load/store. However, do not split off a multiple of 4096 that is
6314     // negative as a signed value, as it appears to be illegal to have a negative
6315     // offset in the vgpr, even if adding the immediate offset makes it positive.
6316     unsigned Overflow = ImmOffset & ~MaxImm;
6317     ImmOffset -= Overflow;
6318     if ((int32_t)Overflow < 0) {
6319       Overflow += ImmOffset;
6320       ImmOffset = 0;
6321     }
6322     C1 = cast<ConstantSDNode>(DAG.getConstant(ImmOffset, DL, MVT::i32));
6323     if (Overflow) {
6324       auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32);
6325       if (!N0)
6326         N0 = OverflowVal;
6327       else {
6328         SDValue Ops[] = { N0, OverflowVal };
6329         N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops);
6330       }
6331     }
6332   }
6333   if (!N0)
6334     N0 = DAG.getConstant(0, DL, MVT::i32);
6335   if (!C1)
6336     C1 = cast<ConstantSDNode>(DAG.getConstant(0, DL, MVT::i32));
6337   return {N0, SDValue(C1, 0)};
6338 }
6339 
6340 // Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the
6341 // three offsets (voffset, soffset and instoffset) into the SDValue[3] array
6342 // pointed to by Offsets.
6343 void SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
6344                                         SelectionDAG &DAG, SDValue *Offsets,
6345                                         unsigned Align) const {
6346   SDLoc DL(CombinedOffset);
6347   if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) {
6348     uint32_t Imm = C->getZExtValue();
6349     uint32_t SOffset, ImmOffset;
6350     if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, Align)) {
6351       Offsets[0] = DAG.getConstant(0, DL, MVT::i32);
6352       Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
6353       Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32);
6354       return;
6355     }
6356   }
6357   if (DAG.isBaseWithConstantOffset(CombinedOffset)) {
6358     SDValue N0 = CombinedOffset.getOperand(0);
6359     SDValue N1 = CombinedOffset.getOperand(1);
6360     uint32_t SOffset, ImmOffset;
6361     int Offset = cast<ConstantSDNode>(N1)->getSExtValue();
6362     if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset,
6363                                                 Subtarget, Align)) {
6364       Offsets[0] = N0;
6365       Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
6366       Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32);
6367       return;
6368     }
6369   }
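       // Conservative fallback: pass the whole combined offset in the voffset and
       // leave soffset and instoffset at zero.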
6370   Offsets[0] = CombinedOffset;
6371   Offsets[1] = DAG.getConstant(0, DL, MVT::i32);
6372   Offsets[2] = DAG.getConstant(0, DL, MVT::i32);
6373 }
6374 
6375 static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
6376                                  ISD::LoadExtType ExtType, SDValue Op,
6377                                  const SDLoc &SL, EVT VT) {
6378   if (VT.bitsLT(Op.getValueType()))
6379     return DAG.getNode(ISD::TRUNCATE, SL, VT, Op);
6380 
6381   switch (ExtType) {
6382   case ISD::SEXTLOAD:
6383     return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op);
6384   case ISD::ZEXTLOAD:
6385     return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op);
6386   case ISD::EXTLOAD:
6387     return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op);
6388   case ISD::NON_EXTLOAD:
6389     return Op;
6390   }
6391 
6392   llvm_unreachable("invalid ext type");
6393 }
6394 
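     // Widen a sub-dword load from a constant (or invariant global) address into a
     // full dword load, then truncate or extend the loaded value back to the
     // original type, so the access stays eligible for the dword-sized scalar load
     // path.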
6395 SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
6396   SelectionDAG &DAG = DCI.DAG;
6397   if (Ld->getAlignment() < 4 || Ld->isDivergent())
6398     return SDValue();
6399 
6400   // FIXME: Constant loads should all be marked invariant.
6401   unsigned AS = Ld->getAddressSpace();
6402   if (AS != AMDGPUAS::CONSTANT_ADDRESS &&
6403       AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
6404       (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant()))
6405     return SDValue();
6406 
6407   // Don't do this early, since it may interfere with adjacent load merging for
6408   // illegal types. We can avoid losing alignment information for exotic types
6409   // pre-legalize.
6410   EVT MemVT = Ld->getMemoryVT();
6411   if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) ||
6412       MemVT.getSizeInBits() >= 32)
6413     return SDValue();
6414 
6415   SDLoc SL(Ld);
6416 
6417   assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
6418          "unexpected vector extload");
6419 
6420   // TODO: Drop only high part of range.
6421   SDValue Ptr = Ld->getBasePtr();
6422   SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
6423                                 MVT::i32, SL, Ld->getChain(), Ptr,
6424                                 Ld->getOffset(),
6425                                 Ld->getPointerInfo(), MVT::i32,
6426                                 Ld->getAlignment(),
6427                                 Ld->getMemOperand()->getFlags(),
6428                                 Ld->getAAInfo(),
6429                                 nullptr); // Drop ranges
6430 
6431   EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
6432   if (MemVT.isFloatingPoint()) {
6433     assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
6434            "unexpected fp extload");
6435     TruncVT = MemVT.changeTypeToInteger();
6436   }
6437 
6438   SDValue Cvt = NewLoad;
6439   if (Ld->getExtensionType() == ISD::SEXTLOAD) {
6440     Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad,
6441                       DAG.getValueType(TruncVT));
6442   } else if (Ld->getExtensionType() == ISD::ZEXTLOAD ||
6443              Ld->getExtensionType() == ISD::NON_EXTLOAD) {
6444     Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT);
6445   } else {
6446     assert(Ld->getExtensionType() == ISD::EXTLOAD);
6447   }
6448 
6449   EVT VT = Ld->getValueType(0);
6450   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
6451 
6452   DCI.AddToWorklist(Cvt.getNode());
6453 
6454   // We may need to handle exotic cases, such as i16->i64 extloads, so insert
6455   // the appropriate extension from the 32-bit load.
6456   Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT);
6457   DCI.AddToWorklist(Cvt.getNode());
6458 
6459   // Handle conversion back to floating point if necessary.
6460   Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt);
6461 
6462   return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL);
6463 }
6464 
6465 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
6466   SDLoc DL(Op);
6467   LoadSDNode *Load = cast<LoadSDNode>(Op);
6468   ISD::LoadExtType ExtType = Load->getExtensionType();
6469   EVT MemVT = Load->getMemoryVT();
6470 
6471   if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
6472     if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
6473       return SDValue();
6474 
6475     // FIXME: Copied from PPC
6476     // First, load into 32 bits, then truncate to 1 bit.
6477 
6478     SDValue Chain = Load->getChain();
6479     SDValue BasePtr = Load->getBasePtr();
6480     MachineMemOperand *MMO = Load->getMemOperand();
6481 
6482     EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
6483 
6484     SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
6485                                    BasePtr, RealMemVT, MMO);
6486 
6487     SDValue Ops[] = {
6488       DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
6489       NewLD.getValue(1)
6490     };
6491 
6492     return DAG.getMergeValues(Ops, DL);
6493   }
6494 
6495   if (!MemVT.isVector())
6496     return SDValue();
6497 
6498   assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
6499          "Custom lowering for non-i32 vectors hasn't been implemented.");
6500 
6501   unsigned Alignment = Load->getAlignment();
6502   unsigned AS = Load->getAddressSpace();
6503   if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
6504                           AS, Alignment)) {
6505     SDValue Ops[2];
6506     std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
6507     return DAG.getMergeValues(Ops, DL);
6508   }
6509 
6510   MachineFunction &MF = DAG.getMachineFunction();
6511   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
6512   // If there is a possibility that flat instructions access scratch memory,
6513   // then we need to use the same legalization rules we use for private.
6514   if (AS == AMDGPUAS::FLAT_ADDRESS)
6515     AS = MFI->hasFlatScratchInit() ?
6516          AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
6517 
6518   unsigned NumElements = MemVT.getVectorNumElements();
6519 
6520   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
6521       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
6522     if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32)
6523       return SDValue();
6524     // Non-uniform loads will be selected to MUBUF instructions, so they
6525     // have the same legalization requirements as global and private
6526     // loads.
6527     //
6528   }
6529 
6530   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
6531       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
6532       AS == AMDGPUAS::GLOBAL_ADDRESS) {
6533     if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
6534         !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) &&
6535         Alignment >= 4 && NumElements < 32)
6536       return SDValue();
6537     // Non-uniform loads will be selected to MUBUF instructions, so they
6538     // have the same legalization requirements as global and private
6539     // loads.
6540     //
6541   }
6542   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
6543       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
6544       AS == AMDGPUAS::GLOBAL_ADDRESS ||
6545       AS == AMDGPUAS::FLAT_ADDRESS) {
6546     if (NumElements > 4)
6547       return SplitVectorLoad(Op, DAG);
6548     // v4 loads are supported for private and global memory.
6549     return SDValue();
6550   }
6551   if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
6552     // Depending on the setting of the private_element_size field in the
6553     // resource descriptor, we can only make private accesses up to a certain
6554     // size.
6555     switch (Subtarget->getMaxPrivateElementSize()) {
6556     case 4:
6557       return scalarizeVectorLoad(Load, DAG);
6558     case 8:
6559       if (NumElements > 2)
6560         return SplitVectorLoad(Op, DAG);
6561       return SDValue();
6562     case 16:
6563       // Same as global/flat
6564       if (NumElements > 4)
6565         return SplitVectorLoad(Op, DAG);
6566       return SDValue();
6567     default:
6568       llvm_unreachable("unsupported private_element_size");
6569     }
6570   } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
6571     // Use ds_read_b128 if possible.
6572     if (Subtarget->useDS128() && Load->getAlignment() >= 16 &&
6573         MemVT.getStoreSize() == 16)
6574       return SDValue();
6575 
6576     if (NumElements > 2)
6577       return SplitVectorLoad(Op, DAG);
6578 
6579     // SI has a hardware bug in the LDS / GDS bounds checking: if the base
6580     // address is negative, then the instruction is incorrectly treated as
6581     // out-of-bounds even if base + offset is in bounds. Split vectorized
6582     // loads here to avoid emitting ds_read2_b32. We may re-combine the
6583     // load later in the SILoadStoreOptimizer.
6584     if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
6585         NumElements == 2 && MemVT.getStoreSize() == 8 &&
6586         Load->getAlignment() < 8) {
6587       return SplitVectorLoad(Op, DAG);
6588     }
6589   }
6590   return SDValue();
6591 }
6592 
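     // Lower a 64-bit select by bitcasting the operands to v2i32 and selecting the
     // low and high 32-bit halves separately.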
6593 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
6594   EVT VT = Op.getValueType();
6595   assert(VT.getSizeInBits() == 64);
6596 
6597   SDLoc DL(Op);
6598   SDValue Cond = Op.getOperand(0);
6599 
6600   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
6601   SDValue One = DAG.getConstant(1, DL, MVT::i32);
6602 
6603   SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
6604   SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
6605 
6606   SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
6607   SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
6608 
6609   SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
6610 
6611   SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
6612   SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
6613 
6614   SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
6615 
6616   SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
6617   return DAG.getNode(ISD::BITCAST, DL, VT, Res);
6618 }
6619 
6620 // Catch division cases where we can use shortcuts with rcp and rsq
6621 // instructions.
6622 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
6623                                               SelectionDAG &DAG) const {
6624   SDLoc SL(Op);
6625   SDValue LHS = Op.getOperand(0);
6626   SDValue RHS = Op.getOperand(1);
6627   EVT VT = Op.getValueType();
6628   const SDNodeFlags Flags = Op->getFlags();
6629   bool Unsafe = DAG.getTarget().Options.UnsafeFPMath ||
                     Flags.hasAllowReciprocal();
6630 
6631   if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
6632     return SDValue();
6633 
6634   if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
6635     if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
6636       if (CLHS->isExactlyValue(1.0)) {
6637         // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
6638         // the CI documentation have a worst case error of 1 ulp.
6639         // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
6640         // use it as long as we aren't trying to use denormals.
6641         //
6642         // v_rcp_f16 and v_rsq_f16 DO support denormals.
6643 
6644         // 1.0 / sqrt(x) -> rsq(x)
6645 
6646         // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
6647         // error seems really high at 2^29 ULP.
6648         if (RHS.getOpcode() == ISD::FSQRT)
6649           return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
6650 
6651         // 1.0 / x -> rcp(x)
6652         return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
6653       }
6654 
6655       // Same as for 1.0, but expand the sign out of the constant.
6656       if (CLHS->isExactlyValue(-1.0)) {
6657         // -1.0 / x -> rcp (fneg x)
6658         SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
6659         return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
6660       }
6661     }
6662   }
6663 
6664   if (Unsafe) {
6665     // Turn into multiply by the reciprocal.
6666     // x / y -> x * (1.0 / y)
6667     SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
6668     return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
6669   }
6670 
6671   return SDValue();
6672 }
6673 
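     // Helpers for the f32 division expansion below: when GlueChain carries a
     // chain and glue (three result values), emit the corresponding *_W_CHAIN
     // opcode tied to that chain and glue so the operation stays ordered with
     // the surrounding denormal mode changes.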
6674 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
6675                           EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
6676   if (GlueChain->getNumValues() <= 1) {
6677     return DAG.getNode(Opcode, SL, VT, A, B);
6678   }
6679 
6680   assert(GlueChain->getNumValues() == 3);
6681 
6682   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
6683   switch (Opcode) {
6684   default: llvm_unreachable("no chain equivalent for opcode");
6685   case ISD::FMUL:
6686     Opcode = AMDGPUISD::FMUL_W_CHAIN;
6687     break;
6688   }
6689 
6690   return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
6691                      GlueChain.getValue(2));
6692 }
6693 
6694 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
6695                            EVT VT, SDValue A, SDValue B, SDValue C,
6696                            SDValue GlueChain) {
6697   if (GlueChain->getNumValues() <= 1) {
6698     return DAG.getNode(Opcode, SL, VT, A, B, C);
6699   }
6700 
6701   assert(GlueChain->getNumValues() == 3);
6702 
6703   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
6704   switch (Opcode) {
6705   default: llvm_unreachable("no chain equivalent for opcode");
6706   case ISD::FMA:
6707     Opcode = AMDGPUISD::FMA_W_CHAIN;
6708     break;
6709   }
6710 
6711   return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
6712                      GlueChain.getValue(2));
6713 }
6714 
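     // Expand f16 division via an f32 reciprocal: extend both operands to f32,
     // multiply the numerator by rcp of the denominator, round the quotient back
     // to f16 and let div_fixup handle the special cases.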
6715 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
6716   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
6717     return FastLowered;
6718 
6719   SDLoc SL(Op);
6720   SDValue Src0 = Op.getOperand(0);
6721   SDValue Src1 = Op.getOperand(1);
6722 
6723   SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
6724   SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
6725 
6726   SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
6727   SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
6728 
6729   SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
6730   SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
6731 
6732   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
6733 }
6734 
6735 // Faster 2.5 ULP division that does not support denormals.
6736 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
6737   SDLoc SL(Op);
6738   SDValue LHS = Op.getOperand(1);
6739   SDValue RHS = Op.getOperand(2);
6740 
6741   SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
6742 
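       // 0x6f800000 is 2^96 and 0x2f800000 is 2^-32: if |RHS| is very large, scale
       // it down by 2^-32 before taking the reciprocal and multiply the result by
       // the same factor afterwards, which keeps the intermediate values in range
       // without changing the quotient.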
6743   const APFloat K0Val(BitsToFloat(0x6f800000));
6744   const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
6745 
6746   const APFloat K1Val(BitsToFloat(0x2f800000));
6747   const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
6748 
6749   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
6750 
6751   EVT SetCCVT =
6752     getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
6753 
6754   SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
6755 
6756   SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
6757 
6758   // TODO: Should this propagate fast-math-flags?
6759   r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
6760 
6761   // rcp does not support denormals.
6762   SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
6763 
6764   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
6765 
6766   return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
6767 }
6768 
6769 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
6770   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
6771     return FastLowered;
6772 
6773   SDLoc SL(Op);
6774   SDValue LHS = Op.getOperand(0);
6775   SDValue RHS = Op.getOperand(1);
6776 
6777   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
6778 
6779   SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
6780 
6781   SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
6782                                           RHS, RHS, LHS);
6783   SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
6784                                         LHS, RHS, LHS);
6785 
6786   // Denominator is scaled to not be denormal, so using rcp is ok.
6787   SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
6788                                   DenominatorScaled);
6789   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
6790                                      DenominatorScaled);
6791 
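  // This hwreg immediate selects the f32 denormal-mode field of the MODE
  // register: id = HW_REG_MODE, offset = 4, width = 2 (i.e. MODE[5:4]).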
6792   const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
6793                                (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
6794                                (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
6795 
6796   const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);
6797 
6798   if (!Subtarget->hasFP32Denormals()) {
6799     SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
6800     const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
6801                                                       SL, MVT::i32);
6802     SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
6803                                        DAG.getEntryNode(),
6804                                        EnableDenormValue, BitField);
6805     SDValue Ops[3] = {
6806       NegDivScale0,
6807       EnableDenorm.getValue(0),
6808       EnableDenorm.getValue(1)
6809     };
6810 
6811     NegDivScale0 = DAG.getMergeValues(Ops, SL);
6812   }
6813 
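  // One Newton-Raphson refinement of the scaled reciprocal and quotient,
  // roughly:
  //   e  = 1 - d * r            (Fma0)
  //   r' = r + r * e            (Fma1)
  //   q  = n * r'               (Mul)
  //   q' = q + r' * (n - d * q) (Fma2, Fma3)
  // The remaining error term n - d * q' (Fma4) is handed to div_fmas together
  // with the scale flag from div_scale.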
6814   SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
6815                              ApproxRcp, One, NegDivScale0);
6816 
6817   SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
6818                              ApproxRcp, Fma0);
6819 
6820   SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
6821                            Fma1, Fma1);
6822 
6823   SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
6824                              NumeratorScaled, Mul);
6825 
  SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul,
                             Fma2);
6827 
6828   SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
6829                              NumeratorScaled, Fma3);
6830 
6831   if (!Subtarget->hasFP32Denormals()) {
6832     const SDValue DisableDenormValue =
6833         DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
6834     SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
6835                                         Fma4.getValue(1),
6836                                         DisableDenormValue,
6837                                         BitField,
6838                                         Fma4.getValue(2));
6839 
6840     SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
6841                                       DisableDenorm, DAG.getRoot());
6842     DAG.setRoot(OutputChain);
6843   }
6844 
6845   SDValue Scale = NumeratorScaled.getValue(1);
6846   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
6847                              Fma4, Fma1, Fma3, Scale);
6848 
6849   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
6850 }
6851 
6852 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
6853   if (DAG.getTarget().Options.UnsafeFPMath)
6854     return lowerFastUnsafeFDIV(Op, DAG);
6855 
6856   SDLoc SL(Op);
6857   SDValue X = Op.getOperand(0);
6858   SDValue Y = Op.getOperand(1);
6859 
6860   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
6861 
6862   SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
6863 
6864   SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
6865 
6866   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
6867 
6868   SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
6869 
6870   SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
6871 
6872   SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
6873 
6874   SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
6875 
6876   SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
6877 
6878   SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
6879   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
6880 
6881   SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
6882                              NegDivScale0, Mul, DivScale1);
6883 
6884   SDValue Scale;
6885 
6886   if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
6887     // Workaround a hardware bug on SI where the condition output from div_scale
6888     // is not usable.
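    // Instead, reconstruct the flag from the high dwords: compare each input
    // with its div_scale result to see whether it was left unscaled, and XOR
    // the two comparisons to recover the condition bit expected by div_fmas.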
6889 
6890     const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
6891 
    // Figure out which scale to use for div_fmas.
6893     SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
6894     SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
6895     SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
6896     SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
6897 
6898     SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
6899     SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
6900 
6901     SDValue Scale0Hi
6902       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
6903     SDValue Scale1Hi
6904       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
6905 
6906     SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
6907     SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
6908     Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
6909   } else {
6910     Scale = DivScale1.getValue(1);
6911   }
6912 
6913   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
6914                              Fma4, Fma3, Mul, Scale);
6915 
6916   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
6917 }
6918 
6919 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
6920   EVT VT = Op.getValueType();
6921 
6922   if (VT == MVT::f32)
6923     return LowerFDIV32(Op, DAG);
6924 
6925   if (VT == MVT::f64)
6926     return LowerFDIV64(Op, DAG);
6927 
6928   if (VT == MVT::f16)
6929     return LowerFDIV16(Op, DAG);
6930 
6931   llvm_unreachable("Unexpected type for fdiv");
6932 }
6933 
6934 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
6935   SDLoc DL(Op);
6936   StoreSDNode *Store = cast<StoreSDNode>(Op);
6937   EVT VT = Store->getMemoryVT();
6938 
6939   if (VT == MVT::i1) {
6940     return DAG.getTruncStore(Store->getChain(), DL,
6941        DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
6942        Store->getBasePtr(), MVT::i1, Store->getMemOperand());
6943   }
6944 
6945   assert(VT.isVector() &&
6946          Store->getValue().getValueType().getScalarType() == MVT::i32);
6947 
6948   unsigned AS = Store->getAddressSpace();
6949   if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
6950                           AS, Store->getAlignment())) {
6951     return expandUnalignedStore(Store, DAG);
6952   }
6953 
6954   MachineFunction &MF = DAG.getMachineFunction();
6955   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // If there is a possibility that flat instructions access scratch memory
  // then we need to use the same legalization rules we use for private.
6958   if (AS == AMDGPUAS::FLAT_ADDRESS)
6959     AS = MFI->hasFlatScratchInit() ?
6960          AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
6961 
6962   unsigned NumElements = VT.getVectorNumElements();
6963   if (AS == AMDGPUAS::GLOBAL_ADDRESS ||
6964       AS == AMDGPUAS::FLAT_ADDRESS) {
6965     if (NumElements > 4)
6966       return SplitVectorStore(Op, DAG);
6967     return SDValue();
6968   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
6969     switch (Subtarget->getMaxPrivateElementSize()) {
6970     case 4:
6971       return scalarizeVectorStore(Store, DAG);
6972     case 8:
6973       if (NumElements > 2)
6974         return SplitVectorStore(Op, DAG);
6975       return SDValue();
6976     case 16:
6977       if (NumElements > 4)
6978         return SplitVectorStore(Op, DAG);
6979       return SDValue();
6980     default:
6981       llvm_unreachable("unsupported private_element_size");
6982     }
6983   } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
6984     // Use ds_write_b128 if possible.
6985     if (Subtarget->useDS128() && Store->getAlignment() >= 16 &&
6986         VT.getStoreSize() == 16)
6987       return SDValue();
6988 
6989     if (NumElements > 2)
6990       return SplitVectorStore(Op, DAG);
6991 
    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
    // address is negative, then the instruction is incorrectly treated as
    // out-of-bounds even if base + offset is in bounds. Split vectorized
    // stores here to avoid emitting ds_write2_b32. We may re-combine the
    // store later in the SILoadStoreOptimizer.
6997     if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
6998         NumElements == 2 && VT.getStoreSize() == 8 &&
6999         Store->getAlignment() < 8) {
7000       return SplitVectorStore(Op, DAG);
7001     }
7002 
7003     return SDValue();
7004   } else {
7005     llvm_unreachable("unhandled address space");
7006   }
7007 }
7008 
7009 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
7010   SDLoc DL(Op);
7011   EVT VT = Op.getValueType();
7012   SDValue Arg = Op.getOperand(0);
7013   SDValue TrigVal;
7014 
7015   // TODO: Should this propagate fast-math-flags?
7016 
7017   SDValue OneOver2Pi = DAG.getConstantFP(0.5 / M_PI, DL, VT);
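  // The hardware sin/cos instructions take their input pre-scaled by
  // 1/(2*pi), i.e. in units of full revolutions rather than radians.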
7018 
7019   if (Subtarget->hasTrigReducedRange()) {
7020     SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
7021     TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal);
7022   } else {
7023     TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
7024   }
7025 
7026   switch (Op.getOpcode()) {
7027   case ISD::FCOS:
7028     return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal);
7029   case ISD::FSIN:
7030     return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal);
7031   default:
7032     llvm_unreachable("Wrong trig opcode");
7033   }
7034 }
7035 
7036 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
7037   AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
7038   assert(AtomicNode->isCompareAndSwap());
7039   unsigned AS = AtomicNode->getAddressSpace();
7040 
7041   // No custom lowering required for local address space
7042   if (!isFlatGlobalAddrSpace(AS))
7043     return Op;
7044 
7045   // Non-local address space requires custom lowering for atomic compare
7046   // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2
7047   SDLoc DL(Op);
7048   SDValue ChainIn = Op.getOperand(0);
7049   SDValue Addr = Op.getOperand(1);
7050   SDValue Old = Op.getOperand(2);
7051   SDValue New = Op.getOperand(3);
7052   EVT VT = Op.getValueType();
7053   MVT SimpleVT = VT.getSimpleVT();
7054   MVT VecType = MVT::getVectorVT(SimpleVT, 2);
7055 
7056   SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
7057   SDValue Ops[] = { ChainIn, Addr, NewOld };
7058 
7059   return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
7060                                  Ops, VT, AtomicNode->getMemOperand());
7061 }
7062 
7063 //===----------------------------------------------------------------------===//
7064 // Custom DAG optimizations
7065 //===----------------------------------------------------------------------===//
7066 
7067 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
7068                                                      DAGCombinerInfo &DCI) const {
7069   EVT VT = N->getValueType(0);
7070   EVT ScalarVT = VT.getScalarType();
7071   if (ScalarVT != MVT::f32)
7072     return SDValue();
7073 
7074   SelectionDAG &DAG = DCI.DAG;
7075   SDLoc DL(N);
7076 
7077   SDValue Src = N->getOperand(0);
7078   EVT SrcVT = Src.getValueType();
7079 
7080   // TODO: We could try to match extracting the higher bytes, which would be
7081   // easier if i8 vectors weren't promoted to i32 vectors, particularly after
7082   // types are legalized. v4i8 -> v4f32 is probably the only case to worry
7083   // about in practice.
7084   if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
7085     if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
7086       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
7087       DCI.AddToWorklist(Cvt.getNode());
7088       return Cvt;
7089     }
7090   }
7091 
7092   return SDValue();
7093 }
7094 
7095 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
7096 
7097 // This is a variant of
7098 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
7099 //
// The normal DAG combiner will do this, but only if the add has one use,
// since otherwise it would increase the number of instructions.
7102 //
7103 // This prevents us from seeing a constant offset that can be folded into a
7104 // memory instruction's addressing mode. If we know the resulting add offset of
7105 // a pointer can be folded into an addressing offset, we can replace the pointer
7106 // operand with the add of new constant offset. This eliminates one of the uses,
7107 // and may allow the remaining use to also be simplified.
7108 //
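// For example, with a 4-byte access:
//   (shl (add x, 16), 2)  -->  (add (shl x, 2), 64)
// and the constant 64 can then be folded into the memory instruction's
// immediate offset, assuming the addressing mode supports it.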
7109 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
7110                                                unsigned AddrSpace,
7111                                                EVT MemVT,
7112                                                DAGCombinerInfo &DCI) const {
7113   SDValue N0 = N->getOperand(0);
7114   SDValue N1 = N->getOperand(1);
7115 
7116   // We only do this to handle cases where it's profitable when there are
7117   // multiple uses of the add, so defer to the standard combine.
7118   if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
7119       N0->hasOneUse())
7120     return SDValue();
7121 
7122   const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
7123   if (!CN1)
7124     return SDValue();
7125 
7126   const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
7127   if (!CAdd)
7128     return SDValue();
7129 
7130   // If the resulting offset is too large, we can't fold it into the addressing
7131   // mode offset.
7132   APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
7133   Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());
7134 
7135   AddrMode AM;
7136   AM.HasBaseReg = true;
7137   AM.BaseOffs = Offset.getSExtValue();
7138   if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
7139     return SDValue();
7140 
7141   SelectionDAG &DAG = DCI.DAG;
7142   SDLoc SL(N);
7143   EVT VT = N->getValueType(0);
7144 
7145   SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
7146   SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
7147 
7148   SDNodeFlags Flags;
7149   Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
7150                           (N0.getOpcode() == ISD::OR ||
7151                            N0->getFlags().hasNoUnsignedWrap()));
7152 
7153   return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
7154 }
7155 
7156 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
7157                                                   DAGCombinerInfo &DCI) const {
7158   SDValue Ptr = N->getBasePtr();
7159   SelectionDAG &DAG = DCI.DAG;
7160   SDLoc SL(N);
7161 
7162   // TODO: We could also do this for multiplies.
7163   if (Ptr.getOpcode() == ISD::SHL) {
    SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(),
7165                                           N->getMemoryVT(), DCI);
7166     if (NewPtr) {
7167       SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
7168 
7169       NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
7170       return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
7171     }
7172   }
7173 
7174   return SDValue();
7175 }
7176 
7177 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
7178   return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
7179          (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
7180          (Opc == ISD::XOR && Val == 0);
7181 }
7182 
// Break up a 64-bit bitwise operation with a constant into two 32-bit
// and/or/xor operations. This will typically happen anyway for a VALU 64-bit
// op. This exposes other 32-bit integer combine opportunities since most
// 64-bit operations are decomposed this way.
// TODO: We won't want this for SALU, especially if the constant is an inline
// immediate.
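// For example, (and i64:x, 0x00000000ffffffff) splits into an AND of the low
// half with -1 and an AND of the high half with 0, both of which are trivial
// for later combines to fold away.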
7188 SDValue SITargetLowering::splitBinaryBitConstantOp(
7189   DAGCombinerInfo &DCI,
7190   const SDLoc &SL,
7191   unsigned Opc, SDValue LHS,
7192   const ConstantSDNode *CRHS) const {
7193   uint64_t Val = CRHS->getZExtValue();
7194   uint32_t ValLo = Lo_32(Val);
7195   uint32_t ValHi = Hi_32(Val);
7196   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7197 
  if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
       bitOpWithConstantIsReducible(Opc, ValHi)) ||
      (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
7201     // If we need to materialize a 64-bit immediate, it will be split up later
7202     // anyway. Avoid creating the harder to understand 64-bit immediate
7203     // materialization.
7204     return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
7205   }
7206 
7207   return SDValue();
7208 }
7209 
// Returns true if the argument is a boolean value which is not serialized
// into memory or as an argument and does not require v_cndmask_b32 to be
// deserialized.
7212 static bool isBoolSGPR(SDValue V) {
7213   if (V.getValueType() != MVT::i1)
7214     return false;
7215   switch (V.getOpcode()) {
7216   default: break;
7217   case ISD::SETCC:
7218   case ISD::AND:
7219   case ISD::OR:
7220   case ISD::XOR:
7221   case AMDGPUISD::FP_CLASS:
7222     return true;
7223   }
7224   return false;
7225 }
7226 
// If each byte of a constant is either all zeroes or all ones, return the
// constant. Otherwise return 0.
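// For example, 0x00ff00ff is returned unchanged, while 0x00f0ff00 returns 0
// because one of its bytes is only partially selected.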
7229 static uint32_t getConstantPermuteMask(uint32_t C) {
7230   // 0xff for any zero byte in the mask
7231   uint32_t ZeroByteMask = 0;
7232   if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
7233   if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
7234   if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
7235   if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
7236   uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte
7237   if ((NonZeroByteMask & C) != NonZeroByteMask)
7238     return 0; // Partial bytes selected.
7239   return C;
7240 }
7241 
7242 // Check if a node selects whole bytes from its operand 0 starting at a byte
// boundary while masking the rest. Returns the select mask as used by
// v_perm_b32, or ~0 if it did not succeed.
7245 // Note byte select encoding:
7246 // value 0-3 selects corresponding source byte;
7247 // value 0xc selects zero;
7248 // value 0xff selects 0xff.
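// For example, (and x, 0x0000ffff) produces the mask 0x0c0c0100 (keep bytes
// 1:0, zero the upper bytes), and (srl x, 16) produces 0x0c0c0302 (bytes 3:2
// shifted down into bytes 1:0, upper bytes zeroed).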
7249 static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
7250   assert(V.getValueSizeInBits() == 32);
7251 
7252   if (V.getNumOperands() != 2)
7253     return ~0;
7254 
7255   ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
7256   if (!N1)
7257     return ~0;
7258 
7259   uint32_t C = N1->getZExtValue();
7260 
7261   switch (V.getOpcode()) {
7262   default:
7263     break;
7264   case ISD::AND:
7265     if (uint32_t ConstMask = getConstantPermuteMask(C)) {
7266       return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
7267     }
7268     break;
7269 
7270   case ISD::OR:
7271     if (uint32_t ConstMask = getConstantPermuteMask(C)) {
7272       return (0x03020100 & ~ConstMask) | ConstMask;
7273     }
7274     break;
7275 
7276   case ISD::SHL:
7277     if (C % 8)
7278       return ~0;
7279 
7280     return uint32_t((0x030201000c0c0c0cull << C) >> 32);
7281 
7282   case ISD::SRL:
7283     if (C % 8)
7284       return ~0;
7285 
7286     return uint32_t(0x0c0c0c0c03020100ull >> C);
7287   }
7288 
7289   return ~0;
7290 }
7291 
7292 SDValue SITargetLowering::performAndCombine(SDNode *N,
7293                                             DAGCombinerInfo &DCI) const {
7294   if (DCI.isBeforeLegalize())
7295     return SDValue();
7296 
7297   SelectionDAG &DAG = DCI.DAG;
7298   EVT VT = N->getValueType(0);
7299   SDValue LHS = N->getOperand(0);
7300   SDValue RHS = N->getOperand(1);
7301 
7303   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
7304   if (VT == MVT::i64 && CRHS) {
7305     if (SDValue Split
7306         = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
7307       return Split;
7308   }
7309 
7310   if (CRHS && VT == MVT::i32) {
7311     // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
7312     // nb = number of trailing zeroes in mask
7313     // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
    // given that we are selecting 8 or 16 bit fields starting at a byte
    // boundary.
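    // For example, (and (srl x, 4), 0xff0) becomes
    //   (shl (AssertZext i8 (bfe x, 8, 8)), 4),
    // i.e. bits [15:8] of x extracted and shifted back into position.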
7315     uint64_t Mask = CRHS->getZExtValue();
7316     unsigned Bits = countPopulation(Mask);
7317     if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
7318         (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
7319       if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
7320         unsigned Shift = CShift->getZExtValue();
7321         unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
7322         unsigned Offset = NB + Shift;
7323         if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
7324           SDLoc SL(N);
7325           SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
7326                                     LHS->getOperand(0),
7327                                     DAG.getConstant(Offset, SL, MVT::i32),
7328                                     DAG.getConstant(Bits, SL, MVT::i32));
7329           EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
7330           SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
7331                                     DAG.getValueType(NarrowVT));
7332           SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
7333                                     DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
7334           return Shl;
7335         }
7336       }
7337     }
7338 
7339     // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
7340     if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM &&
7341         isa<ConstantSDNode>(LHS.getOperand(2))) {
7342       uint32_t Sel = getConstantPermuteMask(Mask);
7343       if (!Sel)
7344         return SDValue();
7345 
7346       // Select 0xc for all zero bytes
7347       Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c);
7348       SDLoc DL(N);
7349       return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
7350                          LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
7351     }
7352   }
7353 
7354   // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
7355   // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
7356   if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
7357     ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
7358     ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
7359 
7360     SDValue X = LHS.getOperand(0);
7361     SDValue Y = RHS.getOperand(0);
7362     if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
7363       return SDValue();
7364 
7365     if (LCC == ISD::SETO) {
7366       if (X != LHS.getOperand(1))
7367         return SDValue();
7368 
7369       if (RCC == ISD::SETUNE) {
7370         const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
7371         if (!C1 || !C1->isInfinity() || C1->isNegative())
7372           return SDValue();
7373 
7374         const uint32_t Mask = SIInstrFlags::N_NORMAL |
7375                               SIInstrFlags::N_SUBNORMAL |
7376                               SIInstrFlags::N_ZERO |
7377                               SIInstrFlags::P_ZERO |
7378                               SIInstrFlags::P_SUBNORMAL |
7379                               SIInstrFlags::P_NORMAL;
7380 
7381         static_assert(((~(SIInstrFlags::S_NAN |
7382                           SIInstrFlags::Q_NAN |
7383                           SIInstrFlags::N_INFINITY |
7384                           SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
7385                       "mask not equal");
7386 
7387         SDLoc DL(N);
7388         return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
7389                            X, DAG.getConstant(Mask, DL, MVT::i32));
7390       }
7391     }
7392   }
7393 
7394   if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS)
7395     std::swap(LHS, RHS);
7396 
7397   if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS &&
7398       RHS.hasOneUse()) {
7399     ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
7400     // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan)
7401     // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan)
7402     const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
7403     if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask &&
7404         (RHS.getOperand(0) == LHS.getOperand(0) &&
7405          LHS.getOperand(0) == LHS.getOperand(1))) {
7406       const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN;
7407       unsigned NewMask = LCC == ISD::SETO ?
7408         Mask->getZExtValue() & ~OrdMask :
7409         Mask->getZExtValue() & OrdMask;
7410 
7411       SDLoc DL(N);
7412       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0),
7413                          DAG.getConstant(NewMask, DL, MVT::i32));
7414     }
7415   }
7416 
7417   if (VT == MVT::i32 &&
7418       (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
7419     // and x, (sext cc from i1) => select cc, x, 0
7420     if (RHS.getOpcode() != ISD::SIGN_EXTEND)
7421       std::swap(LHS, RHS);
7422     if (isBoolSGPR(RHS.getOperand(0)))
7423       return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
7424                            LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
7425   }
7426 
7427   // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
7428   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7429   if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
7430       N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
7431     uint32_t LHSMask = getPermuteMask(DAG, LHS);
7432     uint32_t RHSMask = getPermuteMask(DAG, RHS);
7433     if (LHSMask != ~0u && RHSMask != ~0u) {
7434       // Canonicalize the expression in an attempt to have fewer unique masks
7435       // and therefore fewer registers used to hold the masks.
7436       if (LHSMask > RHSMask) {
7437         std::swap(LHSMask, RHSMask);
7438         std::swap(LHS, RHS);
7439       }
7440 
      // Select 0x0c for each lane actually used from the source operand. In
      // the mask encoding, zero bytes are 0x0c, 0xff bytes are 0xff, and
      // selected source lanes are in the 0-3 range.
7443       uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
7444       uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
7445 
      // Check if we need to combine values from two sources within a byte.
7447       if (!(LHSUsedLanes & RHSUsedLanes) &&
          // If we select the high and low words, keep it for SDWA.
7449           // TODO: teach SDWA to work with v_perm_b32 and remove the check.
7450           !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
        // Each byte in each mask is either a selector in the 0-3 range or has
        // its higher bits set: 0xff selects 0xff and 0x0c selects zero. If
        // 0x0c appears in either mask for a byte, the result for that byte
        // must be 0x0c; otherwise the mask that is not 0xff wins. ANDing both
        // masks gives the correct result, except that bytes meant to be zero
        // must be corrected back to exactly 0x0c.
7456         uint32_t Mask = LHSMask & RHSMask;
7457         for (unsigned I = 0; I < 32; I += 8) {
7458           uint32_t ByteSel = 0xff << I;
7459           if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c)
7460             Mask &= (0x0c << I) & 0xffffffff;
7461         }
7462 
7463         // Add 4 to each active LHS lane. It will not affect any existing 0xff
7464         // or 0x0c.
7465         uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404);
7466         SDLoc DL(N);
7467 
7468         return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
7469                            LHS.getOperand(0), RHS.getOperand(0),
7470                            DAG.getConstant(Sel, DL, MVT::i32));
7471       }
7472     }
7473   }
7474 
7475   return SDValue();
7476 }
7477 
7478 SDValue SITargetLowering::performOrCombine(SDNode *N,
7479                                            DAGCombinerInfo &DCI) const {
7480   SelectionDAG &DAG = DCI.DAG;
7481   SDValue LHS = N->getOperand(0);
7482   SDValue RHS = N->getOperand(1);
7483 
7484   EVT VT = N->getValueType(0);
7485   if (VT == MVT::i1) {
7486     // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
7487     if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
7488         RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
7489       SDValue Src = LHS.getOperand(0);
7490       if (Src != RHS.getOperand(0))
7491         return SDValue();
7492 
7493       const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
7494       const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
7495       if (!CLHS || !CRHS)
7496         return SDValue();
7497 
7498       // Only 10 bits are used.
7499       static const uint32_t MaxMask = 0x3ff;
7500 
7501       uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
7502       SDLoc DL(N);
7503       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
7504                          Src, DAG.getConstant(NewMask, DL, MVT::i32));
7505     }
7506 
7507     return SDValue();
7508   }
7509 
7510   // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
7511   if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() &&
7512       LHS.getOpcode() == AMDGPUISD::PERM &&
7513       isa<ConstantSDNode>(LHS.getOperand(2))) {
7514     uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1));
7515     if (!Sel)
7516       return SDValue();
7517 
7518     Sel |= LHS.getConstantOperandVal(2);
7519     SDLoc DL(N);
7520     return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
7521                        LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
7522   }
7523 
7524   // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
7525   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7526   if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
7527       N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
7528     uint32_t LHSMask = getPermuteMask(DAG, LHS);
7529     uint32_t RHSMask = getPermuteMask(DAG, RHS);
7530     if (LHSMask != ~0u && RHSMask != ~0u) {
7531       // Canonicalize the expression in an attempt to have fewer unique masks
7532       // and therefore fewer registers used to hold the masks.
7533       if (LHSMask > RHSMask) {
7534         std::swap(LHSMask, RHSMask);
7535         std::swap(LHS, RHS);
7536       }
7537 
      // Select 0x0c for each lane actually used from the source operand. In
      // the mask encoding, zero bytes are 0x0c, 0xff bytes are 0xff, and
      // selected source lanes are in the 0-3 range.
7540       uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
7541       uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
7542 
      // Check if we need to combine values from two sources within a byte.
7544       if (!(LHSUsedLanes & RHSUsedLanes) &&
          // If we select the high and low words, keep it for SDWA.
7546           // TODO: teach SDWA to work with v_perm_b32 and remove the check.
7547           !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
7548         // Kill zero bytes selected by other mask. Zero value is 0xc.
7549         LHSMask &= ~RHSUsedLanes;
7550         RHSMask &= ~LHSUsedLanes;
7551         // Add 4 to each active LHS lane
7552         LHSMask |= LHSUsedLanes & 0x04040404;
7553         // Combine masks
7554         uint32_t Sel = LHSMask | RHSMask;
7555         SDLoc DL(N);
7556 
7557         return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
7558                            LHS.getOperand(0), RHS.getOperand(0),
7559                            DAG.getConstant(Sel, DL, MVT::i32));
7560       }
7561     }
7562   }
7563 
7564   if (VT != MVT::i64)
7565     return SDValue();
7566 
7567   // TODO: This could be a generic combine with a predicate for extracting the
7568   // high half of an integer being free.
7569 
7570   // (or i64:x, (zero_extend i32:y)) ->
7571   //   i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
7572   if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
7573       RHS.getOpcode() != ISD::ZERO_EXTEND)
7574     std::swap(LHS, RHS);
7575 
7576   if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
7577     SDValue ExtSrc = RHS.getOperand(0);
7578     EVT SrcVT = ExtSrc.getValueType();
7579     if (SrcVT == MVT::i32) {
7580       SDLoc SL(N);
7581       SDValue LowLHS, HiBits;
7582       std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
7583       SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
7584 
7585       DCI.AddToWorklist(LowOr.getNode());
7586       DCI.AddToWorklist(HiBits.getNode());
7587 
7588       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
7589                                 LowOr, HiBits);
7590       return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
7591     }
7592   }
7593 
7594   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
7595   if (CRHS) {
7596     if (SDValue Split
7597           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
7598       return Split;
7599   }
7600 
7601   return SDValue();
7602 }
7603 
7604 SDValue SITargetLowering::performXorCombine(SDNode *N,
7605                                             DAGCombinerInfo &DCI) const {
7606   EVT VT = N->getValueType(0);
7607   if (VT != MVT::i64)
7608     return SDValue();
7609 
7610   SDValue LHS = N->getOperand(0);
7611   SDValue RHS = N->getOperand(1);
7612 
7613   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
7614   if (CRHS) {
7615     if (SDValue Split
7616           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
7617       return Split;
7618   }
7619 
7620   return SDValue();
7621 }
7622 
7623 // Instructions that will be lowered with a final instruction that zeros the
7624 // high result bits.
7625 // XXX - probably only need to list legal operations.
7626 static bool fp16SrcZerosHighBits(unsigned Opc) {
7627   switch (Opc) {
7628   case ISD::FADD:
7629   case ISD::FSUB:
7630   case ISD::FMUL:
7631   case ISD::FDIV:
7632   case ISD::FREM:
7633   case ISD::FMA:
7634   case ISD::FMAD:
7635   case ISD::FCANONICALIZE:
7636   case ISD::FP_ROUND:
7637   case ISD::UINT_TO_FP:
7638   case ISD::SINT_TO_FP:
7639   case ISD::FABS:
7640     // Fabs is lowered to a bit operation, but it's an and which will clear the
7641     // high bits anyway.
7642   case ISD::FSQRT:
7643   case ISD::FSIN:
7644   case ISD::FCOS:
7645   case ISD::FPOWI:
7646   case ISD::FPOW:
7647   case ISD::FLOG:
7648   case ISD::FLOG2:
7649   case ISD::FLOG10:
7650   case ISD::FEXP:
7651   case ISD::FEXP2:
7652   case ISD::FCEIL:
7653   case ISD::FTRUNC:
7654   case ISD::FRINT:
7655   case ISD::FNEARBYINT:
7656   case ISD::FROUND:
7657   case ISD::FFLOOR:
7658   case ISD::FMINNUM:
7659   case ISD::FMAXNUM:
7660   case AMDGPUISD::FRACT:
7661   case AMDGPUISD::CLAMP:
7662   case AMDGPUISD::COS_HW:
7663   case AMDGPUISD::SIN_HW:
7664   case AMDGPUISD::FMIN3:
7665   case AMDGPUISD::FMAX3:
7666   case AMDGPUISD::FMED3:
7667   case AMDGPUISD::FMAD_FTZ:
7668   case AMDGPUISD::RCP:
7669   case AMDGPUISD::RSQ:
7670   case AMDGPUISD::RCP_IFLAG:
7671   case AMDGPUISD::LDEXP:
7672     return true;
7673   default:
7674     // fcopysign, select and others may be lowered to 32-bit bit operations
7675     // which don't zero the high bits.
7676     return false;
7677   }
7678 }
7679 
7680 SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
7681                                                    DAGCombinerInfo &DCI) const {
7682   if (!Subtarget->has16BitInsts() ||
7683       DCI.getDAGCombineLevel() < AfterLegalizeDAG)
7684     return SDValue();
7685 
7686   EVT VT = N->getValueType(0);
7687   if (VT != MVT::i32)
7688     return SDValue();
7689 
7690   SDValue Src = N->getOperand(0);
7691   if (Src.getValueType() != MVT::i16)
7692     return SDValue();
7693 
7694   // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
7695   // FIXME: It is not universally true that the high bits are zeroed on gfx9.
7696   if (Src.getOpcode() == ISD::BITCAST) {
7697     SDValue BCSrc = Src.getOperand(0);
7698     if (BCSrc.getValueType() == MVT::f16 &&
7699         fp16SrcZerosHighBits(BCSrc.getOpcode()))
7700       return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
7701   }
7702 
7703   return SDValue();
7704 }
7705 
7706 SDValue SITargetLowering::performClassCombine(SDNode *N,
7707                                               DAGCombinerInfo &DCI) const {
7708   SelectionDAG &DAG = DCI.DAG;
7709   SDValue Mask = N->getOperand(1);
7710 
7711   // fp_class x, 0 -> false
7712   if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
7713     if (CMask->isNullValue())
7714       return DAG.getConstant(0, SDLoc(N), MVT::i1);
7715   }
7716 
7717   if (N->getOperand(0).isUndef())
7718     return DAG.getUNDEF(MVT::i1);
7719 
7720   return SDValue();
7721 }
7722 
7723 SDValue SITargetLowering::performRcpCombine(SDNode *N,
7724                                             DAGCombinerInfo &DCI) const {
7725   EVT VT = N->getValueType(0);
7726   SDValue N0 = N->getOperand(0);
7727 
7728   if (N0.isUndef())
7729     return N0;
7730 
7731   if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP ||
7732                          N0.getOpcode() == ISD::SINT_TO_FP)) {
7733     return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0,
7734                            N->getFlags());
7735   }
7736 
7737   return AMDGPUTargetLowering::performRcpCombine(N, DCI);
7738 }
7739 
7740 bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
7741                                        unsigned MaxDepth) const {
7742   unsigned Opcode = Op.getOpcode();
7743   if (Opcode == ISD::FCANONICALIZE)
7744     return true;
7745 
7746   if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
7747     auto F = CFP->getValueAPF();
7748     if (F.isNaN() && F.isSignaling())
7749       return false;
7750     return !F.isDenormal() || denormalsEnabledForType(Op.getValueType());
7751   }
7752 
7753   // If source is a result of another standard FP operation it is already in
7754   // canonical form.
7755   if (MaxDepth == 0)
7756     return false;
7757 
7758   switch (Opcode) {
7759   // These will flush denorms if required.
7760   case ISD::FADD:
7761   case ISD::FSUB:
7762   case ISD::FMUL:
7763   case ISD::FCEIL:
7764   case ISD::FFLOOR:
7765   case ISD::FMA:
7766   case ISD::FMAD:
7767   case ISD::FSQRT:
7768   case ISD::FDIV:
7769   case ISD::FREM:
7770   case ISD::FP_ROUND:
7771   case ISD::FP_EXTEND:
7772   case AMDGPUISD::FMUL_LEGACY:
7773   case AMDGPUISD::FMAD_FTZ:
7774   case AMDGPUISD::RCP:
7775   case AMDGPUISD::RSQ:
7776   case AMDGPUISD::RSQ_CLAMP:
7777   case AMDGPUISD::RCP_LEGACY:
7778   case AMDGPUISD::RSQ_LEGACY:
7779   case AMDGPUISD::RCP_IFLAG:
7780   case AMDGPUISD::TRIG_PREOP:
7781   case AMDGPUISD::DIV_SCALE:
7782   case AMDGPUISD::DIV_FMAS:
7783   case AMDGPUISD::DIV_FIXUP:
7784   case AMDGPUISD::FRACT:
7785   case AMDGPUISD::LDEXP:
7786   case AMDGPUISD::CVT_PKRTZ_F16_F32:
7787   case AMDGPUISD::CVT_F32_UBYTE0:
7788   case AMDGPUISD::CVT_F32_UBYTE1:
7789   case AMDGPUISD::CVT_F32_UBYTE2:
7790   case AMDGPUISD::CVT_F32_UBYTE3:
7791     return true;
7792 
7793   // It can/will be lowered or combined as a bit operation.
7794   // Need to check their input recursively to handle.
7795   case ISD::FNEG:
7796   case ISD::FABS:
7797   case ISD::FCOPYSIGN:
7798     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
7799 
7800   case ISD::FSIN:
7801   case ISD::FCOS:
7802   case ISD::FSINCOS:
7803     return Op.getValueType().getScalarType() != MVT::f16;
7804 
7805   case ISD::FMINNUM:
7806   case ISD::FMAXNUM:
7807   case ISD::FMINNUM_IEEE:
7808   case ISD::FMAXNUM_IEEE:
7809   case AMDGPUISD::CLAMP:
7810   case AMDGPUISD::FMED3:
7811   case AMDGPUISD::FMAX3:
7812   case AMDGPUISD::FMIN3: {
    // FIXME: Shouldn't treat the generic operations differently based on
    // these. However, we aren't really required to flush the result from
    // minnum/maxnum.
7816 
7817     // snans will be quieted, so we only need to worry about denormals.
7818     if (Subtarget->supportsMinMaxDenormModes() ||
7819         denormalsEnabledForType(Op.getValueType()))
7820       return true;
7821 
7822     // Flushing may be required.
    // On pre-GFX9 targets V_MIN_F32 and others do not flush denorms. For such
    // targets we need to check their inputs recursively.
7825 
7826     // FIXME: Does this apply with clamp? It's implemented with max.
7827     for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) {
7828       if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1))
7829         return false;
7830     }
7831 
7832     return true;
7833   }
7834   case ISD::SELECT: {
7835     return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) &&
7836            isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1);
7837   }
7838   case ISD::BUILD_VECTOR: {
7839     for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
7840       SDValue SrcOp = Op.getOperand(i);
7841       if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1))
7842         return false;
7843     }
7844 
7845     return true;
7846   }
7847   case ISD::EXTRACT_VECTOR_ELT:
7848   case ISD::EXTRACT_SUBVECTOR: {
7849     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
7850   }
7851   case ISD::INSERT_VECTOR_ELT: {
7852     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) &&
7853            isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1);
7854   }
7855   case ISD::UNDEF:
7856     // Could be anything.
7857     return false;
7858 
7859   case ISD::BITCAST: {
    // Hack around the mess we make when legalizing extract_vector_elt.
7861     SDValue Src = Op.getOperand(0);
7862     if (Src.getValueType() == MVT::i16 &&
7863         Src.getOpcode() == ISD::TRUNCATE) {
7864       SDValue TruncSrc = Src.getOperand(0);
7865       if (TruncSrc.getValueType() == MVT::i32 &&
7866           TruncSrc.getOpcode() == ISD::BITCAST &&
7867           TruncSrc.getOperand(0).getValueType() == MVT::v2f16) {
7868         return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1);
7869       }
7870     }
7871 
7872     return false;
7873   }
7874   case ISD::INTRINSIC_WO_CHAIN: {
7875     unsigned IntrinsicID
7876       = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
7877     // TODO: Handle more intrinsics
7878     switch (IntrinsicID) {
7879     case Intrinsic::amdgcn_cvt_pkrtz:
7880     case Intrinsic::amdgcn_cubeid:
7881     case Intrinsic::amdgcn_frexp_mant:
7882     case Intrinsic::amdgcn_fdot2:
7883       return true;
7884     default:
7885       break;
7886     }
7887 
7888     LLVM_FALLTHROUGH;
7889   }
7890   default:
7891     return denormalsEnabledForType(Op.getValueType()) &&
7892            DAG.isKnownNeverSNaN(Op);
7893   }
7894 
7895   llvm_unreachable("invalid operation");
7896 }
7897 
7898 // Constant fold canonicalize.
7899 SDValue SITargetLowering::getCanonicalConstantFP(
7900   SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const {
7901   // Flush denormals to 0 if not enabled.
7902   if (C.isDenormal() && !denormalsEnabledForType(VT))
7903     return DAG.getConstantFP(0.0, SL, VT);
7904 
7905   if (C.isNaN()) {
7906     APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
7907     if (C.isSignaling()) {
7908       // Quiet a signaling NaN.
7909       // FIXME: Is this supposed to preserve payload bits?
7910       return DAG.getConstantFP(CanonicalQNaN, SL, VT);
7911     }
7912 
7913     // Make sure it is the canonical NaN bitpattern.
7914     //
7915     // TODO: Can we use -1 as the canonical NaN value since it's an inline
7916     // immediate?
7917     if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
7918       return DAG.getConstantFP(CanonicalQNaN, SL, VT);
7919   }
7920 
7921   // Already canonical.
7922   return DAG.getConstantFP(C, SL, VT);
7923 }
7924 
7925 static bool vectorEltWillFoldAway(SDValue Op) {
7926   return Op.isUndef() || isa<ConstantFPSDNode>(Op);
7927 }
7928 
7929 SDValue SITargetLowering::performFCanonicalizeCombine(
7930   SDNode *N,
7931   DAGCombinerInfo &DCI) const {
7932   SelectionDAG &DAG = DCI.DAG;
7933   SDValue N0 = N->getOperand(0);
7934   EVT VT = N->getValueType(0);
7935 
7936   // fcanonicalize undef -> qnan
7937   if (N0.isUndef()) {
7938     APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT));
7939     return DAG.getConstantFP(QNaN, SDLoc(N), VT);
7940   }
7941 
7942   if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) {
7943     EVT VT = N->getValueType(0);
7944     return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF());
7945   }
7946 
7947   // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x),
7948   //                                                   (fcanonicalize k)
7949   //
7950   // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0
7951 
7952   // TODO: This could be better with wider vectors that will be split to v2f16,
7953   // and to consider uses since there aren't that many packed operations.
7954   if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 &&
7955       isTypeLegal(MVT::v2f16)) {
7956     SDLoc SL(N);
7957     SDValue NewElts[2];
7958     SDValue Lo = N0.getOperand(0);
7959     SDValue Hi = N0.getOperand(1);
7960     EVT EltVT = Lo.getValueType();
7961 
7962     if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) {
7963       for (unsigned I = 0; I != 2; ++I) {
7964         SDValue Op = N0.getOperand(I);
7965         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
7966           NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT,
7967                                               CFP->getValueAPF());
7968         } else if (Op.isUndef()) {
7969           // Handled below based on what the other operand is.
7970           NewElts[I] = Op;
7971         } else {
7972           NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op);
7973         }
7974       }
7975 
      // If one half is undef, and one is constant, prefer a splat vector
      // rather than the normal qNaN. If it's a register, prefer 0.0 since
      // that's cheaper to use and may be free with a packed operation.
      if (NewElts[0].isUndef()) {
        NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ?
          NewElts[1] : DAG.getConstantFP(0.0f, SL, EltVT);
      }
7984 
7985       if (NewElts[1].isUndef()) {
7986         NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ?
7987           NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT);
7988       }
7989 
7990       return DAG.getBuildVector(VT, SL, NewElts);
7991     }
7992   }
7993 
7994   unsigned SrcOpc = N0.getOpcode();
7995 
7996   // If it's free to do so, push canonicalizes further up the source, which may
7997   // find a canonical source.
7998   //
  // TODO: More opcodes. Note this is unsafe for the _ieee minnum/maxnum for
8000   // sNaNs.
8001   if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) {
8002     auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
8003     if (CRHS && N0.hasOneUse()) {
8004       SDLoc SL(N);
8005       SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT,
8006                                    N0.getOperand(0));
8007       SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF());
8008       DCI.AddToWorklist(Canon0.getNode());
8009 
8010       return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1);
8011     }
8012   }
8013 
8014   return isCanonicalized(DAG, N0) ? N0 : SDValue();
8015 }
8016 
8017 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
8018   switch (Opc) {
8019   case ISD::FMAXNUM:
8020   case ISD::FMAXNUM_IEEE:
8021     return AMDGPUISD::FMAX3;
8022   case ISD::SMAX:
8023     return AMDGPUISD::SMAX3;
8024   case ISD::UMAX:
8025     return AMDGPUISD::UMAX3;
8026   case ISD::FMINNUM:
8027   case ISD::FMINNUM_IEEE:
8028     return AMDGPUISD::FMIN3;
8029   case ISD::SMIN:
8030     return AMDGPUISD::SMIN3;
8031   case ISD::UMIN:
8032     return AMDGPUISD::UMIN3;
8033   default:
8034     llvm_unreachable("Not a min/max opcode");
8035   }
8036 }
8037 
8038 SDValue SITargetLowering::performIntMed3ImmCombine(
8039   SelectionDAG &DAG, const SDLoc &SL,
8040   SDValue Op0, SDValue Op1, bool Signed) const {
8041   ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
8042   if (!K1)
8043     return SDValue();
8044 
8045   ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
8046   if (!K0)
8047     return SDValue();
8048 
8049   if (Signed) {
8050     if (K0->getAPIntValue().sge(K1->getAPIntValue()))
8051       return SDValue();
8052   } else {
8053     if (K0->getAPIntValue().uge(K1->getAPIntValue()))
8054       return SDValue();
8055   }
8056 
8057   EVT VT = K0->getValueType(0);
8058   unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
8059   if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
8060     return DAG.getNode(Med3Opc, SL, VT,
8061                        Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
8062   }
8063 
8064   // If there isn't a 16-bit med3 operation, convert to 32-bit.
8065   MVT NVT = MVT::i32;
8066   unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
8067 
8068   SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
8069   SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
8070   SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
8071 
8072   SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
8073   return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
8074 }
8075 
8076 static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
8077   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
8078     return C;
8079 
8080   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
8081     if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
8082       return C;
8083   }
8084 
8085   return nullptr;
8086 }
8087 
8088 SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
8089                                                   const SDLoc &SL,
8090                                                   SDValue Op0,
8091                                                   SDValue Op1) const {
8092   ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
8093   if (!K1)
8094     return SDValue();
8095 
8096   ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
8097   if (!K0)
8098     return SDValue();
8099 
8100   // Ordered >= (although NaN inputs should have folded away by now).
8101   APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
8102   if (Cmp == APFloat::cmpGreaterThan)
8103     return SDValue();
8104 
8105   // TODO: Check IEEE bit enabled?
8106   EVT VT = Op0.getValueType();
8107   if (Subtarget->enableDX10Clamp()) {
8108     // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
8109     // hardware fmed3 behavior converting to a min.
8110     // FIXME: Should this be allowing -0.0?
8111     if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
8112       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
8113   }
8114 
8115   // med3 for f16 is only available on gfx9+, and not available for v2f16.
8116   if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
8117     // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
8118     // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
8119     // then give the other result, which is different from med3 with a NaN
8120     // input.
8121     SDValue Var = Op0.getOperand(0);
8122     if (!DAG.isKnownNeverSNaN(Var))
8123       return SDValue();
8124 
8125     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8126 
8127     if ((!K0->hasOneUse() ||
8128          TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) &&
8129         (!K1->hasOneUse() ||
8130          TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) {
8131       return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
8132                          Var, SDValue(K0, 0), SDValue(K1, 0));
8133     }
8134   }
8135 
8136   return SDValue();
8137 }
8138 
8139 SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
8140                                                DAGCombinerInfo &DCI) const {
8141   SelectionDAG &DAG = DCI.DAG;
8142 
8143   EVT VT = N->getValueType(0);
8144   unsigned Opc = N->getOpcode();
8145   SDValue Op0 = N->getOperand(0);
8146   SDValue Op1 = N->getOperand(1);
8147 
  // Only do this if the inner op has one use since this will just increase
  // register pressure for no benefit.

8152   if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
8153       !VT.isVector() && VT != MVT::f64 &&
8154       ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) {
8155     // max(max(a, b), c) -> max3(a, b, c)
8156     // min(min(a, b), c) -> min3(a, b, c)
8157     if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
8158       SDLoc DL(N);
8159       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
8160                          DL,
8161                          N->getValueType(0),
8162                          Op0.getOperand(0),
8163                          Op0.getOperand(1),
8164                          Op1);
8165     }
8166 
8167     // Try commuted.
8168     // max(a, max(b, c)) -> max3(a, b, c)
8169     // min(a, min(b, c)) -> min3(a, b, c)
8170     if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
8171       SDLoc DL(N);
8172       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
8173                          DL,
8174                          N->getValueType(0),
8175                          Op0,
8176                          Op1.getOperand(0),
8177                          Op1.getOperand(1));
8178     }
8179   }
8180 
8181   // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
8182   if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
8183     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
8184       return Med3;
8185   }
8186 
8187   if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
8188     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
8189       return Med3;
8190   }
8191 
8192   // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
8193   if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
8194        (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) ||
8195        (Opc == AMDGPUISD::FMIN_LEGACY &&
8196         Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
8197       (VT == MVT::f32 || VT == MVT::f64 ||
8198        (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
8199        (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
8200       Op0.hasOneUse()) {
8201     if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
8202       return Res;
8203   }
8204 
8205   return SDValue();
8206 }
8207 
8208 static bool isClampZeroToOne(SDValue A, SDValue B) {
8209   if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
8210     if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
8211       // FIXME: Should this be allowing -0.0?
8212       return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
8213              (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
8214     }
8215   }
8216 
8217   return false;
8218 }
8219 
8220 // FIXME: Should only worry about snans for version with chain.
8221 SDValue SITargetLowering::performFMed3Combine(SDNode *N,
8222                                               DAGCombinerInfo &DCI) const {
8223   EVT VT = N->getValueType(0);
8224   // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
8225   // NaNs. With a NaN input, the order of the operands may change the result.
8226 
8227   SelectionDAG &DAG = DCI.DAG;
8228   SDLoc SL(N);
8229 
8230   SDValue Src0 = N->getOperand(0);
8231   SDValue Src1 = N->getOperand(1);
8232   SDValue Src2 = N->getOperand(2);
8233 
8234   if (isClampZeroToOne(Src0, Src1)) {
8235     // const_a, const_b, x -> clamp is safe in all cases including signaling
8236     // nans.
8237     // FIXME: Should this be allowing -0.0?
8238     return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
8239   }
8240 
  // FIXME: dx10_clamp behavior is assumed in instcombine. Should we really
  // bother handling the no-dx10-clamp case?
8243   if (Subtarget->enableDX10Clamp()) {
    // If NaNs are clamped to 0, we are free to reorder the inputs.
8245 
8246     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
8247       std::swap(Src0, Src1);
8248 
8249     if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
8250       std::swap(Src1, Src2);
8251 
8252     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
8253       std::swap(Src0, Src1);
8254 
8255     if (isClampZeroToOne(Src1, Src2))
8256       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
8257   }
8258 
8259   return SDValue();
8260 }
8261 
8262 SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
8263                                                  DAGCombinerInfo &DCI) const {
8264   SDValue Src0 = N->getOperand(0);
8265   SDValue Src1 = N->getOperand(1);
8266   if (Src0.isUndef() && Src1.isUndef())
8267     return DCI.DAG.getUNDEF(N->getValueType(0));
8268   return SDValue();
8269 }
8270 
8271 SDValue SITargetLowering::performExtractVectorEltCombine(
8272   SDNode *N, DAGCombinerInfo &DCI) const {
8273   SDValue Vec = N->getOperand(0);
8274   SelectionDAG &DAG = DCI.DAG;
8275 
8276   EVT VecVT = Vec.getValueType();
8277   EVT EltVT = VecVT.getVectorElementType();
8278 
8279   if ((Vec.getOpcode() == ISD::FNEG ||
8280        Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
8281     SDLoc SL(N);
8282     EVT EltVT = N->getValueType(0);
8283     SDValue Idx = N->getOperand(1);
8284     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
8285                               Vec.getOperand(0), Idx);
8286     return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
8287   }
8288 
8289   // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
8290   //    =>
8291   // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
8292   // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
8293   // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
8294   if (Vec.hasOneUse() && DCI.isBeforeLegalize()) {
8295     SDLoc SL(N);
8296     EVT EltVT = N->getValueType(0);
8297     SDValue Idx = N->getOperand(1);
8298     unsigned Opc = Vec.getOpcode();
8299 
8300     switch(Opc) {
8301     default:
8302       break;
8303       // TODO: Support other binary operations.
8304     case ISD::FADD:
8305     case ISD::FSUB:
8306     case ISD::FMUL:
8307     case ISD::ADD:
8308     case ISD::UMIN:
8309     case ISD::UMAX:
8310     case ISD::SMIN:
8311     case ISD::SMAX:
8312     case ISD::FMAXNUM:
8313     case ISD::FMINNUM:
8314     case ISD::FMAXNUM_IEEE:
8315     case ISD::FMINNUM_IEEE: {
8316       SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
8317                                  Vec.getOperand(0), Idx);
8318       SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
8319                                  Vec.getOperand(1), Idx);
8320 
8321       DCI.AddToWorklist(Elt0.getNode());
8322       DCI.AddToWorklist(Elt1.getNode());
8323       return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags());
8324     }
8325     }
8326   }
8327 
8328   unsigned VecSize = VecVT.getSizeInBits();
8329   unsigned EltSize = EltVT.getSizeInBits();
8330 
8331   // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx)
  // This eliminates a non-constant index and the subsequent movrel or scratch
  // access. Sub-dword vectors of 2 dwords or less already have a better
  // lowering. Vectors bigger than 8 dwords would yield too many v_cndmask_b32
  // instructions.
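  // Illustrative example for <4 x i32> with a variable index %idx (names are
  // for exposition only):
  //   %e = extractelement <4 x i32> %vec, i32 %idx
  // becomes a compare/select chain over the constant lanes:
  //   %e0..%e3 = extractelement %vec, 0..3
  //   %e = select (%idx == 3), %e3,
  //          (select (%idx == 2), %e2, (select (%idx == 1), %e1, %e0))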
8336   if (VecSize <= 256 && (VecSize > 64 || EltSize >= 32) &&
8337       !isa<ConstantSDNode>(N->getOperand(1))) {
8338     SDLoc SL(N);
8339     SDValue Idx = N->getOperand(1);
8340     EVT IdxVT = Idx.getValueType();
8341     SDValue V;
8342     for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
8343       SDValue IC = DAG.getConstant(I, SL, IdxVT);
8344       SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
8345       if (I == 0)
8346         V = Elt;
8347       else
8348         V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ);
8349     }
8350     return V;
8351   }
8352 
8353   if (!DCI.isBeforeLegalize())
8354     return SDValue();
8355 
8356   // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit
8357   // elements. This exposes more load reduction opportunities by replacing
8358   // multiple small extract_vector_elements with a single 32-bit extract.
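  // Illustrative example (element 5 of a loaded <8 x i8>):
  //   (extractelement (load <8 x i8>), 5)
  // -> (trunc i8 (srl (extractelement (bitcast ... to <2 x i32>), 1), 8))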
8359   auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
8360   if (isa<MemSDNode>(Vec) &&
8361       EltSize <= 16 &&
8362       EltVT.isByteSized() &&
8363       VecSize > 32 &&
8364       VecSize % 32 == 0 &&
8365       Idx) {
8366     EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
8367 
8368     unsigned BitIndex = Idx->getZExtValue() * EltSize;
8369     unsigned EltIdx = BitIndex / 32;
8370     unsigned LeftoverBitIdx = BitIndex % 32;
8371     SDLoc SL(N);
8372 
8373     SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
8374     DCI.AddToWorklist(Cast.getNode());
8375 
8376     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
8377                               DAG.getConstant(EltIdx, SL, MVT::i32));
8378     DCI.AddToWorklist(Elt.getNode());
8379     SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
8380                               DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
8381     DCI.AddToWorklist(Srl.getNode());
8382 
8383     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl);
8384     DCI.AddToWorklist(Trunc.getNode());
8385     return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc);
8386   }
8387 
8388   return SDValue();
8389 }
8390 
8391 SDValue
8392 SITargetLowering::performInsertVectorEltCombine(SDNode *N,
8393                                                 DAGCombinerInfo &DCI) const {
8394   SDValue Vec = N->getOperand(0);
8395   SDValue Idx = N->getOperand(2);
8396   EVT VecVT = Vec.getValueType();
8397   EVT EltVT = VecVT.getVectorElementType();
8398   unsigned VecSize = VecVT.getSizeInBits();
8399   unsigned EltSize = EltVT.getSizeInBits();
8400 
8401   // INSERT_VECTOR_ELT (<n x e>, var-idx)
8402   // => BUILD_VECTOR n x select (e, const-idx)
  // This eliminates a non-constant index and the subsequent movrel or scratch
  // access. Sub-dword vectors of 2 dwords or less already have a better
  // lowering. Vectors bigger than 8 dwords would yield too many v_cndmask_b32
  // instructions.
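  // Illustrative example for <4 x i32>, variable index %idx and inserted
  // value %val (names are for exposition only):
  //   insertelement <4 x i32> %vec, i32 %val, i32 %idx
  // -> build_vector (select (%idx == 0), %val, %vec[0]), ...,
  //                 (select (%idx == 3), %val, %vec[3])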
8407   if (isa<ConstantSDNode>(Idx) ||
8408       VecSize > 256 || (VecSize <= 64 && EltSize < 32))
8409     return SDValue();
8410 
8411   SelectionDAG &DAG = DCI.DAG;
8412   SDLoc SL(N);
8413   SDValue Ins = N->getOperand(1);
8414   EVT IdxVT = Idx.getValueType();
8415 
8416   SmallVector<SDValue, 16> Ops;
8417   for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
8418     SDValue IC = DAG.getConstant(I, SL, IdxVT);
8419     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
8420     SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ);
8421     Ops.push_back(V);
8422   }
8423 
8424   return DAG.getBuildVector(VecVT, SL, Ops);
8425 }
8426 
8427 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
8428                                           const SDNode *N0,
8429                                           const SDNode *N1) const {
8430   EVT VT = N0->getValueType(0);
8431 
8432   // Only do this if we are not trying to support denormals. v_mad_f32 does not
8433   // support denormals ever.
8434   if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
8435       (VT == MVT::f16 && !Subtarget->hasFP16Denormals()))
8436     return ISD::FMAD;
8437 
8438   const TargetOptions &Options = DAG.getTarget().Options;
8439   if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
8440        (N0->getFlags().hasAllowContract() &&
8441         N1->getFlags().hasAllowContract())) &&
8442       isFMAFasterThanFMulAndFAdd(VT)) {
8443     return ISD::FMA;
8444   }
8445 
8446   return 0;
8447 }
8448 
// For a reassociable opcode, perform:
// op x, (op y, z) -> op (op x, z), y, if x and z are uniform
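// Illustrative example (x and z uniform, y divergent; names are for
// exposition only):
//   %t = add i32 %y, %z
//   %r = add i32 %x, %t
// is rewritten to
//   %s = add i32 %x, %z   ; both operands uniform, so this add stays scalar
//   %r = add i32 %s, %y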
8451 SDValue SITargetLowering::reassociateScalarOps(SDNode *N,
8452                                                SelectionDAG &DAG) const {
8453   EVT VT = N->getValueType(0);
8454   if (VT != MVT::i32 && VT != MVT::i64)
8455     return SDValue();
8456 
8457   unsigned Opc = N->getOpcode();
8458   SDValue Op0 = N->getOperand(0);
8459   SDValue Op1 = N->getOperand(1);
8460 
8461   if (!(Op0->isDivergent() ^ Op1->isDivergent()))
8462     return SDValue();
8463 
8464   if (Op0->isDivergent())
8465     std::swap(Op0, Op1);
8466 
8467   if (Op1.getOpcode() != Opc || !Op1.hasOneUse())
8468     return SDValue();
8469 
8470   SDValue Op2 = Op1.getOperand(1);
8471   Op1 = Op1.getOperand(0);
8472   if (!(Op1->isDivergent() ^ Op2->isDivergent()))
8473     return SDValue();
8474 
8475   if (Op1->isDivergent())
8476     std::swap(Op1, Op2);
8477 
8478   // If either operand is constant this will conflict with
8479   // DAGCombiner::ReassociateOps().
8480   if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) ||
8481       DAG.isConstantIntBuildVectorOrConstantInt(Op1))
8482     return SDValue();
8483 
8484   SDLoc SL(N);
8485   SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1);
8486   return DAG.getNode(Opc, SL, VT, Add1, Op2);
8487 }
8488 
8489 static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
8490                            EVT VT,
8491                            SDValue N0, SDValue N1, SDValue N2,
8492                            bool Signed) {
8493   unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
8494   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
8495   SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
8496   return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
8497 }
8498 
8499 SDValue SITargetLowering::performAddCombine(SDNode *N,
8500                                             DAGCombinerInfo &DCI) const {
8501   SelectionDAG &DAG = DCI.DAG;
8502   EVT VT = N->getValueType(0);
8503   SDLoc SL(N);
8504   SDValue LHS = N->getOperand(0);
8505   SDValue RHS = N->getOperand(1);
8506 
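  // Fold (add (mul x, y), z) into a 64-bit MAD when the multiply's inputs fit
  // in 32 bits. Illustrative example (unsigned i64 case):
  //   %m = mul i64 %a, %b    ; %a and %b have at most 32 significant bits
  //   %r = add i64 %m, %c
  // -> %r = mad_u64_u32 (trunc %a to i32), (trunc %b to i32), %c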
8507   if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
8508       && Subtarget->hasMad64_32() &&
8509       !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
8510       VT.getScalarSizeInBits() <= 64) {
8511     if (LHS.getOpcode() != ISD::MUL)
8512       std::swap(LHS, RHS);
8513 
8514     SDValue MulLHS = LHS.getOperand(0);
8515     SDValue MulRHS = LHS.getOperand(1);
8516     SDValue AddRHS = RHS;
8517 
8518     // TODO: Maybe restrict if SGPR inputs.
8519     if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
8520         numBitsUnsigned(MulRHS, DAG) <= 32) {
8521       MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
8522       MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
8523       AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
8524       return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
8525     }
8526 
8527     if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
8528       MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
8529       MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
8530       AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
8531       return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
8532     }
8533 
8534     return SDValue();
8535   }
8536 
8537   if (SDValue V = reassociateScalarOps(N, DAG)) {
8538     return V;
8539   }
8540 
8541   if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
8542     return SDValue();
8543 
8544   // add x, zext (setcc) => addcarry x, 0, setcc
8545   // add x, sext (setcc) => subcarry x, 0, setcc
8546   unsigned Opc = LHS.getOpcode();
8547   if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
8548       Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
8549     std::swap(RHS, LHS);
8550 
8551   Opc = RHS.getOpcode();
8552   switch (Opc) {
8553   default: break;
8554   case ISD::ZERO_EXTEND:
8555   case ISD::SIGN_EXTEND:
8556   case ISD::ANY_EXTEND: {
8557     auto Cond = RHS.getOperand(0);
8558     if (!isBoolSGPR(Cond))
8559       break;
8560     SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
8561     SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
8562     Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
8563     return DAG.getNode(Opc, SL, VTList, Args);
8564   }
8565   case ISD::ADDCARRY: {
8566     // add x, (addcarry y, 0, cc) => addcarry x, y, cc
8567     auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
8568     if (!C || C->getZExtValue() != 0) break;
8569     SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
8570     return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
8571   }
8572   }
8573   return SDValue();
8574 }
8575 
8576 SDValue SITargetLowering::performSubCombine(SDNode *N,
8577                                             DAGCombinerInfo &DCI) const {
8578   SelectionDAG &DAG = DCI.DAG;
8579   EVT VT = N->getValueType(0);
8580 
8581   if (VT != MVT::i32)
8582     return SDValue();
8583 
8584   SDLoc SL(N);
8585   SDValue LHS = N->getOperand(0);
8586   SDValue RHS = N->getOperand(1);
8587 
8588   if (LHS.getOpcode() == ISD::SUBCARRY) {
8589     // sub (subcarry x, 0, cc), y => subcarry x, y, cc
8590     auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
8591     if (!C || !C->isNullValue())
8592       return SDValue();
8593     SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
8594     return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
8595   }
8596   return SDValue();
8597 }
8598 
8599 SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
8600   DAGCombinerInfo &DCI) const {
8601 
8602   if (N->getValueType(0) != MVT::i32)
8603     return SDValue();
8604 
8605   auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
8606   if (!C || C->getZExtValue() != 0)
8607     return SDValue();
8608 
8609   SelectionDAG &DAG = DCI.DAG;
8610   SDValue LHS = N->getOperand(0);
8611 
8612   // addcarry (add x, y), 0, cc => addcarry x, y, cc
8613   // subcarry (sub x, y), 0, cc => subcarry x, y, cc
8614   unsigned LHSOpc = LHS.getOpcode();
8615   unsigned Opc = N->getOpcode();
8616   if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
8617       (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
8618     SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
8619     return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
8620   }
8621   return SDValue();
8622 }
8623 
8624 SDValue SITargetLowering::performFAddCombine(SDNode *N,
8625                                              DAGCombinerInfo &DCI) const {
8626   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
8627     return SDValue();
8628 
8629   SelectionDAG &DAG = DCI.DAG;
8630   EVT VT = N->getValueType(0);
8631 
8632   SDLoc SL(N);
8633   SDValue LHS = N->getOperand(0);
8634   SDValue RHS = N->getOperand(1);
8635 
  // These should really be instruction patterns, but writing patterns with
  // source modifiers is a pain.
8638 
8639   // fadd (fadd (a, a), b) -> mad 2.0, a, b
8640   if (LHS.getOpcode() == ISD::FADD) {
8641     SDValue A = LHS.getOperand(0);
8642     if (A == LHS.getOperand(1)) {
8643       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
8644       if (FusedOp != 0) {
8645         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
8646         return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
8647       }
8648     }
8649   }
8650 
8651   // fadd (b, fadd (a, a)) -> mad 2.0, a, b
8652   if (RHS.getOpcode() == ISD::FADD) {
8653     SDValue A = RHS.getOperand(0);
8654     if (A == RHS.getOperand(1)) {
8655       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
8656       if (FusedOp != 0) {
8657         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
8658         return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
8659       }
8660     }
8661   }
8662 
8663   return SDValue();
8664 }
8665 
8666 SDValue SITargetLowering::performFSubCombine(SDNode *N,
8667                                              DAGCombinerInfo &DCI) const {
8668   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
8669     return SDValue();
8670 
8671   SelectionDAG &DAG = DCI.DAG;
8672   SDLoc SL(N);
8673   EVT VT = N->getValueType(0);
8674   assert(!VT.isVector());
8675 
8676   // Try to get the fneg to fold into the source modifier. This undoes generic
8677   // DAG combines and folds them into the mad.
8678   //
8679   // Only do this if we are not trying to support denormals. v_mad_f32 does
8680   // not support denormals ever.
8681   SDValue LHS = N->getOperand(0);
8682   SDValue RHS = N->getOperand(1);
8683   if (LHS.getOpcode() == ISD::FADD) {
8684     // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
8685     SDValue A = LHS.getOperand(0);
8686     if (A == LHS.getOperand(1)) {
8687       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
      if (FusedOp != 0) {
8689         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
8690         SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
8691 
8692         return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
8693       }
8694     }
8695   }
8696 
8697   if (RHS.getOpcode() == ISD::FADD) {
8698     // (fsub c, (fadd a, a)) -> mad -2.0, a, c
8699 
8700     SDValue A = RHS.getOperand(0);
8701     if (A == RHS.getOperand(1)) {
8702       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
      if (FusedOp != 0) {
8704         const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
8705         return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
8706       }
8707     }
8708   }
8709 
8710   return SDValue();
8711 }
8712 
8713 SDValue SITargetLowering::performFMACombine(SDNode *N,
8714                                             DAGCombinerInfo &DCI) const {
8715   SelectionDAG &DAG = DCI.DAG;
8716   EVT VT = N->getValueType(0);
8717   SDLoc SL(N);
8718 
8719   if (!Subtarget->hasDot2Insts() || VT != MVT::f32)
8720     return SDValue();
8721 
  // FMA((F32)S0.x, (F32)S1.x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
  //   FDOT2((V2F16)S0, (V2F16)S1, (F32)z)
8724   SDValue Op1 = N->getOperand(0);
8725   SDValue Op2 = N->getOperand(1);
8726   SDValue FMA = N->getOperand(2);
8727 
8728   if (FMA.getOpcode() != ISD::FMA ||
8729       Op1.getOpcode() != ISD::FP_EXTEND ||
8730       Op2.getOpcode() != ISD::FP_EXTEND)
8731     return SDValue();
8732 
  // fdot2_f32_f16 always flushes fp32 denormal operands and the output to
  // zero, regardless of the denorm mode setting. Therefore,
  // unsafe-fp-math/fp-contract is sufficient to allow generating fdot2.
8736   const TargetOptions &Options = DAG.getTarget().Options;
8737   if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
8738       (N->getFlags().hasAllowContract() &&
8739        FMA->getFlags().hasAllowContract())) {
8740     Op1 = Op1.getOperand(0);
8741     Op2 = Op2.getOperand(0);
8742     if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8743         Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
8744       return SDValue();
8745 
8746     SDValue Vec1 = Op1.getOperand(0);
8747     SDValue Idx1 = Op1.getOperand(1);
8748     SDValue Vec2 = Op2.getOperand(0);
8749 
8750     SDValue FMAOp1 = FMA.getOperand(0);
8751     SDValue FMAOp2 = FMA.getOperand(1);
8752     SDValue FMAAcc = FMA.getOperand(2);
8753 
8754     if (FMAOp1.getOpcode() != ISD::FP_EXTEND ||
8755         FMAOp2.getOpcode() != ISD::FP_EXTEND)
8756       return SDValue();
8757 
8758     FMAOp1 = FMAOp1.getOperand(0);
8759     FMAOp2 = FMAOp2.getOperand(0);
8760     if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8761         FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
8762       return SDValue();
8763 
8764     SDValue Vec3 = FMAOp1.getOperand(0);
8765     SDValue Vec4 = FMAOp2.getOperand(0);
8766     SDValue Idx2 = FMAOp1.getOperand(1);
8767 
8768     if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) ||
8769         // Idx1 and Idx2 cannot be the same.
8770         Idx1 == Idx2)
8771       return SDValue();
8772 
8773     if (Vec1 == Vec2 || Vec3 == Vec4)
8774       return SDValue();
8775 
8776     if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16)
8777       return SDValue();
8778 
8779     if ((Vec1 == Vec3 && Vec2 == Vec4) ||
8780         (Vec1 == Vec4 && Vec2 == Vec3)) {
8781       return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc,
8782                          DAG.getTargetConstant(0, SL, MVT::i1));
8783     }
8784   }
8785   return SDValue();
8786 }
8787 
8788 SDValue SITargetLowering::performSetCCCombine(SDNode *N,
8789                                               DAGCombinerInfo &DCI) const {
8790   SelectionDAG &DAG = DCI.DAG;
8791   SDLoc SL(N);
8792 
8793   SDValue LHS = N->getOperand(0);
8794   SDValue RHS = N->getOperand(1);
8795   EVT VT = LHS.getValueType();
8796   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
8797 
8798   auto CRHS = dyn_cast<ConstantSDNode>(RHS);
8799   if (!CRHS) {
8800     CRHS = dyn_cast<ConstantSDNode>(LHS);
8801     if (CRHS) {
8802       std::swap(LHS, RHS);
8803       CC = getSetCCSwappedOperands(CC);
8804     }
8805   }
8806 
8807   if (CRHS) {
8808     if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
8809         isBoolSGPR(LHS.getOperand(0))) {
8810       // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
8811       // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
8812       // setcc (sext from i1 cc),  0, eq|sge|ule) => not cc => xor cc, -1
8813       // setcc (sext from i1 cc),  0, ne|ugt|slt) => cc
8814       if ((CRHS->isAllOnesValue() &&
8815            (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
8816           (CRHS->isNullValue() &&
8817            (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
8818         return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
8819                            DAG.getConstant(-1, SL, MVT::i1));
8820       if ((CRHS->isAllOnesValue() &&
8821            (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
8822           (CRHS->isNullValue() &&
8823            (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
8824         return LHS.getOperand(0);
8825     }
8826 
8827     uint64_t CRHSVal = CRHS->getZExtValue();
8828     if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
8829         LHS.getOpcode() == ISD::SELECT &&
8830         isa<ConstantSDNode>(LHS.getOperand(1)) &&
8831         isa<ConstantSDNode>(LHS.getOperand(2)) &&
8832         LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) &&
8833         isBoolSGPR(LHS.getOperand(0))) {
8834       // Given CT != FT:
8835       // setcc (select cc, CT, CF), CF, eq => xor cc, -1
8836       // setcc (select cc, CT, CF), CF, ne => cc
8837       // setcc (select cc, CT, CF), CT, ne => xor cc, -1
8838       // setcc (select cc, CT, CF), CT, eq => cc
8839       uint64_t CT = LHS.getConstantOperandVal(1);
8840       uint64_t CF = LHS.getConstantOperandVal(2);
8841 
8842       if ((CF == CRHSVal && CC == ISD::SETEQ) ||
8843           (CT == CRHSVal && CC == ISD::SETNE))
8844         return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
8845                            DAG.getConstant(-1, SL, MVT::i1));
8846       if ((CF == CRHSVal && CC == ISD::SETNE) ||
8847           (CT == CRHSVal && CC == ISD::SETEQ))
8848         return LHS.getOperand(0);
8849     }
8850   }
8851 
8852   if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
8853                                            VT != MVT::f16))
8854     return SDValue();
8855 
  // Match isinf/isfinite pattern
  // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
  // (fcmp one (fabs x), inf) -> (fp_class x,
  //   (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero))
8860   if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) {
8861     const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
8862     if (!CRHS)
8863       return SDValue();
8864 
8865     const APFloat &APF = CRHS->getValueAPF();
8866     if (APF.isInfinity() && !APF.isNegative()) {
8867       const unsigned IsInfMask = SIInstrFlags::P_INFINITY |
8868                                  SIInstrFlags::N_INFINITY;
8869       const unsigned IsFiniteMask = SIInstrFlags::N_ZERO |
8870                                     SIInstrFlags::P_ZERO |
8871                                     SIInstrFlags::N_NORMAL |
8872                                     SIInstrFlags::P_NORMAL |
8873                                     SIInstrFlags::N_SUBNORMAL |
8874                                     SIInstrFlags::P_SUBNORMAL;
8875       unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask;
8876       return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
8877                          DAG.getConstant(Mask, SL, MVT::i32));
8878     }
8879   }
8880 
8881   return SDValue();
8882 }
8883 
8884 SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
8885                                                      DAGCombinerInfo &DCI) const {
8886   SelectionDAG &DAG = DCI.DAG;
8887   SDLoc SL(N);
8888   unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
8889 
8890   SDValue Src = N->getOperand(0);
8891   SDValue Srl = N->getOperand(0);
8892   if (Srl.getOpcode() == ISD::ZERO_EXTEND)
8893     Srl = Srl.getOperand(0);
8894 
8895   // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
8896   if (Srl.getOpcode() == ISD::SRL) {
8897     // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
8898     // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
8899     // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
8900 
8901     if (const ConstantSDNode *C =
8902         dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
8903       Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
8904                                EVT(MVT::i32));
8905 
8906       unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
8907       if (SrcOffset < 32 && SrcOffset % 8 == 0) {
8908         return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
8909                            MVT::f32, Srl);
8910       }
8911     }
8912   }
8913 
8914   APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
8915 
8916   KnownBits Known;
8917   TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
8918                                         !DCI.isBeforeLegalizeOps());
8919   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8920   if (TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
8921     DCI.CommitTargetLoweringOpt(TLO);
8922   }
8923 
8924   return SDValue();
8925 }
8926 
8927 SDValue SITargetLowering::performClampCombine(SDNode *N,
8928                                               DAGCombinerInfo &DCI) const {
8929   ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
8930   if (!CSrc)
8931     return SDValue();
8932 
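  // Constant fold clamp(K): anything below 0.0 (or NaN when dx10_clamp is
  // enabled) folds to 0.0, anything above 1.0 folds to 1.0, and any other
  // constant is returned unchanged.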
8933   const APFloat &F = CSrc->getValueAPF();
8934   APFloat Zero = APFloat::getZero(F.getSemantics());
8935   APFloat::cmpResult Cmp0 = F.compare(Zero);
8936   if (Cmp0 == APFloat::cmpLessThan ||
8937       (Cmp0 == APFloat::cmpUnordered && Subtarget->enableDX10Clamp())) {
8938     return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
8939   }
8940 
8941   APFloat One(F.getSemantics(), "1.0");
8942   APFloat::cmpResult Cmp1 = F.compare(One);
8943   if (Cmp1 == APFloat::cmpGreaterThan)
8944     return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));
8945 
8946   return SDValue(CSrc, 0);
8947 }
8948 
8949 
8950 SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
8951                                             DAGCombinerInfo &DCI) const {
8952   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
8953     return SDValue();
8954 
8955   switch (N->getOpcode()) {
8956   default:
8957     return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
8958   case ISD::ADD:
8959     return performAddCombine(N, DCI);
8960   case ISD::SUB:
8961     return performSubCombine(N, DCI);
8962   case ISD::ADDCARRY:
8963   case ISD::SUBCARRY:
8964     return performAddCarrySubCarryCombine(N, DCI);
8965   case ISD::FADD:
8966     return performFAddCombine(N, DCI);
8967   case ISD::FSUB:
8968     return performFSubCombine(N, DCI);
8969   case ISD::SETCC:
8970     return performSetCCCombine(N, DCI);
8971   case ISD::FMAXNUM:
8972   case ISD::FMINNUM:
8973   case ISD::FMAXNUM_IEEE:
8974   case ISD::FMINNUM_IEEE:
8975   case ISD::SMAX:
8976   case ISD::SMIN:
8977   case ISD::UMAX:
8978   case ISD::UMIN:
8979   case AMDGPUISD::FMIN_LEGACY:
8980   case AMDGPUISD::FMAX_LEGACY:
8981     return performMinMaxCombine(N, DCI);
8982   case ISD::FMA:
8983     return performFMACombine(N, DCI);
8984   case ISD::LOAD: {
    if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
      return Widened;
8987     LLVM_FALLTHROUGH;
8988   }
8989   case ISD::STORE:
8990   case ISD::ATOMIC_LOAD:
8991   case ISD::ATOMIC_STORE:
8992   case ISD::ATOMIC_CMP_SWAP:
8993   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
8994   case ISD::ATOMIC_SWAP:
8995   case ISD::ATOMIC_LOAD_ADD:
8996   case ISD::ATOMIC_LOAD_SUB:
8997   case ISD::ATOMIC_LOAD_AND:
8998   case ISD::ATOMIC_LOAD_OR:
8999   case ISD::ATOMIC_LOAD_XOR:
9000   case ISD::ATOMIC_LOAD_NAND:
9001   case ISD::ATOMIC_LOAD_MIN:
9002   case ISD::ATOMIC_LOAD_MAX:
9003   case ISD::ATOMIC_LOAD_UMIN:
9004   case ISD::ATOMIC_LOAD_UMAX:
9005   case ISD::ATOMIC_LOAD_FADD:
9006   case AMDGPUISD::ATOMIC_INC:
9007   case AMDGPUISD::ATOMIC_DEC:
9008   case AMDGPUISD::ATOMIC_LOAD_FMIN:
9009   case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
9010     if (DCI.isBeforeLegalize())
9011       break;
9012     return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
9013   case ISD::AND:
9014     return performAndCombine(N, DCI);
9015   case ISD::OR:
9016     return performOrCombine(N, DCI);
9017   case ISD::XOR:
9018     return performXorCombine(N, DCI);
9019   case ISD::ZERO_EXTEND:
9020     return performZeroExtendCombine(N, DCI);
9021   case AMDGPUISD::FP_CLASS:
9022     return performClassCombine(N, DCI);
9023   case ISD::FCANONICALIZE:
9024     return performFCanonicalizeCombine(N, DCI);
9025   case AMDGPUISD::RCP:
9026     return performRcpCombine(N, DCI);
9027   case AMDGPUISD::FRACT:
9028   case AMDGPUISD::RSQ:
9029   case AMDGPUISD::RCP_LEGACY:
9030   case AMDGPUISD::RSQ_LEGACY:
9031   case AMDGPUISD::RCP_IFLAG:
9032   case AMDGPUISD::RSQ_CLAMP:
9033   case AMDGPUISD::LDEXP: {
9034     SDValue Src = N->getOperand(0);
9035     if (Src.isUndef())
9036       return Src;
9037     break;
9038   }
9039   case ISD::SINT_TO_FP:
9040   case ISD::UINT_TO_FP:
9041     return performUCharToFloatCombine(N, DCI);
9042   case AMDGPUISD::CVT_F32_UBYTE0:
9043   case AMDGPUISD::CVT_F32_UBYTE1:
9044   case AMDGPUISD::CVT_F32_UBYTE2:
9045   case AMDGPUISD::CVT_F32_UBYTE3:
9046     return performCvtF32UByteNCombine(N, DCI);
9047   case AMDGPUISD::FMED3:
9048     return performFMed3Combine(N, DCI);
9049   case AMDGPUISD::CVT_PKRTZ_F16_F32:
9050     return performCvtPkRTZCombine(N, DCI);
9051   case AMDGPUISD::CLAMP:
9052     return performClampCombine(N, DCI);
9053   case ISD::SCALAR_TO_VECTOR: {
9054     SelectionDAG &DAG = DCI.DAG;
9055     EVT VT = N->getValueType(0);
9056 
9057     // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
9058     if (VT == MVT::v2i16 || VT == MVT::v2f16) {
9059       SDLoc SL(N);
9060       SDValue Src = N->getOperand(0);
9061       EVT EltVT = Src.getValueType();
9062       if (EltVT == MVT::f16)
9063         Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
9064 
9065       SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
9066       return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
9067     }
9068 
9069     break;
9070   }
9071   case ISD::EXTRACT_VECTOR_ELT:
9072     return performExtractVectorEltCombine(N, DCI);
9073   case ISD::INSERT_VECTOR_ELT:
9074     return performInsertVectorEltCombine(N, DCI);
9075   }
9076   return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
9077 }
9078 
9079 /// Helper function for adjustWritemask
9080 static unsigned SubIdx2Lane(unsigned Idx) {
9081   switch (Idx) {
9082   default: return 0;
9083   case AMDGPU::sub0: return 0;
9084   case AMDGPU::sub1: return 1;
9085   case AMDGPU::sub2: return 2;
9086   case AMDGPU::sub3: return 3;
9087   case AMDGPU::sub4: return 4; // Possible with TFE/LWE
9088   }
9089 }
9090 
9091 /// Adjust the writemask of MIMG instructions
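/// Illustrative example: if only the .x and .z results of an image load with
/// dmask = 0xf are actually used, the dmask is shrunk to 0x5, the opcode is
/// swapped to the 2-channel variant, and the EXTRACT_SUBREG users are
/// re-pointed at the packed sub0/sub1 lanes of the new node.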
9092 SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
9093                                           SelectionDAG &DAG) const {
9094   unsigned Opcode = Node->getMachineOpcode();
9095 
9096   // Subtract 1 because the vdata output is not a MachineSDNode operand.
9097   int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1;
9098   if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx))
9099     return Node; // not implemented for D16
9100 
9101   SDNode *Users[5] = { nullptr };
9102   unsigned Lane = 0;
9103   unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1;
9104   unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
9105   unsigned NewDmask = 0;
9106   unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1;
9107   unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1;
  bool UsesTFC = Node->getConstantOperandVal(TFEIdx) ||
                 Node->getConstantOperandVal(LWEIdx);
9110   unsigned TFCLane = 0;
9111   bool HasChain = Node->getNumValues() > 1;
9112 
9113   if (OldDmask == 0) {
    // These are folded out, but on the off chance it happens don't assert.
9115     return Node;
9116   }
9117 
9118   unsigned OldBitsSet = countPopulation(OldDmask);
9119   // Work out which is the TFE/LWE lane if that is enabled.
9120   if (UsesTFC) {
9121     TFCLane = OldBitsSet;
9122   }
9123 
9124   // Try to figure out the used register components
9125   for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
9126        I != E; ++I) {
9127 
9128     // Don't look at users of the chain.
9129     if (I.getUse().getResNo() != 0)
9130       continue;
9131 
9132     // Abort if we can't understand the usage
9133     if (!I->isMachineOpcode() ||
9134         I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
9135       return Node;
9136 
9137     // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
9138     // Note that subregs are packed, i.e. Lane==0 is the first bit set
9139     // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
9140     // set, etc.
9141     Lane = SubIdx2Lane(I->getConstantOperandVal(1));
9142 
9143     // Check if the use is for the TFE/LWE generated result at VGPRn+1.
9144     if (UsesTFC && Lane == TFCLane) {
9145       Users[Lane] = *I;
9146     } else {
9147       // Set which texture component corresponds to the lane.
9148       unsigned Comp;
9149       for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) {
9150         Comp = countTrailingZeros(Dmask);
9151         Dmask &= ~(1 << Comp);
9152       }
9153 
9154       // Abort if we have more than one user per component.
9155       if (Users[Lane])
9156         return Node;
9157 
9158       Users[Lane] = *I;
9159       NewDmask |= 1 << Comp;
9160     }
9161   }
9162 
9163   // Don't allow 0 dmask, as hardware assumes one channel enabled.
9164   bool NoChannels = !NewDmask;
9165   if (NoChannels) {
9166     // If the original dmask has one channel - then nothing to do
9167     if (OldBitsSet == 1)
9168       return Node;
9169     // Use an arbitrary dmask - required for the instruction to work
9170     NewDmask = 1;
9171   }
9172   // Abort if there's no change
9173   if (NewDmask == OldDmask)
9174     return Node;
9175 
9176   unsigned BitsSet = countPopulation(NewDmask);
9177 
  // Check for TFE or LWE - increase the number of channels by one to account
  // for the extra return value.
  // This will need adjustment for D16 if D16 is also included in
  // adjustWritemask (this function), but at present D16 is excluded.
9182   unsigned NewChannels = BitsSet + UsesTFC;
9183 
9184   int NewOpcode =
9185       AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels);
9186   assert(NewOpcode != -1 &&
9187          NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
9188          "failed to find equivalent MIMG op");
9189 
9190   // Adjust the writemask in the node
9191   SmallVector<SDValue, 12> Ops;
9192   Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
9193   Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
9194   Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
9195 
9196   MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
9197 
9198   MVT ResultVT = NewChannels == 1 ?
9199     SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 :
9200                            NewChannels == 5 ? 8 : NewChannels);
9201   SDVTList NewVTList = HasChain ?
9202     DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);
9203 
9204 
9205   MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
9206                                               NewVTList, Ops);
9207 
9208   if (HasChain) {
9209     // Update chain.
9210     DAG.setNodeMemRefs(NewNode, Node->memoperands());
9211     DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
9212   }
9213 
9214   if (NewChannels == 1) {
9215     assert(Node->hasNUsesOfValue(1, 0));
9216     SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
9217                                       SDLoc(Node), Users[Lane]->getValueType(0),
9218                                       SDValue(NewNode, 0));
9219     DAG.ReplaceAllUsesWith(Users[Lane], Copy);
9220     return nullptr;
9221   }
9222 
9223   // Update the users of the node with the new indices
9224   for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) {
9225     SDNode *User = Users[i];
9226     if (!User) {
9227       // Handle the special case of NoChannels. We set NewDmask to 1 above, but
9228       // Users[0] is still nullptr because channel 0 doesn't really have a use.
9229       if (i || !NoChannels)
9230         continue;
9231     } else {
9232       SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
9233       DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
9234     }
9235 
9236     switch (Idx) {
9237     default: break;
9238     case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
9239     case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
9240     case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
9241     case AMDGPU::sub3: Idx = AMDGPU::sub4; break;
9242     }
9243   }
9244 
9245   DAG.RemoveDeadNode(Node);
9246   return nullptr;
9247 }
9248 
9249 static bool isFrameIndexOp(SDValue Op) {
9250   if (Op.getOpcode() == ISD::AssertZext)
9251     Op = Op.getOperand(0);
9252 
9253   return isa<FrameIndexSDNode>(Op);
9254 }
9255 
9256 /// Legalize target independent instructions (e.g. INSERT_SUBREG)
9257 /// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers.
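/// For example, a frame-index operand of a REG_SEQUENCE is materialized with
/// an S_MOV_B32 so the node only sees register operands.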
9259 SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
9260                                                         SelectionDAG &DAG) const {
9261   if (Node->getOpcode() == ISD::CopyToReg) {
9262     RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
9263     SDValue SrcVal = Node->getOperand(2);
9264 
9265     // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
9266     // to try understanding copies to physical registers.
9267     if (SrcVal.getValueType() == MVT::i1 &&
9268         TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
9269       SDLoc SL(Node);
9270       MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
9271       SDValue VReg = DAG.getRegister(
9272         MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
9273 
9274       SDNode *Glued = Node->getGluedNode();
9275       SDValue ToVReg
9276         = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
9277                          SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
9278       SDValue ToResultReg
9279         = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
9280                            VReg, ToVReg.getValue(1));
9281       DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
9282       DAG.RemoveDeadNode(Node);
9283       return ToResultReg.getNode();
9284     }
9285   }
9286 
9287   SmallVector<SDValue, 8> Ops;
9288   for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
9289     if (!isFrameIndexOp(Node->getOperand(i))) {
9290       Ops.push_back(Node->getOperand(i));
9291       continue;
9292     }
9293 
9294     SDLoc DL(Node);
9295     Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
9296                                      Node->getOperand(i).getValueType(),
9297                                      Node->getOperand(i)), 0));
9298   }
9299 
9300   return DAG.UpdateNodeOperands(Node, Ops);
9301 }
9302 
9303 /// Fold the instructions after selecting them.
9304 /// Returns null if users were already updated.
9305 SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
9306                                           SelectionDAG &DAG) const {
9307   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
9308   unsigned Opcode = Node->getMachineOpcode();
9309 
9310   if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
9311       !TII->isGather4(Opcode)) {
9312     return adjustWritemask(Node, DAG);
9313   }
9314 
9315   if (Opcode == AMDGPU::INSERT_SUBREG ||
9316       Opcode == AMDGPU::REG_SEQUENCE) {
9317     legalizeTargetIndependentNode(Node, DAG);
9318     return Node;
9319   }
9320 
9321   switch (Opcode) {
9322   case AMDGPU::V_DIV_SCALE_F32:
9323   case AMDGPU::V_DIV_SCALE_F64: {
9324     // Satisfy the operand register constraint when one of the inputs is
9325     // undefined. Ordinarily each undef value will have its own implicit_def of
9326     // a vreg, so force these to use a single register.
9327     SDValue Src0 = Node->getOperand(0);
9328     SDValue Src1 = Node->getOperand(1);
9329     SDValue Src2 = Node->getOperand(2);
9330 
9331     if ((Src0.isMachineOpcode() &&
9332          Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
9333         (Src0 == Src1 || Src0 == Src2))
9334       break;
9335 
9336     MVT VT = Src0.getValueType().getSimpleVT();
9337     const TargetRegisterClass *RC = getRegClassFor(VT);
9338 
9339     MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
9340     SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
9341 
9342     SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
9343                                       UndefReg, Src0, SDValue());
9344 
9345     // src0 must be the same register as src1 or src2, even if the value is
9346     // undefined, so make sure we don't violate this constraint.
9347     if (Src0.isMachineOpcode() &&
9348         Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
9349       if (Src1.isMachineOpcode() &&
9350           Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
9351         Src0 = Src1;
9352       else if (Src2.isMachineOpcode() &&
9353                Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
9354         Src0 = Src2;
9355       else {
9356         assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
9357         Src0 = UndefReg;
9358         Src1 = UndefReg;
9359       }
9360     } else
9361       break;
9362 
9363     SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
9364     for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
9365       Ops.push_back(Node->getOperand(I));
9366 
9367     Ops.push_back(ImpDef.getValue(1));
9368     return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
9369   }
9370   default:
9371     break;
9372   }
9373 
9374   return Node;
9375 }
9376 
/// Fix up the instruction after instruction selection: legalize VOP3 operands
/// for the constant bus requirement and replace unused atomics with their
/// no-return variants.
9379 void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
9380                                                      SDNode *Node) const {
9381   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
9382 
9383   MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
9384 
9385   if (TII->isVOP3(MI.getOpcode())) {
9386     // Make sure constant bus requirements are respected.
9387     TII->legalizeOperandsVOP3(MRI, MI);
9388     return;
9389   }
9390 
9391   // Replace unused atomics with the no return version.
9392   int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
9393   if (NoRetAtomicOp != -1) {
9394     if (!Node->hasAnyUseOfValue(0)) {
9395       MI.setDesc(TII->get(NoRetAtomicOp));
9396       MI.RemoveOperand(0);
9397       return;
9398     }
9399 
9400     // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
9401     // instruction, because the return type of these instructions is a vec2 of
9402     // the memory type, so it can be tied to the input operand.
9403     // This means these instructions always have a use, so we need to add a
9404     // special case to check if the atomic has only one extract_subreg use,
9405     // which itself has no uses.
9406     if ((Node->hasNUsesOfValue(1, 0) &&
9407          Node->use_begin()->isMachineOpcode() &&
9408          Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
9409          !Node->use_begin()->hasAnyUseOfValue(0))) {
9410       unsigned Def = MI.getOperand(0).getReg();
9411 
9412       // Change this into a noret atomic.
9413       MI.setDesc(TII->get(NoRetAtomicOp));
9414       MI.RemoveOperand(0);
9415 
9416       // If we only remove the def operand from the atomic instruction, the
9417       // extract_subreg will be left with a use of a vreg without a def.
9418       // So we need to insert an implicit_def to avoid machine verifier
9419       // errors.
9420       BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
9421               TII->get(AMDGPU::IMPLICIT_DEF), Def);
9422     }
9423     return;
9424   }
9425 }
9426 
9427 static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
9428                               uint64_t Val) {
9429   SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
9430   return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
9431 }
9432 
9433 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
9434                                                 const SDLoc &DL,
9435                                                 SDValue Ptr) const {
9436   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
9437 
  // Build the constant half of the descriptor as a 64-bit subregister before
  // building the full 128-bit register. If we are building multiple resource
  // descriptors, this will allow CSEing of the 2-component register.
9441   const SDValue Ops0[] = {
9442     DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
9443     buildSMovImm32(DAG, DL, 0),
9444     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
9445     buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
9446     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
9447   };
9448 
9449   SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
9450                                                 MVT::v2i32, Ops0), 0);
9451 
9452   // Combine the constants and the pointer.
9453   const SDValue Ops1[] = {
9454     DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
9455     Ptr,
9456     DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
9457     SubRegHi,
9458     DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
9459   };
9460 
9461   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
9462 }
9463 
9464 /// Return a resource descriptor with the 'Add TID' bit enabled
9465 ///        The TID (Thread ID) is multiplied by the stride value (bits [61:48]
9466 ///        of the resource descriptor) to create an offset, which is added to
9467 ///        the resource pointer.
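///        Illustrative layout of the resulting v4i32 descriptor:
///          sub0: low 32 bits of Ptr
///          sub1: high 32 bits of Ptr, OR'd with RsrcDword1 if non-zero
///          sub2: RsrcDword2And3 & 0xffffffff
///          sub3: RsrcDword2And3 >> 32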
9468 MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
9469                                            SDValue Ptr, uint32_t RsrcDword1,
9470                                            uint64_t RsrcDword2And3) const {
9471   SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
9472   SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
9473   if (RsrcDword1) {
9474     PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
9475                                      DAG.getConstant(RsrcDword1, DL, MVT::i32)),
9476                     0);
9477   }
9478 
9479   SDValue DataLo = buildSMovImm32(DAG, DL,
9480                                   RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
9481   SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
9482 
9483   const SDValue Ops[] = {
9484     DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
9485     PtrLo,
9486     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
9487     PtrHi,
9488     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
9489     DataLo,
9490     DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
9491     DataHi,
9492     DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
9493   };
9494 
9495   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
9496 }
9497 
9498 //===----------------------------------------------------------------------===//
9499 //                         SI Inline Assembly Support
9500 //===----------------------------------------------------------------------===//
9501 
9502 std::pair<unsigned, const TargetRegisterClass *>
9503 SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
9504                                                StringRef Constraint,
9505                                                MVT VT) const {
9506   const TargetRegisterClass *RC = nullptr;
9507   if (Constraint.size() == 1) {
9508     switch (Constraint[0]) {
9509     default:
9510       return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
9511     case 's':
9512     case 'r':
9513       switch (VT.getSizeInBits()) {
9514       default:
9515         return std::make_pair(0U, nullptr);
9516       case 32:
9517       case 16:
9518         RC = &AMDGPU::SReg_32_XM0RegClass;
9519         break;
9520       case 64:
9521         RC = &AMDGPU::SGPR_64RegClass;
9522         break;
9523       case 128:
9524         RC = &AMDGPU::SReg_128RegClass;
9525         break;
9526       case 256:
9527         RC = &AMDGPU::SReg_256RegClass;
9528         break;
9529       case 512:
9530         RC = &AMDGPU::SReg_512RegClass;
9531         break;
9532       }
9533       break;
9534     case 'v':
9535       switch (VT.getSizeInBits()) {
9536       default:
9537         return std::make_pair(0U, nullptr);
9538       case 32:
9539       case 16:
9540         RC = &AMDGPU::VGPR_32RegClass;
9541         break;
9542       case 64:
9543         RC = &AMDGPU::VReg_64RegClass;
9544         break;
9545       case 96:
9546         RC = &AMDGPU::VReg_96RegClass;
9547         break;
9548       case 128:
9549         RC = &AMDGPU::VReg_128RegClass;
9550         break;
9551       case 256:
9552         RC = &AMDGPU::VReg_256RegClass;
9553         break;
9554       case 512:
9555         RC = &AMDGPU::VReg_512RegClass;
9556         break;
9557       }
9558       break;
9559     }
    // We actually support i128, i16 and f16 as inline parameters
    // even if they are not reported as legal.
9562     if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
9563                VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
9564       return std::make_pair(0U, RC);
9565   }
9566 
9567   if (Constraint.size() > 1) {
9568     if (Constraint[1] == 'v') {
9569       RC = &AMDGPU::VGPR_32RegClass;
9570     } else if (Constraint[1] == 's') {
9571       RC = &AMDGPU::SGPR_32RegClass;
9572     }
9573 
9574     if (RC) {
9575       uint32_t Idx;
9576       bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
9577       if (!Failed && Idx < RC->getNumRegs())
9578         return std::make_pair(RC->getRegister(Idx), RC);
9579     }
9580   }
9581   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
9582 }
9583 
9584 SITargetLowering::ConstraintType
9585 SITargetLowering::getConstraintType(StringRef Constraint) const {
9586   if (Constraint.size() == 1) {
9587     switch (Constraint[0]) {
9588     default: break;
9589     case 's':
9590     case 'v':
9591       return C_RegisterClass;
9592     }
9593   }
9594   return TargetLowering::getConstraintType(Constraint);
9595 }
9596 
9597 // Figure out which registers should be reserved for stack access. Only after
9598 // the function is legalized do we know all of the non-spill stack objects or if
9599 // calls are present.
9600 void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
9601   MachineRegisterInfo &MRI = MF.getRegInfo();
9602   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
9603   const MachineFrameInfo &MFI = MF.getFrameInfo();
9604   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
9605 
9606   if (Info->isEntryFunction()) {
9607     // Callable functions have fixed registers used for stack access.
9608     reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
9609   }
9610 
  // We have to assume the SP is needed in case there are calls in the function
  // during lowering. Calls are only detected after the function is
  // lowered. We're about to reserve registers, so don't bother reserving the
  // SP if we aren't really going to use it.
9615   bool NeedSP = !Info->isEntryFunction() ||
9616     MFI.hasVarSizedObjects() ||
9617     MFI.hasCalls();
9618 
9619   if (NeedSP) {
9620     unsigned ReservedStackPtrOffsetReg = TRI->reservedStackPtrOffsetReg(MF);
9621     Info->setStackPtrOffsetReg(ReservedStackPtrOffsetReg);
9622 
9623     assert(Info->getStackPtrOffsetReg() != Info->getFrameOffsetReg());
9624     assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
9625                                Info->getStackPtrOffsetReg()));
9626     MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
9627   }
9628 
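  // Rewrite the remaining placeholder registers (scratch resource descriptor,
  // frame offset and scratch wave offset) to the registers that were actually
  // assigned.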
9629   MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
9630   MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
9631   MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
9632                      Info->getScratchWaveOffsetReg());
9633 
9634   Info->limitOccupancy(MF);
9635 
9636   TargetLoweringBase::finalizeLowering(MF);
9637 }
9638 
9639 void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
9640                                                      KnownBits &Known,
9641                                                      const APInt &DemandedElts,
9642                                                      const SelectionDAG &DAG,
9643                                                      unsigned Depth) const {
9644   TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
9645                                                 DAG, Depth);
9646 
9647   if (getSubtarget()->enableHugePrivateBuffer())
9648     return;
9649 
9650   // Technically it may be possible to have a dispatch with a single workitem
9651   // that uses the full private memory size, but that's not really useful. We
9652   // can't use vaddr in MUBUF instructions if we don't know the address
9653   // calculation won't overflow, so assume the sign bit is never set.
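  // With the default of 5 assumed-zero high bits, this bounds a 32-bit frame
  // index below 1 << 27 bytes of scratch.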
9654   Known.Zero.setHighBits(AssumeFrameIndexHighZeroBits);
9655 }
9656 
9657 LLVM_ATTRIBUTE_UNUSED
9658 static bool isCopyFromRegOfInlineAsm(const SDNode *N) {
9659   assert(N->getOpcode() == ISD::CopyFromReg);
9660   do {
9661     // Follow the chain until we find an INLINEASM node.
9662     N = N->getOperand(0).getNode();
9663     if (N->getOpcode() == ISD::INLINEASM ||
9664         N->getOpcode() == ISD::INLINEASM_BR)
9665       return true;
9666   } while (N->getOpcode() == ISD::CopyFromReg);
9667   return false;
9668 }
9669 
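// Conservatively report whether the value defined by \p N may differ between
// lanes of a wave, e.g. a CopyFromReg of a VGPR formal argument, a load
// through a flat or private pointer, or an intrinsic such as
// llvm.amdgcn.workitem.id.x.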
bool SITargetLowering::isSDNodeSourceOfDivergence(
    const SDNode *N, FunctionLoweringInfo *FLI,
    LegacyDivergenceAnalysis *KDA) const {
  switch (N->getOpcode()) {
  case ISD::CopyFromReg: {
    const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1));
    const MachineFunction *MF = FLI->MF;
    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
    unsigned Reg = R->getReg();
    if (TRI.isPhysicalRegister(Reg))
      return !TRI.isSGPRReg(MRI, Reg);

    if (MRI.isLiveIn(Reg)) {
      // workitem.id.x, workitem.id.y, workitem.id.z and any other VGPR formal
      // argument are considered divergent.
      if (!TRI.isSGPRReg(MRI, Reg))
        return true;
      // Formal arguments of non-entry functions are conservatively considered
      // divergent.
      if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
        return true;
      return false;
    }
    const Value *V = FLI->getValueFromVirtualReg(Reg);
    if (V)
      return KDA->isDivergent(V);
    assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
    return !TRI.isSGPRReg(MRI, Reg);
  }
  case ISD::LOAD: {
    const LoadSDNode *L = cast<LoadSDNode>(N);
    unsigned AS = L->getAddressSpace();
    // A flat load may access private memory.
    return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
  }
  case ISD::CALLSEQ_END:
    return true;
  case ISD::INTRINSIC_WO_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
  case ISD::INTRINSIC_W_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
  // In some cases intrinsics that are a source of divergence have already been
  // lowered to AMDGPUISD nodes, so check those as well.
  case AMDGPUISD::INTERP_MOV:
  case AMDGPUISD::INTERP_P1:
  case AMDGPUISD::INTERP_P2:
    return true;
  }
  return false;
}
9730 
9731 bool SITargetLowering::denormalsEnabledForType(EVT VT) const {
9732   switch (VT.getScalarType().getSimpleVT().SimpleTy) {
9733   case MVT::f32:
9734     return Subtarget->hasFP32Denormals();
9735   case MVT::f64:
9736     return Subtarget->hasFP64Denormals();
9737   case MVT::f16:
9738     return Subtarget->hasFP16Denormals();
9739   default:
9740     return false;
9741   }
9742 }
9743 
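// AMDGPUISD::CLAMP with DX10 clamp mode flushes a NaN input to 0.0, so its
// result can never be NaN; without DX10 clamp a NaN source propagates, and we
// fall back to checking the operand.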
9744 bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
9745                                                     const SelectionDAG &DAG,
9746                                                     bool SNaN,
9747                                                     unsigned Depth) const {
9748   if (Op.getOpcode() == AMDGPUISD::CLAMP) {
9749     if (Subtarget->enableDX10Clamp())
9750       return true; // Clamped to 0.
9751     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
9752   }
9753 
9754   return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG,
9755                                                             SNaN, Depth);
9756 }
9757 
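// Decide how an atomicrmw should be lowered. AtomicExpansionKind::None keeps
// the instruction for selection, while CmpXChg has AtomicExpandPass rewrite it
// into a compare-exchange loop. For example, an atomicrmw fadd on a global
// (addrspace(1)) float pointer is expanded to a cmpxchg loop, while an LDS
// (addrspace(3)) float fadd is kept when the subtarget has LDS FP atomics.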
9758 TargetLowering::AtomicExpansionKind
9759 SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
9760   switch (RMW->getOperation()) {
9761   case AtomicRMWInst::FAdd: {
9762     Type *Ty = RMW->getType();
9763 
9764     // We don't have a way to support 16-bit atomics now, so just leave them
9765     // as-is.
9766     if (Ty->isHalfTy())
9767       return AtomicExpansionKind::None;
9768 
9769     if (!Ty->isFloatTy())
9770       return AtomicExpansionKind::CmpXChg;
9771 
    // TODO: We do have these for flat; older targets also had them for
    // buffers.
9773     unsigned AS = RMW->getPointerAddressSpace();
9774     return (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) ?
9775       AtomicExpansionKind::None : AtomicExpansionKind::CmpXChg;
9776   }
9777   default:
9778     break;
9779   }
9780 
9781   return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
9782 }
9783