1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Custom DAG lowering for SI
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #if defined(_MSC_VER) || defined(__MINGW32__)
15 // Provide M_PI.
16 #define _USE_MATH_DEFINES
17 #endif
18 
19 #include "SIISelLowering.h"
20 #include "AMDGPU.h"
21 #include "AMDGPUSubtarget.h"
22 #include "AMDGPUTargetMachine.h"
23 #include "SIDefines.h"
24 #include "SIInstrInfo.h"
25 #include "SIMachineFunctionInfo.h"
26 #include "SIRegisterInfo.h"
27 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
28 #include "Utils/AMDGPUBaseInfo.h"
29 #include "llvm/ADT/APFloat.h"
30 #include "llvm/ADT/APInt.h"
31 #include "llvm/ADT/ArrayRef.h"
32 #include "llvm/ADT/BitVector.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringRef.h"
36 #include "llvm/ADT/StringSwitch.h"
37 #include "llvm/ADT/Twine.h"
38 #include "llvm/CodeGen/Analysis.h"
39 #include "llvm/CodeGen/CallingConvLower.h"
40 #include "llvm/CodeGen/DAGCombine.h"
41 #include "llvm/CodeGen/ISDOpcodes.h"
42 #include "llvm/CodeGen/MachineBasicBlock.h"
43 #include "llvm/CodeGen/MachineFrameInfo.h"
44 #include "llvm/CodeGen/MachineFunction.h"
45 #include "llvm/CodeGen/MachineInstr.h"
46 #include "llvm/CodeGen/MachineInstrBuilder.h"
47 #include "llvm/CodeGen/MachineMemOperand.h"
48 #include "llvm/CodeGen/MachineModuleInfo.h"
49 #include "llvm/CodeGen/MachineOperand.h"
50 #include "llvm/CodeGen/MachineRegisterInfo.h"
51 #include "llvm/CodeGen/SelectionDAG.h"
52 #include "llvm/CodeGen/SelectionDAGNodes.h"
53 #include "llvm/CodeGen/TargetCallingConv.h"
54 #include "llvm/CodeGen/TargetRegisterInfo.h"
55 #include "llvm/CodeGen/ValueTypes.h"
56 #include "llvm/IR/Constants.h"
57 #include "llvm/IR/DataLayout.h"
58 #include "llvm/IR/DebugLoc.h"
59 #include "llvm/IR/DerivedTypes.h"
60 #include "llvm/IR/DiagnosticInfo.h"
61 #include "llvm/IR/Function.h"
62 #include "llvm/IR/GlobalValue.h"
63 #include "llvm/IR/InstrTypes.h"
64 #include "llvm/IR/Instruction.h"
65 #include "llvm/IR/Instructions.h"
66 #include "llvm/IR/IntrinsicInst.h"
67 #include "llvm/IR/Type.h"
68 #include "llvm/Support/Casting.h"
69 #include "llvm/Support/CodeGen.h"
70 #include "llvm/Support/CommandLine.h"
71 #include "llvm/Support/Compiler.h"
72 #include "llvm/Support/ErrorHandling.h"
73 #include "llvm/Support/KnownBits.h"
74 #include "llvm/Support/MachineValueType.h"
75 #include "llvm/Support/MathExtras.h"
76 #include "llvm/Target/TargetOptions.h"
77 #include <cassert>
78 #include <cmath>
79 #include <cstdint>
80 #include <iterator>
81 #include <tuple>
82 #include <utility>
83 #include <vector>
84 
85 using namespace llvm;
86 
87 #define DEBUG_TYPE "si-lower"
88 
89 STATISTIC(NumTailCalls, "Number of tail calls");
90 
91 static cl::opt<bool> EnableVGPRIndexMode(
92   "amdgpu-vgpr-index-mode",
93   cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
94   cl::init(false));
95 
96 static cl::opt<bool> DisableLoopAlignment(
97   "amdgpu-disable-loop-alignment",
98   cl::desc("Do not align and prefetch loops"),
99   cl::init(false));
100 
101 static unsigned findFirstFreeSGPR(CCState &CCInfo) {
102   unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
103   for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
104     if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
105       return AMDGPU::SGPR0 + Reg;
106     }
107   }
108   llvm_unreachable("Cannot allocate sgpr");
109 }
110 
111 SITargetLowering::SITargetLowering(const TargetMachine &TM,
112                                    const GCNSubtarget &STI)
113     : AMDGPUTargetLowering(TM, STI),
114       Subtarget(&STI) {
115   addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
116   addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
117 
118   addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
119   addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);
120 
121   addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
122   addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
123   addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
124 
125   addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
126   addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass);
127 
128   addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
129   addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);
130 
131   addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
132   addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
133 
134   addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
135   addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);
136 
137   addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
138   addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
139 
140   addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
141   addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
142 
143   if (Subtarget->has16BitInsts()) {
144     addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
145     addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
146 
147     // Unless there are also VOP3P operations, not operations are really legal.
148     addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
149     addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
150     addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
151     addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
152   }
153 
154   if (Subtarget->hasMAIInsts()) {
155     addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass);
156     addRegisterClass(MVT::v32f32, &AMDGPU::VReg_1024RegClass);
157   }
158 
159   computeRegisterProperties(Subtarget->getRegisterInfo());
160 
161   // We need to custom lower vector stores from local memory
162   setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
163   setOperationAction(ISD::LOAD, MVT::v3i32, Custom);
164   setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
165   setOperationAction(ISD::LOAD, MVT::v5i32, Custom);
166   setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
167   setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
168   setOperationAction(ISD::LOAD, MVT::i1, Custom);
169   setOperationAction(ISD::LOAD, MVT::v32i32, Custom);
170 
171   setOperationAction(ISD::STORE, MVT::v2i32, Custom);
172   setOperationAction(ISD::STORE, MVT::v3i32, Custom);
173   setOperationAction(ISD::STORE, MVT::v4i32, Custom);
174   setOperationAction(ISD::STORE, MVT::v5i32, Custom);
175   setOperationAction(ISD::STORE, MVT::v8i32, Custom);
176   setOperationAction(ISD::STORE, MVT::v16i32, Custom);
177   setOperationAction(ISD::STORE, MVT::i1, Custom);
178   setOperationAction(ISD::STORE, MVT::v32i32, Custom);
179 
180   setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
181   setTruncStoreAction(MVT::v3i32, MVT::v3i16, Expand);
182   setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
183   setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
184   setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
185   setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
186   setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
187   setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
188   setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
189   setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
190   setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);
191 
192   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
193   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
194 
195   setOperationAction(ISD::SELECT, MVT::i1, Promote);
196   setOperationAction(ISD::SELECT, MVT::i64, Custom);
197   setOperationAction(ISD::SELECT, MVT::f64, Promote);
198   AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);
199 
200   setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
201   setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
202   setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
203   setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
204   setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
205 
206   setOperationAction(ISD::SETCC, MVT::i1, Promote);
207   setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
208   setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
209   AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
210 
211   setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
212   setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
213 
214   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
215   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
216   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
217   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
218   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
219   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v3i16, Custom);
220   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
221   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);
222 
223   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
224   setOperationAction(ISD::BR_CC, MVT::i1, Expand);
225   setOperationAction(ISD::BR_CC, MVT::i32, Expand);
226   setOperationAction(ISD::BR_CC, MVT::i64, Expand);
227   setOperationAction(ISD::BR_CC, MVT::f32, Expand);
228   setOperationAction(ISD::BR_CC, MVT::f64, Expand);
229 
230   setOperationAction(ISD::UADDO, MVT::i32, Legal);
231   setOperationAction(ISD::USUBO, MVT::i32, Legal);
232 
233   setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
234   setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);
235 
236   setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
237   setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
238   setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
239 
240 #if 0
241   setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
242   setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
243 #endif
244 
245   // We only support LOAD/STORE and vector manipulation ops for vectors
246   // with > 4 elements.
247   for (MVT VT : { MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
248                   MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16,
249                   MVT::v32i32, MVT::v32f32 }) {
250     for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
251       switch (Op) {
252       case ISD::LOAD:
253       case ISD::STORE:
254       case ISD::BUILD_VECTOR:
255       case ISD::BITCAST:
256       case ISD::EXTRACT_VECTOR_ELT:
257       case ISD::INSERT_VECTOR_ELT:
258       case ISD::INSERT_SUBVECTOR:
259       case ISD::EXTRACT_SUBVECTOR:
260       case ISD::SCALAR_TO_VECTOR:
261         break;
262       case ISD::CONCAT_VECTORS:
263         setOperationAction(Op, VT, Custom);
264         break;
265       default:
266         setOperationAction(Op, VT, Expand);
267         break;
268       }
269     }
270   }
271 
272   setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);
273 
274   // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
275   // is expanded to avoid having two separate loops in case the index is a VGPR.
276 
277   // Most operations are naturally 32-bit vector operations. We only support
278   // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
279   for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
280     setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
281     AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);
282 
283     setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
284     AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);
285 
286     setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
287     AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);
288 
289     setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
290     AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
291   }
292 
293   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
294   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
295   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
296   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);
297 
298   setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
299   setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
300 
301   // Avoid stack access for these.
302   // TODO: Generalize to more vector types.
303   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
304   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
305   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
306   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);
307 
308   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
309   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
310   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
311   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
312   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);
313 
314   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
315   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
316   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);
317 
318   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
319   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
320   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
321   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);
322 
323   // Deal with vec3 vector operations when widened to vec4.
324   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Custom);
325   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Custom);
326   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Custom);
327   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Custom);
328 
329   // Deal with vec5 vector operations when widened to vec8.
330   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Custom);
331   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Custom);
332   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Custom);
333   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Custom);
334 
335   // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
336   // and output demarshalling
337   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
338   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
339 
340   // We can't return success/failure, only the old value,
341   // let LLVM add the comparison
342   setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
343   setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);
344 
345   if (Subtarget->hasFlatAddressSpace()) {
346     setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
347     setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
348   }
349 
350   setOperationAction(ISD::BSWAP, MVT::i32, Legal);
351   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
352 
353   // On SI this is s_memtime and s_memrealtime on VI.
354   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
355   setOperationAction(ISD::TRAP, MVT::Other, Custom);
356   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);
357 
358   if (Subtarget->has16BitInsts()) {
359     setOperationAction(ISD::FLOG, MVT::f16, Custom);
360     setOperationAction(ISD::FEXP, MVT::f16, Custom);
361     setOperationAction(ISD::FLOG10, MVT::f16, Custom);
362   }
363 
364   // v_mad_f32 does not support denormals according to some sources.
365   if (!Subtarget->hasFP32Denormals())
366     setOperationAction(ISD::FMAD, MVT::f32, Legal);
367 
368   if (!Subtarget->hasBFI()) {
369     // fcopysign can be done in a single instruction with BFI.
370     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
371     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
372   }
373 
374   if (!Subtarget->hasBCNT(32))
375     setOperationAction(ISD::CTPOP, MVT::i32, Expand);
376 
377   if (!Subtarget->hasBCNT(64))
378     setOperationAction(ISD::CTPOP, MVT::i64, Expand);
379 
380   if (Subtarget->hasFFBH())
381     setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
382 
383   if (Subtarget->hasFFBL())
384     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
385 
386   // We only really have 32-bit BFE instructions (and 16-bit on VI).
387   //
388   // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
389   // effort to match them now. We want this to be false for i64 cases when the
390   // extraction isn't restricted to the upper or lower half. Ideally we would
391   // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
392   // span the midpoint are probably relatively rare, so don't worry about them
393   // for now.
394   if (Subtarget->hasBFE())
395     setHasExtractBitsInsn(true);
396 
397   setOperationAction(ISD::FMINNUM, MVT::f32, Custom);
398   setOperationAction(ISD::FMAXNUM, MVT::f32, Custom);
399   setOperationAction(ISD::FMINNUM, MVT::f64, Custom);
400   setOperationAction(ISD::FMAXNUM, MVT::f64, Custom);
401 
402 
403   // These are really only legal for ieee_mode functions. We should be avoiding
404   // them for functions that don't have ieee_mode enabled, so just say they are
405   // legal.
406   setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
407   setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
408   setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
409   setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
410 
411 
412   if (Subtarget->haveRoundOpsF64()) {
413     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
414     setOperationAction(ISD::FCEIL, MVT::f64, Legal);
415     setOperationAction(ISD::FRINT, MVT::f64, Legal);
416   } else {
417     setOperationAction(ISD::FCEIL, MVT::f64, Custom);
418     setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
419     setOperationAction(ISD::FRINT, MVT::f64, Custom);
420     setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
421   }
422 
423   setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
424 
425   setOperationAction(ISD::FSIN, MVT::f32, Custom);
426   setOperationAction(ISD::FCOS, MVT::f32, Custom);
427   setOperationAction(ISD::FDIV, MVT::f32, Custom);
428   setOperationAction(ISD::FDIV, MVT::f64, Custom);
429 
430   if (Subtarget->has16BitInsts()) {
431     setOperationAction(ISD::Constant, MVT::i16, Legal);
432 
433     setOperationAction(ISD::SMIN, MVT::i16, Legal);
434     setOperationAction(ISD::SMAX, MVT::i16, Legal);
435 
436     setOperationAction(ISD::UMIN, MVT::i16, Legal);
437     setOperationAction(ISD::UMAX, MVT::i16, Legal);
438 
439     setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
440     AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);
441 
442     setOperationAction(ISD::ROTR, MVT::i16, Promote);
443     setOperationAction(ISD::ROTL, MVT::i16, Promote);
444 
445     setOperationAction(ISD::SDIV, MVT::i16, Promote);
446     setOperationAction(ISD::UDIV, MVT::i16, Promote);
447     setOperationAction(ISD::SREM, MVT::i16, Promote);
448     setOperationAction(ISD::UREM, MVT::i16, Promote);
449 
450     setOperationAction(ISD::BSWAP, MVT::i16, Promote);
451     setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);
452 
453     setOperationAction(ISD::CTTZ, MVT::i16, Promote);
454     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
455     setOperationAction(ISD::CTLZ, MVT::i16, Promote);
456     setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
457     setOperationAction(ISD::CTPOP, MVT::i16, Promote);
458 
459     setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
460 
461     setOperationAction(ISD::BR_CC, MVT::i16, Expand);
462 
463     setOperationAction(ISD::LOAD, MVT::i16, Custom);
464 
465     setTruncStoreAction(MVT::i64, MVT::i16, Expand);
466 
467     setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
468     AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
469     setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
470     AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);
471 
472     setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
473     setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
474     setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
475     setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
476 
477     // F16 - Constant Actions.
478     setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
479 
480     // F16 - Load/Store Actions.
481     setOperationAction(ISD::LOAD, MVT::f16, Promote);
482     AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
483     setOperationAction(ISD::STORE, MVT::f16, Promote);
484     AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);
485 
486     // F16 - VOP1 Actions.
487     setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
488     setOperationAction(ISD::FCOS, MVT::f16, Promote);
489     setOperationAction(ISD::FSIN, MVT::f16, Promote);
490     setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
491     setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
492     setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
493     setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
494     setOperationAction(ISD::FROUND, MVT::f16, Custom);
495 
496     // F16 - VOP2 Actions.
497     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
498     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
499 
500     setOperationAction(ISD::FDIV, MVT::f16, Custom);
501 
502     // F16 - VOP3 Actions.
503     setOperationAction(ISD::FMA, MVT::f16, Legal);
504     if (!Subtarget->hasFP16Denormals() && STI.hasMadF16())
505       setOperationAction(ISD::FMAD, MVT::f16, Legal);
506 
507     for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
508       for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
509         switch (Op) {
510         case ISD::LOAD:
511         case ISD::STORE:
512         case ISD::BUILD_VECTOR:
513         case ISD::BITCAST:
514         case ISD::EXTRACT_VECTOR_ELT:
515         case ISD::INSERT_VECTOR_ELT:
516         case ISD::INSERT_SUBVECTOR:
517         case ISD::EXTRACT_SUBVECTOR:
518         case ISD::SCALAR_TO_VECTOR:
519           break;
520         case ISD::CONCAT_VECTORS:
521           setOperationAction(Op, VT, Custom);
522           break;
523         default:
524           setOperationAction(Op, VT, Expand);
525           break;
526         }
527       }
528     }
529 
530     // XXX - Do these do anything? Vector constants turn into build_vector.
531     setOperationAction(ISD::Constant, MVT::v2i16, Legal);
532     setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);
533 
534     setOperationAction(ISD::UNDEF, MVT::v2i16, Legal);
535     setOperationAction(ISD::UNDEF, MVT::v2f16, Legal);
536 
537     setOperationAction(ISD::STORE, MVT::v2i16, Promote);
538     AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
539     setOperationAction(ISD::STORE, MVT::v2f16, Promote);
540     AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);
541 
542     setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
543     AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
544     setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
545     AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);
546 
547     setOperationAction(ISD::AND, MVT::v2i16, Promote);
548     AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
549     setOperationAction(ISD::OR, MVT::v2i16, Promote);
550     AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
551     setOperationAction(ISD::XOR, MVT::v2i16, Promote);
552     AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
553 
554     setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
555     AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
556     setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
557     AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);
558 
559     setOperationAction(ISD::STORE, MVT::v4i16, Promote);
560     AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
561     setOperationAction(ISD::STORE, MVT::v4f16, Promote);
562     AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);
563 
564     setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
565     setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
566     setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
567     setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
568 
569     setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand);
570     setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
571     setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);
572 
573     if (!Subtarget->hasVOP3PInsts()) {
574       setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
575       setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
576     }
577 
578     setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
579     // This isn't really legal, but this avoids the legalizer unrolling it (and
580     // allows matching fneg (fabs x) patterns)
581     setOperationAction(ISD::FABS, MVT::v2f16, Legal);
582 
583     setOperationAction(ISD::FMAXNUM, MVT::f16, Custom);
584     setOperationAction(ISD::FMINNUM, MVT::f16, Custom);
585     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal);
586     setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal);
587 
588     setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom);
589     setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom);
590 
591     setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand);
592     setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand);
593   }
594 
595   if (Subtarget->hasVOP3PInsts()) {
596     setOperationAction(ISD::ADD, MVT::v2i16, Legal);
597     setOperationAction(ISD::SUB, MVT::v2i16, Legal);
598     setOperationAction(ISD::MUL, MVT::v2i16, Legal);
599     setOperationAction(ISD::SHL, MVT::v2i16, Legal);
600     setOperationAction(ISD::SRL, MVT::v2i16, Legal);
601     setOperationAction(ISD::SRA, MVT::v2i16, Legal);
602     setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
603     setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
604     setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
605     setOperationAction(ISD::UMAX, MVT::v2i16, Legal);
606 
607     setOperationAction(ISD::FADD, MVT::v2f16, Legal);
608     setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
609     setOperationAction(ISD::FMA, MVT::v2f16, Legal);
610 
611     setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal);
612     setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal);
613 
614     setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);
615 
616     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
617     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
618 
619     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f16, Custom);
620     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
621 
622     setOperationAction(ISD::SHL, MVT::v4i16, Custom);
623     setOperationAction(ISD::SRA, MVT::v4i16, Custom);
624     setOperationAction(ISD::SRL, MVT::v4i16, Custom);
625     setOperationAction(ISD::ADD, MVT::v4i16, Custom);
626     setOperationAction(ISD::SUB, MVT::v4i16, Custom);
627     setOperationAction(ISD::MUL, MVT::v4i16, Custom);
628 
629     setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
630     setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
631     setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
632     setOperationAction(ISD::UMAX, MVT::v4i16, Custom);
633 
634     setOperationAction(ISD::FADD, MVT::v4f16, Custom);
635     setOperationAction(ISD::FMUL, MVT::v4f16, Custom);
636     setOperationAction(ISD::FMA, MVT::v4f16, Custom);
637 
638     setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom);
639     setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom);
640 
641     setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
642     setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);
643     setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom);
644 
645     setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
646     setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
647     setOperationAction(ISD::SELECT, MVT::v4f16, Custom);
648   }
649 
650   setOperationAction(ISD::FNEG, MVT::v4f16, Custom);
651   setOperationAction(ISD::FABS, MVT::v4f16, Custom);
652 
653   if (Subtarget->has16BitInsts()) {
654     setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
655     AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
656     setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
657     AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
658   } else {
659     // Legalization hack.
660     setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
661     setOperationAction(ISD::SELECT, MVT::v2f16, Custom);
662 
663     setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
664     setOperationAction(ISD::FABS, MVT::v2f16, Custom);
665   }
666 
667   for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
668     setOperationAction(ISD::SELECT, VT, Custom);
669   }
670 
671   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
672   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
673   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
674   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
675   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom);
676   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
677   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);
678 
679   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
680   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2i16, Custom);
681   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
682   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4i16, Custom);
683   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom);
684   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
685   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::f16, Custom);
686   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
687   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
688 
689   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
690   setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
691   setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
692   setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);
693   setOperationAction(ISD::INTRINSIC_VOID, MVT::v4i16, Custom);
694   setOperationAction(ISD::INTRINSIC_VOID, MVT::f16, Custom);
695   setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
696   setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
697 
698   setTargetDAGCombine(ISD::ADD);
699   setTargetDAGCombine(ISD::ADDCARRY);
700   setTargetDAGCombine(ISD::SUB);
701   setTargetDAGCombine(ISD::SUBCARRY);
702   setTargetDAGCombine(ISD::FADD);
703   setTargetDAGCombine(ISD::FSUB);
704   setTargetDAGCombine(ISD::FMINNUM);
705   setTargetDAGCombine(ISD::FMAXNUM);
706   setTargetDAGCombine(ISD::FMINNUM_IEEE);
707   setTargetDAGCombine(ISD::FMAXNUM_IEEE);
708   setTargetDAGCombine(ISD::FMA);
709   setTargetDAGCombine(ISD::SMIN);
710   setTargetDAGCombine(ISD::SMAX);
711   setTargetDAGCombine(ISD::UMIN);
712   setTargetDAGCombine(ISD::UMAX);
713   setTargetDAGCombine(ISD::SETCC);
714   setTargetDAGCombine(ISD::AND);
715   setTargetDAGCombine(ISD::OR);
716   setTargetDAGCombine(ISD::XOR);
717   setTargetDAGCombine(ISD::SINT_TO_FP);
718   setTargetDAGCombine(ISD::UINT_TO_FP);
719   setTargetDAGCombine(ISD::FCANONICALIZE);
720   setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
721   setTargetDAGCombine(ISD::ZERO_EXTEND);
722   setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
723   setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
724   setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
725 
726   // All memory operations. Some folding on the pointer operand is done to help
727   // matching the constant offsets in the addressing modes.
728   setTargetDAGCombine(ISD::LOAD);
729   setTargetDAGCombine(ISD::STORE);
730   setTargetDAGCombine(ISD::ATOMIC_LOAD);
731   setTargetDAGCombine(ISD::ATOMIC_STORE);
732   setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
733   setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
734   setTargetDAGCombine(ISD::ATOMIC_SWAP);
735   setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
736   setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
737   setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
738   setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
739   setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
740   setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
741   setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
742   setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
743   setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
744   setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
745   setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD);
746 
747   setSchedulingPreference(Sched::RegPressure);
748 }
749 
750 const GCNSubtarget *SITargetLowering::getSubtarget() const {
751   return Subtarget;
752 }
753 
754 //===----------------------------------------------------------------------===//
755 // TargetLowering queries
756 //===----------------------------------------------------------------------===//
757 
758 // v_mad_mix* support a conversion from f16 to f32.
759 //
760 // There is only one special case when denormals are enabled we don't currently,
761 // where this is OK to use.
762 bool SITargetLowering::isFPExtFoldable(unsigned Opcode,
763                                            EVT DestVT, EVT SrcVT) const {
764   return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
765           (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
766          DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() &&
767          SrcVT.getScalarType() == MVT::f16;
768 }
769 
770 bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
771   // SI has some legal vector types, but no legal vector operations. Say no
772   // shuffles are legal in order to prefer scalarizing some vector operations.
773   return false;
774 }
775 
776 MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
777                                                     CallingConv::ID CC,
778                                                     EVT VT) const {
779   if (CC == CallingConv::AMDGPU_KERNEL)
780     return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
781 
782   if (VT.isVector()) {
783     EVT ScalarVT = VT.getScalarType();
784     unsigned Size = ScalarVT.getSizeInBits();
785     if (Size == 32)
786       return ScalarVT.getSimpleVT();
787 
788     if (Size > 32)
789       return MVT::i32;
790 
791     if (Size == 16 && Subtarget->has16BitInsts())
792       return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
793   } else if (VT.getSizeInBits() > 32)
794     return MVT::i32;
795 
796   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
797 }
798 
799 unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
800                                                          CallingConv::ID CC,
801                                                          EVT VT) const {
802   if (CC == CallingConv::AMDGPU_KERNEL)
803     return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
804 
805   if (VT.isVector()) {
806     unsigned NumElts = VT.getVectorNumElements();
807     EVT ScalarVT = VT.getScalarType();
808     unsigned Size = ScalarVT.getSizeInBits();
809 
810     if (Size == 32)
811       return NumElts;
812 
813     if (Size > 32)
814       return NumElts * ((Size + 31) / 32);
815 
816     if (Size == 16 && Subtarget->has16BitInsts())
817       return (NumElts + 1) / 2;
818   } else if (VT.getSizeInBits() > 32)
819     return (VT.getSizeInBits() + 31) / 32;
820 
821   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
822 }
823 
824 unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
825   LLVMContext &Context, CallingConv::ID CC,
826   EVT VT, EVT &IntermediateVT,
827   unsigned &NumIntermediates, MVT &RegisterVT) const {
828   if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
829     unsigned NumElts = VT.getVectorNumElements();
830     EVT ScalarVT = VT.getScalarType();
831     unsigned Size = ScalarVT.getSizeInBits();
832     if (Size == 32) {
833       RegisterVT = ScalarVT.getSimpleVT();
834       IntermediateVT = RegisterVT;
835       NumIntermediates = NumElts;
836       return NumIntermediates;
837     }
838 
839     if (Size > 32) {
840       RegisterVT = MVT::i32;
841       IntermediateVT = RegisterVT;
842       NumIntermediates = NumElts * ((Size + 31) / 32);
843       return NumIntermediates;
844     }
845 
846     // FIXME: We should fix the ABI to be the same on targets without 16-bit
847     // support, but unless we can properly handle 3-vectors, it will be still be
848     // inconsistent.
849     if (Size == 16 && Subtarget->has16BitInsts()) {
850       RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
851       IntermediateVT = RegisterVT;
852       NumIntermediates = (NumElts + 1) / 2;
853       return NumIntermediates;
854     }
855   }
856 
857   return TargetLowering::getVectorTypeBreakdownForCallingConv(
858     Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
859 }
860 
861 static MVT memVTFromAggregate(Type *Ty) {
862   // Only limited forms of aggregate type currently expected.
863   assert(Ty->isStructTy() && "Expected struct type");
864 
865 
866   Type *ElementType = nullptr;
867   unsigned NumElts;
868   if (Ty->getContainedType(0)->isVectorTy()) {
869     VectorType *VecComponent = cast<VectorType>(Ty->getContainedType(0));
870     ElementType = VecComponent->getElementType();
871     NumElts = VecComponent->getNumElements();
872   } else {
873     ElementType = Ty->getContainedType(0);
874     NumElts = 1;
875   }
876 
877   assert((Ty->getContainedType(1) && Ty->getContainedType(1)->isIntegerTy(32)) && "Expected int32 type");
878 
879   // Calculate the size of the memVT type from the aggregate
880   unsigned Pow2Elts = 0;
881   unsigned ElementSize;
882   switch (ElementType->getTypeID()) {
883     default:
884       llvm_unreachable("Unknown type!");
885     case Type::IntegerTyID:
886       ElementSize = cast<IntegerType>(ElementType)->getBitWidth();
887       break;
888     case Type::HalfTyID:
889       ElementSize = 16;
890       break;
891     case Type::FloatTyID:
892       ElementSize = 32;
893       break;
894   }
895   unsigned AdditionalElts = ElementSize == 16 ? 2 : 1;
896   Pow2Elts = 1 << Log2_32_Ceil(NumElts + AdditionalElts);
897 
898   return MVT::getVectorVT(MVT::getVT(ElementType, false),
899                           Pow2Elts);
900 }
901 
902 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
903                                           const CallInst &CI,
904                                           MachineFunction &MF,
905                                           unsigned IntrID) const {
906   if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
907           AMDGPU::lookupRsrcIntrinsic(IntrID)) {
908     AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
909                                                   (Intrinsic::ID)IntrID);
910     if (Attr.hasFnAttribute(Attribute::ReadNone))
911       return false;
912 
913     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
914 
915     if (RsrcIntr->IsImage) {
916       Info.ptrVal = MFI->getImagePSV(
917         *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
918         CI.getArgOperand(RsrcIntr->RsrcArg));
919       Info.align.reset();
920     } else {
921       Info.ptrVal = MFI->getBufferPSV(
922         *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
923         CI.getArgOperand(RsrcIntr->RsrcArg));
924     }
925 
926     Info.flags = MachineMemOperand::MODereferenceable;
927     if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
928       Info.opc = ISD::INTRINSIC_W_CHAIN;
929       Info.memVT = MVT::getVT(CI.getType(), true);
930       if (Info.memVT == MVT::Other) {
931         // Some intrinsics return an aggregate type - special case to work out
932         // the correct memVT
933         Info.memVT = memVTFromAggregate(CI.getType());
934       }
935       Info.flags |= MachineMemOperand::MOLoad;
936     } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
937       Info.opc = ISD::INTRINSIC_VOID;
938       Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
939       Info.flags |= MachineMemOperand::MOStore;
940     } else {
941       // Atomic
942       Info.opc = ISD::INTRINSIC_W_CHAIN;
943       Info.memVT = MVT::getVT(CI.getType());
944       Info.flags = MachineMemOperand::MOLoad |
945                    MachineMemOperand::MOStore |
946                    MachineMemOperand::MODereferenceable;
947 
948       // XXX - Should this be volatile without known ordering?
949       Info.flags |= MachineMemOperand::MOVolatile;
950     }
951     return true;
952   }
953 
954   switch (IntrID) {
955   case Intrinsic::amdgcn_atomic_inc:
956   case Intrinsic::amdgcn_atomic_dec:
957   case Intrinsic::amdgcn_ds_ordered_add:
958   case Intrinsic::amdgcn_ds_ordered_swap:
959   case Intrinsic::amdgcn_ds_fadd:
960   case Intrinsic::amdgcn_ds_fmin:
961   case Intrinsic::amdgcn_ds_fmax: {
962     Info.opc = ISD::INTRINSIC_W_CHAIN;
963     Info.memVT = MVT::getVT(CI.getType());
964     Info.ptrVal = CI.getOperand(0);
965     Info.align.reset();
966     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
967 
968     const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4));
969     if (!Vol->isZero())
970       Info.flags |= MachineMemOperand::MOVolatile;
971 
972     return true;
973   }
974   case Intrinsic::amdgcn_buffer_atomic_fadd: {
975     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
976 
977     Info.opc = ISD::INTRINSIC_VOID;
978     Info.memVT = MVT::getVT(CI.getOperand(0)->getType());
979     Info.ptrVal = MFI->getBufferPSV(
980       *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
981       CI.getArgOperand(1));
982     Info.align.reset();
983     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
984 
985     const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
986     if (!Vol || !Vol->isZero())
987       Info.flags |= MachineMemOperand::MOVolatile;
988 
989     return true;
990   }
991   case Intrinsic::amdgcn_global_atomic_fadd: {
992     Info.opc = ISD::INTRINSIC_VOID;
993     Info.memVT = MVT::getVT(CI.getOperand(0)->getType()
994                             ->getPointerElementType());
995     Info.ptrVal = CI.getOperand(0);
996     Info.align.reset();
997     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
998 
999     return true;
1000   }
1001   case Intrinsic::amdgcn_ds_append:
1002   case Intrinsic::amdgcn_ds_consume: {
1003     Info.opc = ISD::INTRINSIC_W_CHAIN;
1004     Info.memVT = MVT::getVT(CI.getType());
1005     Info.ptrVal = CI.getOperand(0);
1006     Info.align.reset();
1007     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1008 
1009     const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1));
1010     if (!Vol->isZero())
1011       Info.flags |= MachineMemOperand::MOVolatile;
1012 
1013     return true;
1014   }
1015   case Intrinsic::amdgcn_ds_gws_init:
1016   case Intrinsic::amdgcn_ds_gws_barrier:
1017   case Intrinsic::amdgcn_ds_gws_sema_v:
1018   case Intrinsic::amdgcn_ds_gws_sema_br:
1019   case Intrinsic::amdgcn_ds_gws_sema_p:
1020   case Intrinsic::amdgcn_ds_gws_sema_release_all: {
1021     Info.opc = ISD::INTRINSIC_VOID;
1022 
1023     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1024     Info.ptrVal =
1025         MFI->getGWSPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());
1026 
1027     // This is an abstract access, but we need to specify a type and size.
1028     Info.memVT = MVT::i32;
1029     Info.size = 4;
1030     Info.align = Align(4);
1031 
1032     Info.flags = MachineMemOperand::MOStore;
1033     if (IntrID == Intrinsic::amdgcn_ds_gws_barrier)
1034       Info.flags = MachineMemOperand::MOLoad;
1035     return true;
1036   }
1037   default:
1038     return false;
1039   }
1040 }
1041 
1042 bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
1043                                             SmallVectorImpl<Value*> &Ops,
1044                                             Type *&AccessTy) const {
1045   switch (II->getIntrinsicID()) {
1046   case Intrinsic::amdgcn_atomic_inc:
1047   case Intrinsic::amdgcn_atomic_dec:
1048   case Intrinsic::amdgcn_ds_ordered_add:
1049   case Intrinsic::amdgcn_ds_ordered_swap:
1050   case Intrinsic::amdgcn_ds_fadd:
1051   case Intrinsic::amdgcn_ds_fmin:
1052   case Intrinsic::amdgcn_ds_fmax: {
1053     Value *Ptr = II->getArgOperand(0);
1054     AccessTy = II->getType();
1055     Ops.push_back(Ptr);
1056     return true;
1057   }
1058   default:
1059     return false;
1060   }
1061 }
1062 
1063 bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
1064   if (!Subtarget->hasFlatInstOffsets()) {
1065     // Flat instructions do not have offsets, and only have the register
1066     // address.
1067     return AM.BaseOffs == 0 && AM.Scale == 0;
1068   }
1069 
1070   // GFX9 added a 13-bit signed offset. When using regular flat instructions,
1071   // the sign bit is ignored and is treated as a 12-bit unsigned offset.
1072 
1073   // GFX10 shrinked signed offset to 12 bits. When using regular flat
1074   // instructions, the sign bit is also ignored and is treated as 11-bit
1075   // unsigned offset.
1076 
1077   if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
1078     return isUInt<11>(AM.BaseOffs) && AM.Scale == 0;
1079 
1080   // Just r + i
1081   return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
1082 }
1083 
1084 bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
1085   if (Subtarget->hasFlatGlobalInsts())
1086     return isInt<13>(AM.BaseOffs) && AM.Scale == 0;
1087 
1088   if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
1089       // Assume the we will use FLAT for all global memory accesses
1090       // on VI.
1091       // FIXME: This assumption is currently wrong.  On VI we still use
1092       // MUBUF instructions for the r + i addressing mode.  As currently
1093       // implemented, the MUBUF instructions only work on buffer < 4GB.
1094       // It may be possible to support > 4GB buffers with MUBUF instructions,
1095       // by setting the stride value in the resource descriptor which would
1096       // increase the size limit to (stride * 4GB).  However, this is risky,
1097       // because it has never been validated.
1098     return isLegalFlatAddressingMode(AM);
1099   }
1100 
1101   return isLegalMUBUFAddressingMode(AM);
1102 }
1103 
1104 bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
1105   // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
1106   // additionally can do r + r + i with addr64. 32-bit has more addressing
1107   // mode options. Depending on the resource constant, it can also do
1108   // (i64 r0) + (i32 r1) * (i14 i).
1109   //
1110   // Private arrays end up using a scratch buffer most of the time, so also
1111   // assume those use MUBUF instructions. Scratch loads / stores are currently
1112   // implemented as mubuf instructions with offen bit set, so slightly
1113   // different than the normal addr64.
1114   if (!isUInt<12>(AM.BaseOffs))
1115     return false;
1116 
1117   // FIXME: Since we can split immediate into soffset and immediate offset,
1118   // would it make sense to allow any immediate?
1119 
1120   switch (AM.Scale) {
1121   case 0: // r + i or just i, depending on HasBaseReg.
1122     return true;
1123   case 1:
1124     return true; // We have r + r or r + i.
1125   case 2:
1126     if (AM.HasBaseReg) {
1127       // Reject 2 * r + r.
1128       return false;
1129     }
1130 
1131     // Allow 2 * r as r + r
1132     // Or  2 * r + i is allowed as r + r + i.
1133     return true;
1134   default: // Don't allow n * r
1135     return false;
1136   }
1137 }
1138 
1139 bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
1140                                              const AddrMode &AM, Type *Ty,
1141                                              unsigned AS, Instruction *I) const {
1142   // No global is ever allowed as a base.
1143   if (AM.BaseGV)
1144     return false;
1145 
1146   if (AS == AMDGPUAS::GLOBAL_ADDRESS)
1147     return isLegalGlobalAddressingMode(AM);
1148 
1149   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
1150       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
1151       AS == AMDGPUAS::BUFFER_FAT_POINTER) {
1152     // If the offset isn't a multiple of 4, it probably isn't going to be
1153     // correctly aligned.
1154     // FIXME: Can we get the real alignment here?
1155     if (AM.BaseOffs % 4 != 0)
1156       return isLegalMUBUFAddressingMode(AM);
1157 
1158     // There are no SMRD extloads, so if we have to do a small type access we
1159     // will use a MUBUF load.
1160     // FIXME?: We also need to do this if unaligned, but we don't know the
1161     // alignment here.
1162     if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
1163       return isLegalGlobalAddressingMode(AM);
1164 
1165     if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
1166       // SMRD instructions have an 8-bit, dword offset on SI.
1167       if (!isUInt<8>(AM.BaseOffs / 4))
1168         return false;
1169     } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
1170       // On CI+, this can also be a 32-bit literal constant offset. If it fits
1171       // in 8-bits, it can use a smaller encoding.
1172       if (!isUInt<32>(AM.BaseOffs / 4))
1173         return false;
1174     } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
1175       // On VI, these use the SMEM format and the offset is 20-bit in bytes.
1176       if (!isUInt<20>(AM.BaseOffs))
1177         return false;
1178     } else
1179       llvm_unreachable("unhandled generation");
1180 
1181     if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1182       return true;
1183 
1184     if (AM.Scale == 1 && AM.HasBaseReg)
1185       return true;
1186 
1187     return false;
1188 
1189   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
1190     return isLegalMUBUFAddressingMode(AM);
1191   } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
1192              AS == AMDGPUAS::REGION_ADDRESS) {
1193     // Basic, single offset DS instructions allow a 16-bit unsigned immediate
1194     // field.
1195     // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
1196     // an 8-bit dword offset but we don't know the alignment here.
1197     if (!isUInt<16>(AM.BaseOffs))
1198       return false;
1199 
1200     if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1201       return true;
1202 
1203     if (AM.Scale == 1 && AM.HasBaseReg)
1204       return true;
1205 
1206     return false;
1207   } else if (AS == AMDGPUAS::FLAT_ADDRESS ||
1208              AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) {
1209     // For an unknown address space, this usually means that this is for some
1210     // reason being used for pure arithmetic, and not based on some addressing
1211     // computation. We don't have instructions that compute pointers with any
1212     // addressing modes, so treat them as having no offset like flat
1213     // instructions.
1214     return isLegalFlatAddressingMode(AM);
1215   } else {
1216     llvm_unreachable("unhandled address space");
1217   }
1218 }
1219 
1220 bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
1221                                         const SelectionDAG &DAG) const {
1222   if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
1223     return (MemVT.getSizeInBits() <= 4 * 32);
1224   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
1225     unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
1226     return (MemVT.getSizeInBits() <= MaxPrivateBits);
1227   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
1228     return (MemVT.getSizeInBits() <= 2 * 32);
1229   }
1230   return true;
1231 }
1232 
1233 bool SITargetLowering::allowsMisalignedMemoryAccessesImpl(
1234     unsigned Size, unsigned AddrSpace, unsigned Align,
1235     MachineMemOperand::Flags Flags, bool *IsFast) const {
1236   if (IsFast)
1237     *IsFast = false;
1238 
1239   if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1240       AddrSpace == AMDGPUAS::REGION_ADDRESS) {
1241     // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
1242     // aligned, 8 byte access in a single operation using ds_read2/write2_b32
1243     // with adjacent offsets.
1244     bool AlignedBy4 = (Align % 4 == 0);
1245     if (IsFast)
1246       *IsFast = AlignedBy4;
1247 
1248     return AlignedBy4;
1249   }
1250 
1251   // FIXME: We have to be conservative here and assume that flat operations
1252   // will access scratch.  If we had access to the IR function, then we
1253   // could determine if any private memory was used in the function.
1254   if (!Subtarget->hasUnalignedScratchAccess() &&
1255       (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
1256        AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
1257     bool AlignedBy4 = Align >= 4;
1258     if (IsFast)
1259       *IsFast = AlignedBy4;
1260 
1261     return AlignedBy4;
1262   }
1263 
1264   if (Subtarget->hasUnalignedBufferAccess()) {
1265     // If we have an uniform constant load, it still requires using a slow
1266     // buffer instruction if unaligned.
1267     if (IsFast) {
1268       *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
1269                  AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
1270         (Align % 4 == 0) : true;
1271     }
1272 
1273     return true;
1274   }
1275 
1276   // Smaller than dword value must be aligned.
1277   if (Size < 32)
1278     return false;
1279 
1280   // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
1281   // byte-address are ignored, thus forcing Dword alignment.
1282   // This applies to private, global, and constant memory.
1283   if (IsFast)
1284     *IsFast = true;
1285 
1286   return Size >= 32 && Align >= 4;
1287 }
1288 
1289 bool SITargetLowering::allowsMisalignedMemoryAccesses(
1290     EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
1291     bool *IsFast) const {
1292   if (IsFast)
1293     *IsFast = false;
1294 
1295   // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
1296   // which isn't a simple VT.
1297   // Until MVT is extended to handle this, simply check for the size and
1298   // rely on the condition below: allow accesses if the size is a multiple of 4.
1299   if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
1300                            VT.getStoreSize() > 16)) {
1301     return false;
1302   }
1303 
1304   return allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AddrSpace,
1305                                             Align, Flags, IsFast);
1306 }
1307 
1308 EVT SITargetLowering::getOptimalMemOpType(
1309     uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
1310     bool ZeroMemset, bool MemcpyStrSrc,
1311     const AttributeList &FuncAttributes) const {
1312   // FIXME: Should account for address space here.
1313 
1314   // The default fallback uses the private pointer size as a guess for a type to
1315   // use. Make sure we switch these to 64-bit accesses.
1316 
1317   if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
1318     return MVT::v4i32;
1319 
1320   if (Size >= 8 && DstAlign >= 4)
1321     return MVT::v2i32;
1322 
1323   // Use the default.
1324   return MVT::Other;
1325 }
1326 
1327 static bool isFlatGlobalAddrSpace(unsigned AS) {
1328   return AS == AMDGPUAS::GLOBAL_ADDRESS ||
1329          AS == AMDGPUAS::FLAT_ADDRESS ||
1330          AS == AMDGPUAS::CONSTANT_ADDRESS ||
1331          AS > AMDGPUAS::MAX_AMDGPU_ADDRESS;
1332 }
1333 
1334 bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1335                                            unsigned DestAS) const {
1336   return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
1337 }
1338 
1339 bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
1340   const MemSDNode *MemNode = cast<MemSDNode>(N);
1341   const Value *Ptr = MemNode->getMemOperand()->getValue();
1342   const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
1343   return I && I->getMetadata("amdgpu.noclobber");
1344 }
1345 
1346 bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS,
1347                                            unsigned DestAS) const {
1348   // Flat -> private/local is a simple truncate.
  // Flat -> global is a no-op.
1350   if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
1351     return true;
1352 
1353   return isNoopAddrSpaceCast(SrcAS, DestAS);
1354 }
1355 
1356 bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
1357   const MemSDNode *MemNode = cast<MemSDNode>(N);
1358 
1359   return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
1360 }
1361 
1362 TargetLoweringBase::LegalizeTypeAction
1363 SITargetLowering::getPreferredVectorAction(MVT VT) const {
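  // Note: the intent is that small-element vectors end up in a form that can
  // use packed 16-bit operations where available; e.g. an illegal v4i16 is
  // split into v2i16 halves, while an odd-sized v3i16 is widened (to v4i16)
  // rather than scalarized.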
1364   int NumElts = VT.getVectorNumElements();
1365   if (NumElts != 1 && VT.getScalarType().bitsLE(MVT::i16))
1366     return VT.isPow2VectorType() ? TypeSplitVector : TypeWidenVector;
1367   return TargetLoweringBase::getPreferredVectorAction(VT);
1368 }
1369 
1370 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
1371                                                          Type *Ty) const {
1372   // FIXME: Could be smarter if called for vector constants.
1373   return true;
1374 }
1375 
1376 bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
1377   if (Subtarget->has16BitInsts() && VT == MVT::i16) {
1378     switch (Op) {
1379     case ISD::LOAD:
1380     case ISD::STORE:
1381 
1382     // These operations are done with 32-bit instructions anyway.
1383     case ISD::AND:
1384     case ISD::OR:
1385     case ISD::XOR:
1386     case ISD::SELECT:
1387       // TODO: Extensions?
1388       return true;
1389     default:
1390       return false;
1391     }
1392   }
1393 
1394   // SimplifySetCC uses this function to determine whether or not it should
1395   // create setcc with i1 operands.  We don't have instructions for i1 setcc.
1396   if (VT == MVT::i1 && Op == ISD::SETCC)
1397     return false;
1398 
1399   return TargetLowering::isTypeDesirableForOp(Op, VT);
1400 }
1401 
1402 SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1403                                                    const SDLoc &SL,
1404                                                    SDValue Chain,
1405                                                    uint64_t Offset) const {
1406   const DataLayout &DL = DAG.getDataLayout();
1407   MachineFunction &MF = DAG.getMachineFunction();
1408   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1409 
1410   const ArgDescriptor *InputPtrReg;
1411   const TargetRegisterClass *RC;
1412 
1413   std::tie(InputPtrReg, RC)
1414     = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
1415 
1416   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
1417   MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
1418   SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
1419     MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1420 
1421   return DAG.getObjectPtrOffset(SL, BasePtr, Offset);
1422 }
1423 
1424 SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1425                                             const SDLoc &SL) const {
1426   uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
1427                                                FIRST_IMPLICIT);
1428   return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1429 }
1430 
1431 SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1432                                          const SDLoc &SL, SDValue Val,
1433                                          bool Signed,
1434                                          const ISD::InputArg *Arg) const {
1435   // First, if it is a widened vector, narrow it.
1436   if (VT.isVector() &&
1437       VT.getVectorNumElements() != MemVT.getVectorNumElements()) {
1438     EVT NarrowedVT =
1439         EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(),
1440                          VT.getVectorNumElements());
1441     Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val,
1442                       DAG.getConstant(0, SL, MVT::i32));
1443   }
1444 
1445   // Then convert the vector elements or scalar value.
1446   if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1447       VT.bitsLT(MemVT)) {
1448     unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1449     Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1450   }
1451 
1452   if (MemVT.isFloatingPoint())
1453     Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
1454   else if (Signed)
1455     Val = DAG.getSExtOrTrunc(Val, SL, VT);
1456   else
1457     Val = DAG.getZExtOrTrunc(Val, SL, VT);
1458 
1459   return Val;
1460 }
1461 
1462 SDValue SITargetLowering::lowerKernargMemParameter(
1463   SelectionDAG &DAG, EVT VT, EVT MemVT,
1464   const SDLoc &SL, SDValue Chain,
1465   uint64_t Offset, unsigned Align, bool Signed,
1466   const ISD::InputArg *Arg) const {
1467   Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
1468   PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
1469   MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
1470 
1471   // Try to avoid using an extload by loading earlier than the argument address,
1472   // and extracting the relevant bits. The load should hopefully be merged with
1473   // the previous argument.
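  //
  // For example, an i16 argument at byte offset 6 is handled below by loading
  // the dword at offset 4 (AlignDownOffset = 4, OffsetDiff = 2), shifting
  // right by OffsetDiff * 8 = 16 bits and truncating to i16.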
1474   if (MemVT.getStoreSize() < 4 && Align < 4) {
1475     // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
1476     int64_t AlignDownOffset = alignDown(Offset, 4);
1477     int64_t OffsetDiff = Offset - AlignDownOffset;
1478 
1479     EVT IntVT = MemVT.changeTypeToInteger();
1480 
1481     // TODO: If we passed in the base kernel offset we could have a better
1482     // alignment than 4, but we don't really need it.
1483     SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
1484     SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4,
1485                                MachineMemOperand::MODereferenceable |
1486                                MachineMemOperand::MOInvariant);
1487 
1488     SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
1489     SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
1490 
1491     SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
1492     ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
1493     ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);
1494 
1495 
1496     return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
1497   }
1498 
1499   SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1500   SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
1501                              MachineMemOperand::MODereferenceable |
1502                              MachineMemOperand::MOInvariant);
1503 
1504   SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
1505   return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
1506 }
1507 
1508 SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1509                                               const SDLoc &SL, SDValue Chain,
1510                                               const ISD::InputArg &Arg) const {
1511   MachineFunction &MF = DAG.getMachineFunction();
1512   MachineFrameInfo &MFI = MF.getFrameInfo();
1513 
1514   if (Arg.Flags.isByVal()) {
1515     unsigned Size = Arg.Flags.getByValSize();
1516     int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1517     return DAG.getFrameIndex(FrameIdx, MVT::i32);
1518   }
1519 
1520   unsigned ArgOffset = VA.getLocMemOffset();
1521   unsigned ArgSize = VA.getValVT().getStoreSize();
1522 
1523   int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1524 
1525   // Create load nodes to retrieve arguments from the stack.
1526   SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1527   SDValue ArgValue;
1528 
  // For NON_EXTLOAD, the generic code in getLoad asserts that ValVT == MemVT.
1530   ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
1531   MVT MemVT = VA.getValVT();
1532 
1533   switch (VA.getLocInfo()) {
1534   default:
1535     break;
1536   case CCValAssign::BCvt:
1537     MemVT = VA.getLocVT();
1538     break;
1539   case CCValAssign::SExt:
1540     ExtType = ISD::SEXTLOAD;
1541     break;
1542   case CCValAssign::ZExt:
1543     ExtType = ISD::ZEXTLOAD;
1544     break;
1545   case CCValAssign::AExt:
1546     ExtType = ISD::EXTLOAD;
1547     break;
1548   }
1549 
1550   ArgValue = DAG.getExtLoad(
1551     ExtType, SL, VA.getLocVT(), Chain, FIN,
1552     MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
1553     MemVT);
1554   return ArgValue;
1555 }
1556 
1557 SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1558   const SIMachineFunctionInfo &MFI,
1559   EVT VT,
1560   AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
1561   const ArgDescriptor *Reg;
1562   const TargetRegisterClass *RC;
1563 
1564   std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1565   return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1566 }
1567 
1568 static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
1569                                    CallingConv::ID CallConv,
1570                                    ArrayRef<ISD::InputArg> Ins,
1571                                    BitVector &Skipped,
1572                                    FunctionType *FType,
1573                                    SIMachineFunctionInfo *Info) {
1574   for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
1575     const ISD::InputArg *Arg = &Ins[I];
1576 
1577     assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
1578            "vector type argument should have been split");
1579 
1580     // First check if it's a PS input addr.
1581     if (CallConv == CallingConv::AMDGPU_PS &&
1582         !Arg->Flags.isInReg() && PSInputNum <= 15) {
1583       bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);
1584 
1585       // Inconveniently only the first part of the split is marked as isSplit,
1586       // so skip to the end. We only want to increment PSInputNum once for the
1587       // entire split argument.
1588       if (Arg->Flags.isSplit()) {
1589         while (!Arg->Flags.isSplitEnd()) {
1590           assert((!Arg->VT.isVector() ||
1591                   Arg->VT.getScalarSizeInBits() == 16) &&
1592                  "unexpected vector split in ps argument type");
1593           if (!SkipArg)
1594             Splits.push_back(*Arg);
1595           Arg = &Ins[++I];
1596         }
1597       }
1598 
1599       if (SkipArg) {
1600         // We can safely skip PS inputs.
1601         Skipped.set(Arg->getOrigArgIndex());
1602         ++PSInputNum;
1603         continue;
1604       }
1605 
1606       Info->markPSInputAllocated(PSInputNum);
1607       if (Arg->Used)
1608         Info->markPSInputEnabled(PSInputNum);
1609 
1610       ++PSInputNum;
1611     }
1612 
1613     Splits.push_back(*Arg);
1614   }
1615 }
1616 
1617 // Allocate special inputs passed in VGPRs.
1618 void SITargetLowering::allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1619                                                       MachineFunction &MF,
1620                                                       const SIRegisterInfo &TRI,
1621                                                       SIMachineFunctionInfo &Info) const {
1622   const LLT S32 = LLT::scalar(32);
1623   MachineRegisterInfo &MRI = MF.getRegInfo();
1624 
1625   if (Info.hasWorkItemIDX()) {
1626     Register Reg = AMDGPU::VGPR0;
1627     MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1628 
1629     CCInfo.AllocateReg(Reg);
1630     Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
1631   }
1632 
1633   if (Info.hasWorkItemIDY()) {
1634     Register Reg = AMDGPU::VGPR1;
1635     MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1636 
1637     CCInfo.AllocateReg(Reg);
1638     Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
1639   }
1640 
1641   if (Info.hasWorkItemIDZ()) {
1642     Register Reg = AMDGPU::VGPR2;
1643     MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1644 
1645     CCInfo.AllocateReg(Reg);
1646     Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
1647   }
1648 }
1649 
// Try to allocate a VGPR at the end of the argument list, or if no argument
// VGPRs are left, allocate a stack slot instead.
// If \p Mask is given, it indicates the bitfield position in the register.
// If \p Arg is given, reuse it with the new \p Mask instead of allocating a
// new register.
1654 static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u,
1655                                          ArgDescriptor Arg = ArgDescriptor()) {
1656   if (Arg.isSet())
1657     return ArgDescriptor::createArg(Arg, Mask);
1658 
1659   ArrayRef<MCPhysReg> ArgVGPRs
1660     = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1661   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1662   if (RegIdx == ArgVGPRs.size()) {
1663     // Spill to stack required.
1664     int64_t Offset = CCInfo.AllocateStack(4, 4);
1665 
1666     return ArgDescriptor::createStack(Offset, Mask);
1667   }
1668 
1669   unsigned Reg = ArgVGPRs[RegIdx];
1670   Reg = CCInfo.AllocateReg(Reg);
1671   assert(Reg != AMDGPU::NoRegister);
1672 
1673   MachineFunction &MF = CCInfo.getMachineFunction();
1674   Register LiveInVReg = MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1675   MF.getRegInfo().setType(LiveInVReg, LLT::scalar(32));
1676   return ArgDescriptor::createRegister(Reg, Mask);
1677 }
1678 
1679 static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
1680                                              const TargetRegisterClass *RC,
1681                                              unsigned NumArgRegs) {
1682   ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
1683   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1684   if (RegIdx == ArgSGPRs.size())
1685     report_fatal_error("ran out of SGPRs for arguments");
1686 
1687   unsigned Reg = ArgSGPRs[RegIdx];
1688   Reg = CCInfo.AllocateReg(Reg);
1689   assert(Reg != AMDGPU::NoRegister);
1690 
1691   MachineFunction &MF = CCInfo.getMachineFunction();
1692   MF.addLiveIn(Reg, RC);
1693   return ArgDescriptor::createRegister(Reg);
1694 }
1695 
1696 static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
1697   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1698 }
1699 
1700 static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
1701   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1702 }
1703 
1704 void SITargetLowering::allocateSpecialInputVGPRs(CCState &CCInfo,
1705                                                  MachineFunction &MF,
1706                                                  const SIRegisterInfo &TRI,
1707                                                  SIMachineFunctionInfo &Info) const {
1708   const unsigned Mask = 0x3ff;
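  // Note: when the workitem IDs are packed into a single VGPR, the layout
  // matching the shifts below is X in bits [9:0], Y in bits [19:10] and Z in
  // bits [29:20].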
1709   ArgDescriptor Arg;
1710 
1711   if (Info.hasWorkItemIDX()) {
1712     Arg = allocateVGPR32Input(CCInfo, Mask);
1713     Info.setWorkItemIDX(Arg);
1714   }
1715 
1716   if (Info.hasWorkItemIDY()) {
1717     Arg = allocateVGPR32Input(CCInfo, Mask << 10, Arg);
1718     Info.setWorkItemIDY(Arg);
1719   }
1720 
1721   if (Info.hasWorkItemIDZ())
1722     Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo, Mask << 20, Arg));
1723 }
1724 
1725 void SITargetLowering::allocateSpecialInputSGPRs(
1726   CCState &CCInfo,
1727   MachineFunction &MF,
1728   const SIRegisterInfo &TRI,
1729   SIMachineFunctionInfo &Info) const {
1730   auto &ArgInfo = Info.getArgInfo();
1731 
1732   // TODO: Unify handling with private memory pointers.
1733 
1734   if (Info.hasDispatchPtr())
1735     ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1736 
1737   if (Info.hasQueuePtr())
1738     ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1739 
1740   if (Info.hasKernargSegmentPtr())
1741     ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);
1742 
1743   if (Info.hasDispatchID())
1744     ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1745 
1746   // flat_scratch_init is not applicable for non-kernel functions.
1747 
1748   if (Info.hasWorkGroupIDX())
1749     ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1750 
1751   if (Info.hasWorkGroupIDY())
1752     ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1753 
1754   if (Info.hasWorkGroupIDZ())
1755     ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
1756 
1757   if (Info.hasImplicitArgPtr())
1758     ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
1759 }
1760 
1761 // Allocate special inputs passed in user SGPRs.
1762 void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo,
1763                                             MachineFunction &MF,
1764                                             const SIRegisterInfo &TRI,
1765                                             SIMachineFunctionInfo &Info) const {
1766   if (Info.hasImplicitBufferPtr()) {
1767     unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1768     MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1769     CCInfo.AllocateReg(ImplicitBufferPtrReg);
1770   }
1771 
1772   // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1773   if (Info.hasPrivateSegmentBuffer()) {
1774     unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1775     MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1776     CCInfo.AllocateReg(PrivateSegmentBufferReg);
1777   }
1778 
1779   if (Info.hasDispatchPtr()) {
1780     unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1781     MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1782     CCInfo.AllocateReg(DispatchPtrReg);
1783   }
1784 
1785   if (Info.hasQueuePtr()) {
1786     unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1787     MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1788     CCInfo.AllocateReg(QueuePtrReg);
1789   }
1790 
1791   if (Info.hasKernargSegmentPtr()) {
1792     MachineRegisterInfo &MRI = MF.getRegInfo();
1793     Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
1794     CCInfo.AllocateReg(InputPtrReg);
1795 
1796     Register VReg = MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1797     MRI.setType(VReg, LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
1798   }
1799 
1800   if (Info.hasDispatchID()) {
1801     unsigned DispatchIDReg = Info.addDispatchID(TRI);
1802     MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1803     CCInfo.AllocateReg(DispatchIDReg);
1804   }
1805 
1806   if (Info.hasFlatScratchInit()) {
1807     unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1808     MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1809     CCInfo.AllocateReg(FlatScratchInitReg);
1810   }
1811 
1812   // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1813   // these from the dispatch pointer.
1814 }
1815 
1816 // Allocate special input registers that are initialized per-wave.
1817 void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo,
1818                                            MachineFunction &MF,
1819                                            SIMachineFunctionInfo &Info,
1820                                            CallingConv::ID CallConv,
1821                                            bool IsShader) const {
1822   if (Info.hasWorkGroupIDX()) {
1823     unsigned Reg = Info.addWorkGroupIDX();
1824     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1825     CCInfo.AllocateReg(Reg);
1826   }
1827 
1828   if (Info.hasWorkGroupIDY()) {
1829     unsigned Reg = Info.addWorkGroupIDY();
1830     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1831     CCInfo.AllocateReg(Reg);
1832   }
1833 
1834   if (Info.hasWorkGroupIDZ()) {
1835     unsigned Reg = Info.addWorkGroupIDZ();
1836     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1837     CCInfo.AllocateReg(Reg);
1838   }
1839 
1840   if (Info.hasWorkGroupInfo()) {
1841     unsigned Reg = Info.addWorkGroupInfo();
1842     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1843     CCInfo.AllocateReg(Reg);
1844   }
1845 
1846   if (Info.hasPrivateSegmentWaveByteOffset()) {
1847     // Scratch wave offset passed in system SGPR.
1848     unsigned PrivateSegmentWaveByteOffsetReg;
1849 
1850     if (IsShader) {
1851       PrivateSegmentWaveByteOffsetReg =
1852         Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
1853 
1854       // This is true if the scratch wave byte offset doesn't have a fixed
1855       // location.
1856       if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1857         PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1858         Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1859       }
1860     } else
1861       PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1862 
1863     MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1864     CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1865   }
1866 }
1867 
1868 static void reservePrivateMemoryRegs(const TargetMachine &TM,
1869                                      MachineFunction &MF,
1870                                      const SIRegisterInfo &TRI,
1871                                      SIMachineFunctionInfo &Info) {
  // Now that we've figured out where the scratch register inputs are, see if
  // we should reserve the arguments and use them directly.
1874   MachineFrameInfo &MFI = MF.getFrameInfo();
1875   bool HasStackObjects = MFI.hasStackObjects();
1876   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1877 
1878   // Record that we know we have non-spill stack objects so we don't need to
1879   // check all stack objects later.
1880   if (HasStackObjects)
1881     Info.setHasNonSpillStackObjects(true);
1882 
1883   // Everything live out of a block is spilled with fast regalloc, so it's
1884   // almost certain that spilling will be required.
1885   if (TM.getOptLevel() == CodeGenOpt::None)
1886     HasStackObjects = true;
1887 
1888   // For now assume stack access is needed in any callee functions, so we need
1889   // the scratch registers to pass in.
1890   bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1891 
1892   if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) {
1893     // If we have stack objects, we unquestionably need the private buffer
1894     // resource. For the Code Object V2 ABI, this will be the first 4 user
1895     // SGPR inputs. We can reserve those and use them directly.
1896 
1897     Register PrivateSegmentBufferReg =
1898         Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
1899     Info.setScratchRSrcReg(PrivateSegmentBufferReg);
1900   } else {
1901     unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
    // We tentatively reserve the last available registers (skipping those
    // which may contain VCC, FLAT_SCR, and XNACK). After register allocation,
    // we'll replace these with the registers immediately following the ones
    // that were really allocated. In the prologue, copies will be inserted
    // from the argument to these reserved registers.
1907 
1908     // Without HSA, relocations are used for the scratch pointer and the
1909     // buffer resource setup is always inserted in the prologue. Scratch wave
1910     // offset is still in an input SGPR.
1911     Info.setScratchRSrcReg(ReservedBufferReg);
1912   }
1913 
1914   // hasFP should be accurate for kernels even before the frame is finalized.
1915   if (ST.getFrameLowering()->hasFP(MF)) {
1916     MachineRegisterInfo &MRI = MF.getRegInfo();
1917 
1918     // Try to use s32 as the SP, but move it if it would interfere with input
1919     // arguments. This won't work with calls though.
1920     //
1921     // FIXME: Move SP to avoid any possible inputs, or find a way to spill input
1922     // registers.
1923     if (!MRI.isLiveIn(AMDGPU::SGPR32)) {
1924       Info.setStackPtrOffsetReg(AMDGPU::SGPR32);
1925     } else {
1926       assert(AMDGPU::isShader(MF.getFunction().getCallingConv()));
1927 
1928       if (MFI.hasCalls())
1929         report_fatal_error("call in graphics shader with too many input SGPRs");
1930 
1931       for (unsigned Reg : AMDGPU::SGPR_32RegClass) {
1932         if (!MRI.isLiveIn(Reg)) {
1933           Info.setStackPtrOffsetReg(Reg);
1934           break;
1935         }
1936       }
1937 
1938       if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG)
1939         report_fatal_error("failed to find register for SP");
1940     }
1941 
1942     if (MFI.hasCalls()) {
1943       Info.setScratchWaveOffsetReg(AMDGPU::SGPR33);
1944       Info.setFrameOffsetReg(AMDGPU::SGPR33);
1945     } else {
1946       unsigned ReservedOffsetReg =
1947         TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1948       Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1949       Info.setFrameOffsetReg(ReservedOffsetReg);
1950     }
1951   } else if (RequiresStackAccess) {
1952     assert(!MFI.hasCalls());
1953     // We know there are accesses and they will be done relative to SP, so just
1954     // pin it to the input.
1955     //
1956     // FIXME: Should not do this if inline asm is reading/writing these
1957     // registers.
1958     Register PreloadedSP = Info.getPreloadedReg(
1959         AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
1960 
1961     Info.setStackPtrOffsetReg(PreloadedSP);
1962     Info.setScratchWaveOffsetReg(PreloadedSP);
1963     Info.setFrameOffsetReg(PreloadedSP);
1964   } else {
1965     assert(!MFI.hasCalls());
1966 
1967     // There may not be stack access at all. There may still be spills, or
1968     // access of a constant pointer (in which cases an extra copy will be
1969     // emitted in the prolog).
1970     unsigned ReservedOffsetReg
1971       = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1972     Info.setStackPtrOffsetReg(ReservedOffsetReg);
1973     Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1974     Info.setFrameOffsetReg(ReservedOffsetReg);
1975   }
1976 }
1977 
1978 bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
1979   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1980   return !Info->isEntryFunction();
1981 }
1982 
1983 void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
1984 
1985 }
1986 
1987 void SITargetLowering::insertCopiesSplitCSR(
1988   MachineBasicBlock *Entry,
1989   const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1990   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1991 
1992   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1993   if (!IStart)
1994     return;
1995 
1996   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1997   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1998   MachineBasicBlock::iterator MBBI = Entry->begin();
1999   for (const MCPhysReg *I = IStart; *I; ++I) {
2000     const TargetRegisterClass *RC = nullptr;
2001     if (AMDGPU::SReg_64RegClass.contains(*I))
2002       RC = &AMDGPU::SGPR_64RegClass;
2003     else if (AMDGPU::SReg_32RegClass.contains(*I))
2004       RC = &AMDGPU::SGPR_32RegClass;
2005     else
2006       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2007 
2008     Register NewVR = MRI->createVirtualRegister(RC);
2009     // Create copy from CSR to a virtual register.
2010     Entry->addLiveIn(*I);
2011     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
2012       .addReg(*I);
2013 
2014     // Insert the copy-back instructions right before the terminator.
2015     for (auto *Exit : Exits)
2016       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
2017               TII->get(TargetOpcode::COPY), *I)
2018         .addReg(NewVR);
2019   }
2020 }
2021 
2022 SDValue SITargetLowering::LowerFormalArguments(
2023     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
2024     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2025     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2026   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2027 
2028   MachineFunction &MF = DAG.getMachineFunction();
2029   const Function &Fn = MF.getFunction();
2030   FunctionType *FType = MF.getFunction().getFunctionType();
2031   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2032 
2033   if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
2034     DiagnosticInfoUnsupported NoGraphicsHSA(
2035         Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
2036     DAG.getContext()->diagnose(NoGraphicsHSA);
2037     return DAG.getEntryNode();
2038   }
2039 
2040   SmallVector<ISD::InputArg, 16> Splits;
2041   SmallVector<CCValAssign, 16> ArgLocs;
2042   BitVector Skipped(Ins.size());
2043   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
2044                  *DAG.getContext());
2045 
2046   bool IsShader = AMDGPU::isShader(CallConv);
2047   bool IsKernel = AMDGPU::isKernel(CallConv);
2048   bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
2049 
2050   if (IsShader) {
2051     processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
2052 
2053     // At least one interpolation mode must be enabled or else the GPU will
2054     // hang.
2055     //
2056     // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
2057     // set PSInputAddr, the user wants to enable some bits after the compilation
2058     // based on run-time states. Since we can't know what the final PSInputEna
    // will look like, we shouldn't do anything here and the user should take
2060     // responsibility for the correct programming.
2061     //
2062     // Otherwise, the following restrictions apply:
2063     // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
2064     // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
2065     //   enabled too.
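    //
    // For example, a pixel shader that reads none of its interpolants ends up
    // with an empty PSInputAddr; in that case we force-enable input 0 (the
    // PERSP_SAMPLE I/J pair) below and reserve VGPR0/VGPR1 for it so that
    // PSInputEna is non-zero and the wave can launch.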
2066     if (CallConv == CallingConv::AMDGPU_PS) {
2067       if ((Info->getPSInputAddr() & 0x7F) == 0 ||
2068            ((Info->getPSInputAddr() & 0xF) == 0 &&
2069             Info->isPSInputAllocated(11))) {
2070         CCInfo.AllocateReg(AMDGPU::VGPR0);
2071         CCInfo.AllocateReg(AMDGPU::VGPR1);
2072         Info->markPSInputAllocated(0);
2073         Info->markPSInputEnabled(0);
2074       }
2075       if (Subtarget->isAmdPalOS()) {
2076         // For isAmdPalOS, the user does not enable some bits after compilation
2077         // based on run-time states; the register values being generated here are
2078         // the final ones set in hardware. Therefore we need to apply the
2079         // workaround to PSInputAddr and PSInputEnable together.  (The case where
2080         // a bit is set in PSInputAddr but not PSInputEnable is where the
2081         // frontend set up an input arg for a particular interpolation mode, but
2082         // nothing uses that input arg. Really we should have an earlier pass
2083         // that removes such an arg.)
2084         unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
2085         if ((PsInputBits & 0x7F) == 0 ||
2086             ((PsInputBits & 0xF) == 0 &&
2087              (PsInputBits >> 11 & 1)))
2088           Info->markPSInputEnabled(
2089               countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
2090       }
2091     }
2092 
2093     assert(!Info->hasDispatchPtr() &&
2094            !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
2095            !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
2096            !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
2097            !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
2098            !Info->hasWorkItemIDZ());
2099   } else if (IsKernel) {
2100     assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
2101   } else {
2102     Splits.append(Ins.begin(), Ins.end());
2103   }
2104 
2105   if (IsEntryFunc) {
2106     allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
2107     allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
2108   }
2109 
2110   if (IsKernel) {
2111     analyzeFormalArgumentsCompute(CCInfo, Ins);
2112   } else {
2113     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
2114     CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
2115   }
2116 
2117   SmallVector<SDValue, 16> Chains;
2118 
2119   // FIXME: This is the minimum kernel argument alignment. We should improve
2120   // this to the maximum alignment of the arguments.
2121   //
2122   // FIXME: Alignment of explicit arguments totally broken with non-0 explicit
2123   // kern arg offset.
2124   const unsigned KernelArgBaseAlign = 16;
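  //
  // For example, with the 16-byte base alignment an argument at byte offset
  // 36 gets MinAlign(16, 36) == 4 below, while one at offset 32 gets 16.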
2125 
  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
2127     const ISD::InputArg &Arg = Ins[i];
2128     if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
2129       InVals.push_back(DAG.getUNDEF(Arg.VT));
2130       continue;
2131     }
2132 
2133     CCValAssign &VA = ArgLocs[ArgIdx++];
2134     MVT VT = VA.getLocVT();
2135 
2136     if (IsEntryFunc && VA.isMemLoc()) {
2137       VT = Ins[i].VT;
2138       EVT MemVT = VA.getLocVT();
2139 
2140       const uint64_t Offset = VA.getLocMemOffset();
2141       unsigned Align = MinAlign(KernelArgBaseAlign, Offset);
2142 
2143       SDValue Arg = lowerKernargMemParameter(
2144         DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]);
2145       Chains.push_back(Arg.getValue(1));
2146 
2147       auto *ParamTy =
2148         dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
2149       if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
2150           ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
2151                       ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) {
        // On SI, local pointers are just offsets into LDS, so they always fit
        // in 16 bits. On CI and newer they could potentially be real pointers,
        // so we can't guarantee their size.
2155         Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
2156                           DAG.getValueType(MVT::i16));
2157       }
2158 
2159       InVals.push_back(Arg);
2160       continue;
2161     } else if (!IsEntryFunc && VA.isMemLoc()) {
2162       SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
2163       InVals.push_back(Val);
2164       if (!Arg.Flags.isByVal())
2165         Chains.push_back(Val.getValue(1));
2166       continue;
2167     }
2168 
2169     assert(VA.isRegLoc() && "Parameter must be in a register!");
2170 
2171     Register Reg = VA.getLocReg();
2172     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2173     EVT ValVT = VA.getValVT();
2174 
2175     Reg = MF.addLiveIn(Reg, RC);
2176     SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
2177 
2178     if (Arg.Flags.isSRet()) {
2179       // The return object should be reasonably addressable.
2180 
      // FIXME: This helps when the return is a real sret. If it is an
      // automatically inserted sret (i.e. CanLowerReturn returns false), an
2183       // extra copy is inserted in SelectionDAGBuilder which obscures this.
2184       unsigned NumBits
2185         = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex();
2186       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2187         DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
2188     }
2189 
2190     // If this is an 8 or 16-bit value, it is really passed promoted
2191     // to 32 bits. Insert an assert[sz]ext to capture this, then
2192     // truncate to the right size.
2193     switch (VA.getLocInfo()) {
2194     case CCValAssign::Full:
2195       break;
2196     case CCValAssign::BCvt:
2197       Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
2198       break;
2199     case CCValAssign::SExt:
2200       Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
2201                         DAG.getValueType(ValVT));
2202       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2203       break;
2204     case CCValAssign::ZExt:
2205       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2206                         DAG.getValueType(ValVT));
2207       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2208       break;
2209     case CCValAssign::AExt:
2210       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2211       break;
2212     default:
2213       llvm_unreachable("Unknown loc info!");
2214     }
2215 
2216     InVals.push_back(Val);
2217   }
2218 
2219   if (!IsEntryFunc) {
2220     // Special inputs come after user arguments.
2221     allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
2222   }
2223 
2224   // Start adding system SGPRs.
2225   if (IsEntryFunc) {
2226     allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
2227   } else {
2228     CCInfo.AllocateReg(Info->getScratchRSrcReg());
2229     CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
2230     CCInfo.AllocateReg(Info->getFrameOffsetReg());
2231     allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
2232   }
2233 
2234   auto &ArgUsageInfo =
2235     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2236   ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
2237 
2238   unsigned StackArgSize = CCInfo.getNextStackOffset();
2239   Info->setBytesInStackArgArea(StackArgSize);
2240 
2241   return Chains.empty() ? Chain :
2242     DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
2243 }
2244 
2245 // TODO: If return values can't fit in registers, we should return as many as
2246 // possible in registers before passing on stack.
2247 bool SITargetLowering::CanLowerReturn(
2248   CallingConv::ID CallConv,
2249   MachineFunction &MF, bool IsVarArg,
2250   const SmallVectorImpl<ISD::OutputArg> &Outs,
2251   LLVMContext &Context) const {
2252   // Replacing returns with sret/stack usage doesn't make sense for shaders.
2253   // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
2254   // for shaders. Vector types should be explicitly handled by CC.
2255   if (AMDGPU::isEntryFunctionCC(CallConv))
2256     return true;
2257 
2258   SmallVector<CCValAssign, 16> RVLocs;
2259   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2260   return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
2261 }
2262 
2263 SDValue
2264 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2265                               bool isVarArg,
2266                               const SmallVectorImpl<ISD::OutputArg> &Outs,
2267                               const SmallVectorImpl<SDValue> &OutVals,
2268                               const SDLoc &DL, SelectionDAG &DAG) const {
2269   MachineFunction &MF = DAG.getMachineFunction();
2270   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2271 
2272   if (AMDGPU::isKernel(CallConv)) {
2273     return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
2274                                              OutVals, DL, DAG);
2275   }
2276 
2277   bool IsShader = AMDGPU::isShader(CallConv);
2278 
2279   Info->setIfReturnsVoid(Outs.empty());
2280   bool IsWaveEnd = Info->returnsVoid() && IsShader;
2281 
2282   // CCValAssign - represent the assignment of the return value to a location.
2283   SmallVector<CCValAssign, 48> RVLocs;
2284   SmallVector<ISD::OutputArg, 48> Splits;
2285 
2286   // CCState - Info about the registers and stack slots.
2287   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2288                  *DAG.getContext());
2289 
2290   // Analyze outgoing return values.
2291   CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2292 
2293   SDValue Flag;
2294   SmallVector<SDValue, 48> RetOps;
2295   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2296 
2297   // Add return address for callable functions.
2298   if (!Info->isEntryFunction()) {
2299     const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2300     SDValue ReturnAddrReg = CreateLiveInRegister(
2301       DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2302 
2303     SDValue ReturnAddrVirtualReg = DAG.getRegister(
2304         MF.getRegInfo().createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass),
2305         MVT::i64);
2306     Chain =
2307         DAG.getCopyToReg(Chain, DL, ReturnAddrVirtualReg, ReturnAddrReg, Flag);
2308     Flag = Chain.getValue(1);
2309     RetOps.push_back(ReturnAddrVirtualReg);
2310   }
2311 
2312   // Copy the result values into the output registers.
2313   for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
2314        ++I, ++RealRVLocIdx) {
2315     CCValAssign &VA = RVLocs[I];
2316     assert(VA.isRegLoc() && "Can only return in registers!");
2317     // TODO: Partially return in registers if return values don't fit.
2318     SDValue Arg = OutVals[RealRVLocIdx];
2319 
2320     // Copied from other backends.
2321     switch (VA.getLocInfo()) {
2322     case CCValAssign::Full:
2323       break;
2324     case CCValAssign::BCvt:
2325       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2326       break;
2327     case CCValAssign::SExt:
2328       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2329       break;
2330     case CCValAssign::ZExt:
2331       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2332       break;
2333     case CCValAssign::AExt:
2334       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2335       break;
2336     default:
2337       llvm_unreachable("Unknown loc info!");
2338     }
2339 
2340     Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2341     Flag = Chain.getValue(1);
2342     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2343   }
2344 
2345   // FIXME: Does sret work properly?
2346   if (!Info->isEntryFunction()) {
2347     const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2348     const MCPhysReg *I =
2349       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2350     if (I) {
2351       for (; *I; ++I) {
2352         if (AMDGPU::SReg_64RegClass.contains(*I))
2353           RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2354         else if (AMDGPU::SReg_32RegClass.contains(*I))
2355           RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2356         else
2357           llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2358       }
2359     }
2360   }
2361 
2362   // Update chain and glue.
2363   RetOps[0] = Chain;
2364   if (Flag.getNode())
2365     RetOps.push_back(Flag);
2366 
2367   unsigned Opc = AMDGPUISD::ENDPGM;
2368   if (!IsWaveEnd)
2369     Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
2370   return DAG.getNode(Opc, DL, MVT::Other, RetOps);
2371 }
2372 
2373 SDValue SITargetLowering::LowerCallResult(
2374     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2375     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2376     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2377     SDValue ThisVal) const {
2378   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2379 
2380   // Assign locations to each value returned by this call.
2381   SmallVector<CCValAssign, 16> RVLocs;
2382   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2383                  *DAG.getContext());
2384   CCInfo.AnalyzeCallResult(Ins, RetCC);
2385 
2386   // Copy all of the result registers out of their specified physreg.
2387   for (unsigned i = 0; i != RVLocs.size(); ++i) {
2388     CCValAssign VA = RVLocs[i];
2389     SDValue Val;
2390 
2391     if (VA.isRegLoc()) {
2392       Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2393       Chain = Val.getValue(1);
2394       InFlag = Val.getValue(2);
2395     } else if (VA.isMemLoc()) {
2396       report_fatal_error("TODO: return values in memory");
2397     } else
2398       llvm_unreachable("unknown argument location type");
2399 
2400     switch (VA.getLocInfo()) {
2401     case CCValAssign::Full:
2402       break;
2403     case CCValAssign::BCvt:
2404       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2405       break;
2406     case CCValAssign::ZExt:
2407       Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2408                         DAG.getValueType(VA.getValVT()));
2409       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2410       break;
2411     case CCValAssign::SExt:
2412       Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2413                         DAG.getValueType(VA.getValVT()));
2414       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2415       break;
2416     case CCValAssign::AExt:
2417       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2418       break;
2419     default:
2420       llvm_unreachable("Unknown loc info!");
2421     }
2422 
2423     InVals.push_back(Val);
2424   }
2425 
2426   return Chain;
2427 }
2428 
// Add code to pass the special inputs that are required depending on the
// features used, separate from the explicit user arguments present in the IR.
2431 void SITargetLowering::passSpecialInputs(
2432     CallLoweringInfo &CLI,
2433     CCState &CCInfo,
2434     const SIMachineFunctionInfo &Info,
2435     SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2436     SmallVectorImpl<SDValue> &MemOpChains,
2437     SDValue Chain) const {
2438   // If we don't have a call site, this was a call inserted by
2439   // legalization. These can never use special inputs.
2440   if (!CLI.CS)
2441     return;
2442 
2443   const Function *CalleeFunc = CLI.CS.getCalledFunction();
2444   assert(CalleeFunc);
2445 
2446   SelectionDAG &DAG = CLI.DAG;
2447   const SDLoc &DL = CLI.DL;
2448 
2449   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2450 
2451   auto &ArgUsageInfo =
2452     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2453   const AMDGPUFunctionArgInfo &CalleeArgInfo
2454     = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2455 
2456   const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2457 
2458   // TODO: Unify with private memory register handling. This is complicated by
2459   // the fact that at least in kernels, the input argument is not necessarily
2460   // in the same location as the input.
2461   AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
2462     AMDGPUFunctionArgInfo::DISPATCH_PTR,
2463     AMDGPUFunctionArgInfo::QUEUE_PTR,
2464     AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
2465     AMDGPUFunctionArgInfo::DISPATCH_ID,
2466     AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
2467     AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
2468     AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
2469     AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
2470   };
2471 
2472   for (auto InputID : InputRegs) {
2473     const ArgDescriptor *OutgoingArg;
2474     const TargetRegisterClass *ArgRC;
2475 
2476     std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2477     if (!OutgoingArg)
2478       continue;
2479 
2480     const ArgDescriptor *IncomingArg;
2481     const TargetRegisterClass *IncomingArgRC;
2482     std::tie(IncomingArg, IncomingArgRC)
2483       = CallerArgInfo.getPreloadedValue(InputID);
2484     assert(IncomingArgRC == ArgRC);
2485 
2486     // All special arguments are ints for now.
2487     EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
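    // e.g. the 64-bit pointer inputs (DISPATCH_PTR, QUEUE_PTR, ...) live in
    // SGPR pairs and are passed as i64, while the 32-bit workgroup IDs are
    // passed as i32.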
2488     SDValue InputReg;
2489 
2490     if (IncomingArg) {
2491       InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2492     } else {
2493       // The implicit arg ptr is special because it doesn't have a corresponding
2494       // input for kernels, and is computed from the kernarg segment pointer.
2495       assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2496       InputReg = getImplicitArgPtr(DAG, DL);
2497     }
2498 
2499     if (OutgoingArg->isRegister()) {
2500       RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2501     } else {
2502       unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4);
2503       SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2504                                               SpecialArgOffset);
2505       MemOpChains.push_back(ArgStore);
2506     }
2507   }
2508 
  // Pack the workitem IDs into a single register, or pass them as-is if they
  // are already packed.
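  // The packed layout used here matches the incoming one: X in bits [9:0],
  // Y in bits [19:10] and Z in bits [29:20] of one 32-bit VGPR, which is why
  // Y and Z are shifted left by 10 and 20 before being OR'd in below.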
2511   const ArgDescriptor *OutgoingArg;
2512   const TargetRegisterClass *ArgRC;
2513 
2514   std::tie(OutgoingArg, ArgRC) =
2515     CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
2516   if (!OutgoingArg)
2517     std::tie(OutgoingArg, ArgRC) =
2518       CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
2519   if (!OutgoingArg)
2520     std::tie(OutgoingArg, ArgRC) =
2521       CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
2522   if (!OutgoingArg)
2523     return;
2524 
2525   const ArgDescriptor *IncomingArgX
2526     = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X).first;
2527   const ArgDescriptor *IncomingArgY
2528     = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y).first;
2529   const ArgDescriptor *IncomingArgZ
2530     = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z).first;
2531 
2532   SDValue InputReg;
2533   SDLoc SL;
2534 
  // If the incoming IDs are not packed, we need to pack them.
2536   if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo.WorkItemIDX)
2537     InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX);
2538 
2539   if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo.WorkItemIDY) {
2540     SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY);
2541     Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y,
2542                     DAG.getShiftAmountConstant(10, MVT::i32, SL));
2543     InputReg = InputReg.getNode() ?
2544                  DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Y) : Y;
2545   }
2546 
2547   if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo.WorkItemIDZ) {
2548     SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ);
2549     Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z,
2550                     DAG.getShiftAmountConstant(20, MVT::i32, SL));
2551     InputReg = InputReg.getNode() ?
2552                  DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Z) : Z;
2553   }
2554 
2555   if (!InputReg.getNode()) {
    // The workitem IDs are already packed, so any of the present incoming
    // arguments will carry all of the required fields.
2558     ArgDescriptor IncomingArg = ArgDescriptor::createArg(
2559       IncomingArgX ? *IncomingArgX :
2560       IncomingArgY ? *IncomingArgY :
2561                      *IncomingArgZ, ~0u);
2562     InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg);
2563   }
2564 
2565   if (OutgoingArg->isRegister()) {
2566     RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2567   } else {
2568     unsigned SpecialArgOffset = CCInfo.AllocateStack(4, 4);
2569     SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2570                                             SpecialArgOffset);
2571     MemOpChains.push_back(ArgStore);
2572   }
2573 }
2574 
2575 static bool canGuaranteeTCO(CallingConv::ID CC) {
2576   return CC == CallingConv::Fast;
2577 }
2578 
2579 /// Return true if we might ever do TCO for calls with this calling convention.
2580 static bool mayTailCallThisCC(CallingConv::ID CC) {
2581   switch (CC) {
2582   case CallingConv::C:
2583     return true;
2584   default:
2585     return canGuaranteeTCO(CC);
2586   }
2587 }
2588 
2589 bool SITargetLowering::isEligibleForTailCallOptimization(
2590     SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2591     const SmallVectorImpl<ISD::OutputArg> &Outs,
2592     const SmallVectorImpl<SDValue> &OutVals,
2593     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2594   if (!mayTailCallThisCC(CalleeCC))
2595     return false;
2596 
2597   MachineFunction &MF = DAG.getMachineFunction();
2598   const Function &CallerF = MF.getFunction();
2599   CallingConv::ID CallerCC = CallerF.getCallingConv();
2600   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2601   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2602 
  // Kernels aren't callable, and don't have a live-in return address, so it
2604   // doesn't make sense to do a tail call with entry functions.
2605   if (!CallerPreserved)
2606     return false;
2607 
2608   bool CCMatch = CallerCC == CalleeCC;
2609 
2610   if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
2611     if (canGuaranteeTCO(CalleeCC) && CCMatch)
2612       return true;
2613     return false;
2614   }
2615 
2616   // TODO: Can we handle var args?
2617   if (IsVarArg)
2618     return false;
2619 
2620   for (const Argument &Arg : CallerF.args()) {
2621     if (Arg.hasByValAttr())
2622       return false;
2623   }
2624 
2625   LLVMContext &Ctx = *DAG.getContext();
2626 
2627   // Check that the call results are passed in the same way.
2628   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2629                                   CCAssignFnForCall(CalleeCC, IsVarArg),
2630                                   CCAssignFnForCall(CallerCC, IsVarArg)))
2631     return false;
2632 
2633   // The callee has to preserve all registers the caller needs to preserve.
2634   if (!CCMatch) {
2635     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2636     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2637       return false;
2638   }
2639 
2640   // Nothing more to check if the callee is taking no arguments.
2641   if (Outs.empty())
2642     return true;
2643 
2644   SmallVector<CCValAssign, 16> ArgLocs;
2645   CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2646 
2647   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2648 
2649   const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2650   // If the stack arguments for this call do not fit into our own save area then
  // the call cannot be made a tail call.
2652   // TODO: Is this really necessary?
2653   if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2654     return false;
2655 
2656   const MachineRegisterInfo &MRI = MF.getRegInfo();
2657   return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2658 }
2659 
2660 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2661   if (!CI->isTailCall())
2662     return false;
2663 
2664   const Function *ParentFn = CI->getParent()->getParent();
2665   if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2666     return false;
2667 
2668   auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
2669   return (Attr.getValueAsString() != "true");
2670 }
2671 
2672 // The wave scratch offset register is used as the global base pointer.
2673 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2674                                     SmallVectorImpl<SDValue> &InVals) const {
2675   SelectionDAG &DAG = CLI.DAG;
2676   const SDLoc &DL = CLI.DL;
2677   SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2678   SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2679   SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2680   SDValue Chain = CLI.Chain;
2681   SDValue Callee = CLI.Callee;
2682   bool &IsTailCall = CLI.IsTailCall;
2683   CallingConv::ID CallConv = CLI.CallConv;
2684   bool IsVarArg = CLI.IsVarArg;
2685   bool IsSibCall = false;
2686   bool IsThisReturn = false;
2687   MachineFunction &MF = DAG.getMachineFunction();
2688 
2689   if (IsVarArg) {
2690     return lowerUnhandledCall(CLI, InVals,
2691                               "unsupported call to variadic function ");
2692   }
2693 
2694   if (!CLI.CS.getInstruction())
2695     report_fatal_error("unsupported libcall legalization");
2696 
2697   if (!CLI.CS.getCalledFunction()) {
2698     return lowerUnhandledCall(CLI, InVals,
2699                               "unsupported indirect call to function ");
2700   }
2701 
2702   if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2703     return lowerUnhandledCall(CLI, InVals,
2704                               "unsupported required tail call to function ");
2705   }
2706 
2707   if (AMDGPU::isShader(MF.getFunction().getCallingConv())) {
2708     // Note the issue is with the CC of the calling function, not of the call
2709     // itself.
2710     return lowerUnhandledCall(CLI, InVals,
2711                           "unsupported call from graphics shader of function ");
2712   }
2713 
2714   if (IsTailCall) {
2715     IsTailCall = isEligibleForTailCallOptimization(
2716       Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2717     if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2718       report_fatal_error("failed to perform tail call elimination on a call "
2719                          "site marked musttail");
2720     }
2721 
2722     bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2723 
    // A sibling call is one where we're under the usual C ABI and not planning
    // to change that, but can still do a tail call.
2726     if (!TailCallOpt && IsTailCall)
2727       IsSibCall = true;
2728 
2729     if (IsTailCall)
2730       ++NumTailCalls;
2731   }
2732 
2733   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2734 
2735   // Analyze operands of the call, assigning locations to each operand.
2736   SmallVector<CCValAssign, 16> ArgLocs;
2737   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2738   CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
2739 
2740   CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2741 
2742   // Get a count of how many bytes are to be pushed on the stack.
2743   unsigned NumBytes = CCInfo.getNextStackOffset();
2744 
2745   if (IsSibCall) {
2746     // Since we're not changing the ABI to make this a tail call, the memory
2747     // operands are already available in the caller's incoming argument space.
2748     NumBytes = 0;
2749   }
2750 
2751   // FPDiff is the byte offset of the call's argument area from the callee's.
2752   // Stores to callee stack arguments will be placed in FixedStackSlots offset
2753   // by this amount for a tail call. In a sibling call it must be 0 because the
2754   // caller will deallocate the entire stack and the callee still expects its
2755   // arguments to begin at SP+0. Completely unused for non-tail calls.
2756   int32_t FPDiff = 0;
2757   MachineFrameInfo &MFI = MF.getFrameInfo();
2758   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2759 
2760   // Adjust the stack pointer for the new arguments...
2761   // These operations are automatically eliminated by the prolog/epilog pass
2762   if (!IsSibCall) {
2763     Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2764 
2765     SmallVector<SDValue, 4> CopyFromChains;
2766 
2767     // In the HSA case, this should be an identity copy.
2768     SDValue ScratchRSrcReg
2769       = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2770     RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
2771     CopyFromChains.push_back(ScratchRSrcReg.getValue(1));
2772     Chain = DAG.getTokenFactor(DL, CopyFromChains);
2773   }
2774 
2775   SmallVector<SDValue, 8> MemOpChains;
2776   MVT PtrVT = MVT::i32;
2777 
2778   // Walk the register/memloc assignments, inserting copies/loads.
2779   for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2780        ++i, ++realArgIdx) {
2781     CCValAssign &VA = ArgLocs[i];
2782     SDValue Arg = OutVals[realArgIdx];
2783 
2784     // Promote the value if needed.
2785     switch (VA.getLocInfo()) {
2786     case CCValAssign::Full:
2787       break;
2788     case CCValAssign::BCvt:
2789       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2790       break;
2791     case CCValAssign::ZExt:
2792       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2793       break;
2794     case CCValAssign::SExt:
2795       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2796       break;
2797     case CCValAssign::AExt:
2798       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2799       break;
2800     case CCValAssign::FPExt:
2801       Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2802       break;
2803     default:
2804       llvm_unreachable("Unknown loc info!");
2805     }
2806 
2807     if (VA.isRegLoc()) {
2808       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2809     } else {
2810       assert(VA.isMemLoc());
2811 
2812       SDValue DstAddr;
2813       MachinePointerInfo DstInfo;
2814 
2815       unsigned LocMemOffset = VA.getLocMemOffset();
2816       int32_t Offset = LocMemOffset;
2817 
2818       SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT);
2819       unsigned Align = 0;
2820 
2821       if (IsTailCall) {
2822         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2823         unsigned OpSize = Flags.isByVal() ?
2824           Flags.getByValSize() : VA.getValVT().getStoreSize();
2825 
        // FIXME: We can do better than the minimum required byval alignment.
2827         Align = Flags.isByVal() ? Flags.getByValAlign() :
2828           MinAlign(Subtarget->getStackAlignment(), Offset);
2829 
2830         Offset = Offset + FPDiff;
2831         int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2832 
2833         DstAddr = DAG.getFrameIndex(FI, PtrVT);
2834         DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2835 
2836         // Make sure any stack arguments overlapping with where we're storing
2837         // are loaded before this eventual operation. Otherwise they'll be
2838         // clobbered.
2839 
        // FIXME: Why is this really necessary? This seems to just result in a
        // lot of code to copy the stack arguments and write them back to the
        // same locations, which are supposed to be immutable?
2843         Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2844       } else {
2845         DstAddr = PtrOff;
2846         DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
2847         Align = MinAlign(Subtarget->getStackAlignment(), LocMemOffset);
2848       }
2849 
2850       if (Outs[i].Flags.isByVal()) {
2851         SDValue SizeNode =
2852             DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2853         SDValue Cpy = DAG.getMemcpy(
2854             Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2855             /*isVol = */ false, /*AlwaysInline = */ true,
2856             /*isTailCall = */ false, DstInfo,
2857             MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
2858                 *DAG.getContext(), AMDGPUAS::PRIVATE_ADDRESS))));
2859 
2860         MemOpChains.push_back(Cpy);
2861       } else {
2862         SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Align);
2863         MemOpChains.push_back(Store);
2864       }
2865     }
2866   }
2867 
2868   // Copy special input registers after user input arguments.
2869   passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain);
2870 
2871   if (!MemOpChains.empty())
2872     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2873 
2874   // Build a sequence of copy-to-reg nodes chained together with token chain
2875   // and flag operands which copy the outgoing args into the appropriate regs.
2876   SDValue InFlag;
2877   for (auto &RegToPass : RegsToPass) {
2878     Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2879                              RegToPass.second, InFlag);
2880     InFlag = Chain.getValue(1);
2881   }
2882 
2884   SDValue PhysReturnAddrReg;
2885   if (IsTailCall) {
2886     // Since the return is being combined with the call, we need to pass on the
2887     // return address.
2888 
2889     const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2890     SDValue ReturnAddrReg = CreateLiveInRegister(
2891       DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2892 
2893     PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2894                                         MVT::i64);
2895     Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2896     InFlag = Chain.getValue(1);
2897   }
2898 
  // We don't usually want to end the call-sequence here because we would tidy
  // the frame up *after* the call. However, in the ABI-changing tail-call case
  // we've carefully laid out the parameters so that when SP is reset they'll
  // be in the correct location.
2903   if (IsTailCall && !IsSibCall) {
2904     Chain = DAG.getCALLSEQ_END(Chain,
2905                                DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2906                                DAG.getTargetConstant(0, DL, MVT::i32),
2907                                InFlag, DL);
2908     InFlag = Chain.getValue(1);
2909   }
2910 
2911   std::vector<SDValue> Ops;
2912   Ops.push_back(Chain);
2913   Ops.push_back(Callee);
2914   // Add a redundant copy of the callee global which will not be legalized, as
2915   // we need direct access to the callee later.
2916   GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Callee);
2917   const GlobalValue *GV = GSD->getGlobal();
2918   Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64));
2919 
2920   if (IsTailCall) {
2921     // Each tail call may have to adjust the stack by a different amount, so
2922     // this information must travel along with the operation for eventual
2923     // consumption by emitEpilogue.
2924     Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
2925 
2926     Ops.push_back(PhysReturnAddrReg);
2927   }
2928 
2929   // Add argument registers to the end of the list so that they are known live
2930   // into the call.
2931   for (auto &RegToPass : RegsToPass) {
2932     Ops.push_back(DAG.getRegister(RegToPass.first,
2933                                   RegToPass.second.getValueType()));
2934   }
2935 
2936   // Add a register mask operand representing the call-preserved registers.
2937 
2938   auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
2939   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2940   assert(Mask && "Missing call preserved mask for calling convention");
2941   Ops.push_back(DAG.getRegisterMask(Mask));
2942 
2943   if (InFlag.getNode())
2944     Ops.push_back(InFlag);
2945 
2946   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2947 
  // If we're doing a tail call, use a TC_RETURN here rather than an
  // actual call instruction.
2950   if (IsTailCall) {
2951     MFI.setHasTailCall();
2952     return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
2953   }
2954 
2955   // Returns a chain and a flag for retval copy to use.
2956   SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2957   Chain = Call.getValue(0);
2958   InFlag = Call.getValue(1);
2959 
2960   uint64_t CalleePopBytes = NumBytes;
2961   Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
2962                              DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2963                              InFlag, DL);
2964   if (!Ins.empty())
2965     InFlag = Chain.getValue(1);
2966 
2967   // Handle result values, copying them out of physregs into vregs that we
2968   // return.
2969   return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2970                          InVals, IsThisReturn,
2971                          IsThisReturn ? OutVals[0] : SDValue());
2972 }
2973 
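// Resolve named physical registers for the llvm.read_register and
// llvm.write_register intrinsics. As an illustration (not taken from the
// original source), IR along the lines of
//   %v = call i32 @llvm.read_register.i32(metadata !0)
//   !0 = !{!"m0"}
// reaches this hook with RegName == "m0" and a 32-bit VT, and resolves to
// AMDGPU::M0 below.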
2974 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2975                                              SelectionDAG &DAG) const {
2976   unsigned Reg = StringSwitch<unsigned>(RegName)
2977     .Case("m0", AMDGPU::M0)
2978     .Case("exec", AMDGPU::EXEC)
2979     .Case("exec_lo", AMDGPU::EXEC_LO)
2980     .Case("exec_hi", AMDGPU::EXEC_HI)
2981     .Case("flat_scratch", AMDGPU::FLAT_SCR)
2982     .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2983     .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2984     .Default(AMDGPU::NoRegister);
2985 
  if (Reg == AMDGPU::NoRegister) {
    report_fatal_error(Twine("invalid register name \""
                             + StringRef(RegName) + "\"."));
  }
2991 
2992   if (!Subtarget->hasFlatScrRegister() &&
2993        Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
2994     report_fatal_error(Twine("invalid register \""
                             + StringRef(RegName) + "\" for subtarget."));
2996   }
2997 
2998   switch (Reg) {
2999   case AMDGPU::M0:
3000   case AMDGPU::EXEC_LO:
3001   case AMDGPU::EXEC_HI:
3002   case AMDGPU::FLAT_SCR_LO:
3003   case AMDGPU::FLAT_SCR_HI:
3004     if (VT.getSizeInBits() == 32)
3005       return Reg;
3006     break;
3007   case AMDGPU::EXEC:
3008   case AMDGPU::FLAT_SCR:
3009     if (VT.getSizeInBits() == 64)
3010       return Reg;
3011     break;
3012   default:
3013     llvm_unreachable("missing register type checking");
3014   }
3015 
3016   report_fatal_error(Twine("invalid type for register \""
3017                            + StringRef(RegName) + "\"."));
3018 }
3019 
3020 // If kill is not the last instruction, split the block so kill is always a
3021 // proper terminator.
3022 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
3023                                                     MachineBasicBlock *BB) const {
3024   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3025 
3026   MachineBasicBlock::iterator SplitPoint(&MI);
3027   ++SplitPoint;
3028 
3029   if (SplitPoint == BB->end()) {
3030     // Don't bother with a new block.
3031     MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
3032     return BB;
3033   }
3034 
3035   MachineFunction *MF = BB->getParent();
3036   MachineBasicBlock *SplitBB
3037     = MF->CreateMachineBasicBlock(BB->getBasicBlock());
3038 
3039   MF->insert(++MachineFunction::iterator(BB), SplitBB);
3040   SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
3041 
3042   SplitBB->transferSuccessorsAndUpdatePHIs(BB);
3043   BB->addSuccessor(SplitBB);
3044 
3045   MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
3046   return SplitBB;
3047 }
3048 
// Split block \p MBB at \p MI, so as to insert a loop. If \p InstInLoop is
// true, \p MI will be the only instruction in the loop body block. Otherwise,
// it will be the first instruction in the remainder block.
3052 //
3053 /// \returns { LoopBody, Remainder }
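//
// Rough sketch of the resulting CFG (illustrative only):
//
//   MBB --> LoopBB --> RemainderBB --> (original successors of MBB)
//            ^    |
//            +----+  LoopBB branches back to itself until the loop is done.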
3054 static std::pair<MachineBasicBlock *, MachineBasicBlock *>
3055 splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) {
3056   MachineFunction *MF = MBB.getParent();
3057   MachineBasicBlock::iterator I(&MI);
3058 
3059   // To insert the loop we need to split the block. Move everything after this
3060   // point to a new block, and insert a new empty block between the two.
3061   MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
3062   MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
3063   MachineFunction::iterator MBBI(MBB);
3064   ++MBBI;
3065 
3066   MF->insert(MBBI, LoopBB);
3067   MF->insert(MBBI, RemainderBB);
3068 
3069   LoopBB->addSuccessor(LoopBB);
3070   LoopBB->addSuccessor(RemainderBB);
3071 
3072   // Move the rest of the block into a new block.
3073   RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
3074 
3075   if (InstInLoop) {
3076     auto Next = std::next(I);
3077 
3078     // Move instruction to loop body.
3079     LoopBB->splice(LoopBB->begin(), &MBB, I, Next);
3080 
3081     // Move the rest of the block.
3082     RemainderBB->splice(RemainderBB->begin(), &MBB, Next, MBB.end());
3083   } else {
3084     RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
3085   }
3086 
3087   MBB.addSuccessor(LoopBB);
3088 
3089   return std::make_pair(LoopBB, RemainderBB);
3090 }
3091 
3092 /// Insert \p MI into a BUNDLE with an S_WAITCNT 0 immediately following it.
3093 void SITargetLowering::bundleInstWithWaitcnt(MachineInstr &MI) const {
3094   MachineBasicBlock *MBB = MI.getParent();
3095   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3096   auto I = MI.getIterator();
3097   auto E = std::next(I);
3098 
3099   BuildMI(*MBB, E, MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
3100     .addImm(0);
3101 
3102   MIBundleBuilder Bundler(*MBB, I, E);
3103   finalizeBundle(*MBB, Bundler.begin());
3104 }
3105 
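// Lower a GWS instruction that must re-issue if a memory violation was raised:
// wrap it in a loop that clears TRAP_STS.MEM_VIOL, runs the bundled op plus
// s_waitcnt 0, and retries while MEM_VIOL is set again. Roughly (illustrative
// only, operand syntax simplified):
//   loop:
//     s_setreg_imm32_b32 hwreg(HW_REG_TRAPSTS, MEM_VIOL_OFFSET, 1), 0
//     <GWS op> ; bundled with s_waitcnt 0
//     s_getreg_b32   s_tmp, hwreg(HW_REG_TRAPSTS, MEM_VIOL_OFFSET, 1)
//     s_cmp_lg_u32   s_tmp, 0
//     s_cbranch_scc1 loop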
3106 MachineBasicBlock *
3107 SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI,
3108                                          MachineBasicBlock *BB) const {
3109   const DebugLoc &DL = MI.getDebugLoc();
3110 
3111   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3112 
3113   MachineBasicBlock *LoopBB;
3114   MachineBasicBlock *RemainderBB;
3115   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3116 
3117   // Apparently kill flags are only valid if the def is in the same block?
3118   if (MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0))
3119     Src->setIsKill(false);
3120 
3121   std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, *BB, true);
3122 
3123   MachineBasicBlock::iterator I = LoopBB->end();
3124 
3125   const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg(
3126     AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1);
3127 
3128   // Clear TRAP_STS.MEM_VIOL
3129   BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::S_SETREG_IMM32_B32))
3130     .addImm(0)
3131     .addImm(EncodedReg);
3132 
3133   bundleInstWithWaitcnt(MI);
3134 
3135   Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3136 
3137   // Load and check TRAP_STS.MEM_VIOL
3138   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg)
3139     .addImm(EncodedReg);
3140 
3141   // FIXME: Do we need to use an isel pseudo that may clobber scc?
3142   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32))
3143     .addReg(Reg, RegState::Kill)
3144     .addImm(0);
3145   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3146     .addMBB(LoopBB);
3147 
3148   return RemainderBB;
3149 }
3150 
3151 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
3152 // wavefront. If the value is uniform and just happens to be in a VGPR, this
3153 // will only do one iteration. In the worst case, this will loop 64 times.
3154 //
3155 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
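//
// Illustrative shape of the emitted waterfall loop (wave64, Offset == 0, the
// movrel path; exact instructions vary with the subtarget and with GPR
// indexing mode):
//   loop:
//     v_readfirstlane_b32 s_idx, v_idx
//     v_cmp_eq_u32_e64    s[cmp:cmp+1], s_idx, v_idx
//     s_and_saveexec_b64  s[new_exec:new_exec+1], s[cmp:cmp+1]
//     s_mov_b32           m0, s_idx
//     ; the caller inserts the indirect move here, before the exec update
//     s_xor_b64           exec, exec, s[new_exec:new_exec+1]
//     s_cbranch_execnz    loop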
3156 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
3157   const SIInstrInfo *TII,
3158   MachineRegisterInfo &MRI,
3159   MachineBasicBlock &OrigBB,
3160   MachineBasicBlock &LoopBB,
3161   const DebugLoc &DL,
3162   const MachineOperand &IdxReg,
3163   unsigned InitReg,
3164   unsigned ResultReg,
3165   unsigned PhiReg,
3166   unsigned InitSaveExecReg,
3167   int Offset,
3168   bool UseGPRIdxMode,
3169   bool IsIndirectSrc) {
3170   MachineFunction *MF = OrigBB.getParent();
3171   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3172   const SIRegisterInfo *TRI = ST.getRegisterInfo();
3173   MachineBasicBlock::iterator I = LoopBB.begin();
3174 
3175   const TargetRegisterClass *BoolRC = TRI->getBoolRC();
3176   Register PhiExec = MRI.createVirtualRegister(BoolRC);
3177   Register NewExec = MRI.createVirtualRegister(BoolRC);
3178   Register CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3179   Register CondReg = MRI.createVirtualRegister(BoolRC);
3180 
3181   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
3182     .addReg(InitReg)
3183     .addMBB(&OrigBB)
3184     .addReg(ResultReg)
3185     .addMBB(&LoopBB);
3186 
3187   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
3188     .addReg(InitSaveExecReg)
3189     .addMBB(&OrigBB)
3190     .addReg(NewExec)
3191     .addMBB(&LoopBB);
3192 
3193   // Read the next variant <- also loop target.
3194   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
3195     .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
3196 
3197   // Compare the just read M0 value to all possible Idx values.
3198   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
3199     .addReg(CurrentIdxReg)
3200     .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
3201 
3202   // Update EXEC, save the original EXEC value to VCC.
3203   BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32
3204                                                 : AMDGPU::S_AND_SAVEEXEC_B64),
3205           NewExec)
3206     .addReg(CondReg, RegState::Kill);
3207 
3208   MRI.setSimpleHint(NewExec, CondReg);
3209 
3210   if (UseGPRIdxMode) {
3211     unsigned IdxReg;
3212     if (Offset == 0) {
3213       IdxReg = CurrentIdxReg;
3214     } else {
3215       IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3216       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
3217         .addReg(CurrentIdxReg, RegState::Kill)
3218         .addImm(Offset);
3219     }
3220     unsigned IdxMode = IsIndirectSrc ?
3221       AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
3222     MachineInstr *SetOn =
3223       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3224       .addReg(IdxReg, RegState::Kill)
3225       .addImm(IdxMode);
3226     SetOn->getOperand(3).setIsUndef();
3227   } else {
3228     // Move index from VCC into M0
3229     if (Offset == 0) {
3230       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3231         .addReg(CurrentIdxReg, RegState::Kill);
3232     } else {
3233       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
3234         .addReg(CurrentIdxReg, RegState::Kill)
3235         .addImm(Offset);
3236     }
3237   }
3238 
3239   // Update EXEC, switch all done bits to 0 and all todo bits to 1.
3240   unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3241   MachineInstr *InsertPt =
3242     BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term
3243                                                   : AMDGPU::S_XOR_B64_term), Exec)
3244       .addReg(Exec)
3245       .addReg(NewExec);
3246 
3247   // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
3248   // s_cbranch_scc0?
3249 
3250   // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
3251   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
3252     .addMBB(&LoopBB);
3253 
3254   return InsertPt->getIterator();
3255 }
3256 
// This has slightly sub-optimal register allocation when the source vector is
// killed by the read. The register allocator does not understand that the kill
// is per-workitem, so the source is kept alive for the whole loop and we end
// up not re-using a subregister from it, using 1 more VGPR than necessary.
// The extra VGPR was not needed back when this was expanded after register
// allocation.
3262 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
3263                                                   MachineBasicBlock &MBB,
3264                                                   MachineInstr &MI,
3265                                                   unsigned InitResultReg,
3266                                                   unsigned PhiReg,
3267                                                   int Offset,
3268                                                   bool UseGPRIdxMode,
3269                                                   bool IsIndirectSrc) {
3270   MachineFunction *MF = MBB.getParent();
3271   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3272   const SIRegisterInfo *TRI = ST.getRegisterInfo();
3273   MachineRegisterInfo &MRI = MF->getRegInfo();
3274   const DebugLoc &DL = MI.getDebugLoc();
3275   MachineBasicBlock::iterator I(&MI);
3276 
3277   const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
3278   Register DstReg = MI.getOperand(0).getReg();
3279   Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
3280   Register TmpExec = MRI.createVirtualRegister(BoolXExecRC);
3281   unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3282   unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
3283 
3284   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
3285 
3286   // Save the EXEC mask
3287   BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec)
3288     .addReg(Exec);
3289 
3290   MachineBasicBlock *LoopBB;
3291   MachineBasicBlock *RemainderBB;
3292   std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, MBB, false);
3293 
3294   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3295 
3296   auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
3297                                       InitResultReg, DstReg, PhiReg, TmpExec,
3298                                       Offset, UseGPRIdxMode, IsIndirectSrc);
3299 
3300   MachineBasicBlock::iterator First = RemainderBB->begin();
3301   BuildMI(*RemainderBB, First, DL, TII->get(MovExecOpc), Exec)
3302     .addReg(SaveExec);
3303 
3304   return InsPt;
3305 }
3306 
3307 // Returns subreg index, offset
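//
// Worked example (illustrative): for a 256-bit super register class
// (8 x 32-bit elements), Offset = 3 is in range and yields
// {AMDGPU::sub0 + 3, 0}, folding the access into a static subregister.
// An out-of-range Offset such as 9 is not folded and comes back as
// {AMDGPU::sub0, 9}.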
3308 static std::pair<unsigned, int>
3309 computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
3310                             const TargetRegisterClass *SuperRC,
3311                             unsigned VecReg,
3312                             int Offset) {
3313   int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
3314 
3315   // Skip out of bounds offsets, or else we would end up using an undefined
3316   // register.
3317   if (Offset >= NumElts || Offset < 0)
3318     return std::make_pair(AMDGPU::sub0, Offset);
3319 
3320   return std::make_pair(AMDGPU::sub0 + Offset, 0);
3321 }
3322 
3323 // Return true if the index is an SGPR and was set.
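// For an SGPR index this emits either s_set_gpr_idx_on (GPR indexing mode) or
// a write to m0, e.g. with a nonzero offset (illustrative):
//   s_add_i32 m0, s_idx, <offset>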
3324 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
3325                                  MachineRegisterInfo &MRI,
3326                                  MachineInstr &MI,
3327                                  int Offset,
3328                                  bool UseGPRIdxMode,
3329                                  bool IsIndirectSrc) {
3330   MachineBasicBlock *MBB = MI.getParent();
3331   const DebugLoc &DL = MI.getDebugLoc();
3332   MachineBasicBlock::iterator I(&MI);
3333 
3334   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3335   const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
3336 
3337   assert(Idx->getReg() != AMDGPU::NoRegister);
3338 
3339   if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
3340     return false;
3341 
3342   if (UseGPRIdxMode) {
3343     unsigned IdxMode = IsIndirectSrc ?
3344       AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
3345     if (Offset == 0) {
3346       MachineInstr *SetOn =
3347           BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3348               .add(*Idx)
3349               .addImm(IdxMode);
3350 
3351       SetOn->getOperand(3).setIsUndef();
3352     } else {
3353       Register Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3354       BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
3355           .add(*Idx)
3356           .addImm(Offset);
3357       MachineInstr *SetOn =
3358         BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3359         .addReg(Tmp, RegState::Kill)
3360         .addImm(IdxMode);
3361 
3362       SetOn->getOperand(3).setIsUndef();
3363     }
3364 
3365     return true;
3366   }
3367 
3368   if (Offset == 0) {
3369     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3370       .add(*Idx);
3371   } else {
3372     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
3373       .add(*Idx)
3374       .addImm(Offset);
3375   }
3376 
3377   return true;
3378 }
3379 
3380 // Control flow needs to be inserted if indexing with a VGPR.
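//
// When the index is already an SGPR, a simple sequence suffices, e.g. in the
// movrel case (illustrative):
//   s_mov_b32         m0, s_idx
//   v_movrels_b32_e32 v_dst, v_src_vec[sub]
// Otherwise a waterfall loop over the lanes is required.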
3381 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
3382                                           MachineBasicBlock &MBB,
3383                                           const GCNSubtarget &ST) {
3384   const SIInstrInfo *TII = ST.getInstrInfo();
3385   const SIRegisterInfo &TRI = TII->getRegisterInfo();
3386   MachineFunction *MF = MBB.getParent();
3387   MachineRegisterInfo &MRI = MF->getRegInfo();
3388 
3389   Register Dst = MI.getOperand(0).getReg();
3390   Register SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
3391   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3392 
3393   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
3394 
3395   unsigned SubReg;
3396   std::tie(SubReg, Offset)
3397     = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
3398 
3399   bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
3400 
3401   if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
3402     MachineBasicBlock::iterator I(&MI);
3403     const DebugLoc &DL = MI.getDebugLoc();
3404 
3405     if (UseGPRIdxMode) {
3406       // TODO: Look at the uses to avoid the copy. This may require rescheduling
3407       // to avoid interfering with other uses, so probably requires a new
3408       // optimization pass.
3409       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3410         .addReg(SrcReg, RegState::Undef, SubReg)
3411         .addReg(SrcReg, RegState::Implicit)
3412         .addReg(AMDGPU::M0, RegState::Implicit);
3413       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3414     } else {
3415       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3416         .addReg(SrcReg, RegState::Undef, SubReg)
3417         .addReg(SrcReg, RegState::Implicit);
3418     }
3419 
3420     MI.eraseFromParent();
3421 
3422     return &MBB;
3423   }
3424 
3425   const DebugLoc &DL = MI.getDebugLoc();
3426   MachineBasicBlock::iterator I(&MI);
3427 
3428   Register PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3429   Register InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3430 
3431   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
3432 
3433   auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
3434                               Offset, UseGPRIdxMode, true);
3435   MachineBasicBlock *LoopBB = InsPt->getParent();
3436 
3437   if (UseGPRIdxMode) {
3438     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3439       .addReg(SrcReg, RegState::Undef, SubReg)
3440       .addReg(SrcReg, RegState::Implicit)
3441       .addReg(AMDGPU::M0, RegState::Implicit);
3442     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3443   } else {
3444     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3445       .addReg(SrcReg, RegState::Undef, SubReg)
3446       .addReg(SrcReg, RegState::Implicit);
3447   }
3448 
3449   MI.eraseFromParent();
3450 
3451   return LoopBB;
3452 }
3453 
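// Map the size of the vector register class to the matching V_MOVRELD_B32
// pseudo; for instance (illustrative) a 128-bit class such as VReg_128
// selects AMDGPU::V_MOVRELD_B32_V4, i.e. one pseudo per vector width in
// 32-bit elements.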
3454 static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
3455                                  const TargetRegisterClass *VecRC) {
3456   switch (TRI.getRegSizeInBits(*VecRC)) {
3457   case 32: // 4 bytes
3458     return AMDGPU::V_MOVRELD_B32_V1;
3459   case 64: // 8 bytes
3460     return AMDGPU::V_MOVRELD_B32_V2;
3461   case 128: // 16 bytes
3462     return AMDGPU::V_MOVRELD_B32_V4;
3463   case 256: // 32 bytes
3464     return AMDGPU::V_MOVRELD_B32_V8;
3465   case 512: // 64 bytes
3466     return AMDGPU::V_MOVRELD_B32_V16;
3467   default:
3468     llvm_unreachable("unsupported size for MOVRELD pseudos");
3469   }
3470 }
3471 
3472 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3473                                           MachineBasicBlock &MBB,
3474                                           const GCNSubtarget &ST) {
3475   const SIInstrInfo *TII = ST.getInstrInfo();
3476   const SIRegisterInfo &TRI = TII->getRegisterInfo();
3477   MachineFunction *MF = MBB.getParent();
3478   MachineRegisterInfo &MRI = MF->getRegInfo();
3479 
3480   Register Dst = MI.getOperand(0).getReg();
3481   const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3482   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3483   const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3484   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3485   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3486 
3487   // This can be an immediate, but will be folded later.
3488   assert(Val->getReg());
3489 
3490   unsigned SubReg;
3491   std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3492                                                          SrcVec->getReg(),
3493                                                          Offset);
3494   bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
3495 
3496   if (Idx->getReg() == AMDGPU::NoRegister) {
3497     MachineBasicBlock::iterator I(&MI);
3498     const DebugLoc &DL = MI.getDebugLoc();
3499 
3500     assert(Offset == 0);
3501 
3502     BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
3503         .add(*SrcVec)
3504         .add(*Val)
3505         .addImm(SubReg);
3506 
3507     MI.eraseFromParent();
3508     return &MBB;
3509   }
3510 
3511   if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
3512     MachineBasicBlock::iterator I(&MI);
3513     const DebugLoc &DL = MI.getDebugLoc();
3514 
3515     if (UseGPRIdxMode) {
3516       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3517           .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
3518           .add(*Val)
3519           .addReg(Dst, RegState::ImplicitDefine)
3520           .addReg(SrcVec->getReg(), RegState::Implicit)
3521           .addReg(AMDGPU::M0, RegState::Implicit);
3522 
3523       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3524     } else {
3525       const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3526 
3527       BuildMI(MBB, I, DL, MovRelDesc)
3528           .addReg(Dst, RegState::Define)
3529           .addReg(SrcVec->getReg())
3530           .add(*Val)
3531           .addImm(SubReg - AMDGPU::sub0);
3532     }
3533 
3534     MI.eraseFromParent();
3535     return &MBB;
3536   }
3537 
3538   if (Val->isReg())
3539     MRI.clearKillFlags(Val->getReg());
3540 
3541   const DebugLoc &DL = MI.getDebugLoc();
3542 
3543   Register PhiReg = MRI.createVirtualRegister(VecRC);
3544 
3545   auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
3546                               Offset, UseGPRIdxMode, false);
3547   MachineBasicBlock *LoopBB = InsPt->getParent();
3548 
3549   if (UseGPRIdxMode) {
3550     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3551         .addReg(PhiReg, RegState::Undef, SubReg) // vdst
3552         .add(*Val)                               // src0
3553         .addReg(Dst, RegState::ImplicitDefine)
3554         .addReg(PhiReg, RegState::Implicit)
3555         .addReg(AMDGPU::M0, RegState::Implicit);
3556     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3557   } else {
3558     const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3559 
3560     BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
3561         .addReg(Dst, RegState::Define)
3562         .addReg(PhiReg)
3563         .add(*Val)
3564         .addImm(SubReg - AMDGPU::sub0);
3565   }
3566 
3567   MI.eraseFromParent();
3568 
3569   return LoopBB;
3570 }
3571 
3572 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3573   MachineInstr &MI, MachineBasicBlock *BB) const {
3574 
3575   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3576   MachineFunction *MF = BB->getParent();
3577   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3578 
3579   if (TII->isMIMG(MI)) {
3580     if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3581       report_fatal_error("missing mem operand from MIMG instruction");
3582     }
    // Add a memoperand for mimg instructions so that they aren't assumed to
    // be ordered memory instructions.
3585 
3586     return BB;
3587   }
3588 
3589   switch (MI.getOpcode()) {
3590   case AMDGPU::S_ADD_U64_PSEUDO:
3591   case AMDGPU::S_SUB_U64_PSEUDO: {
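    // Expand a 64-bit scalar add/sub into a lo/hi pair of 32-bit ops that
    // communicate through SCC, e.g. for the add case (illustrative):
    //   s_add_u32  dst.sub0, src0.sub0, src1.sub0
    //   s_addc_u32 dst.sub1, src0.sub1, src1.sub1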
3592     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3593     const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3594     const SIRegisterInfo *TRI = ST.getRegisterInfo();
3595     const TargetRegisterClass *BoolRC = TRI->getBoolRC();
3596     const DebugLoc &DL = MI.getDebugLoc();
3597 
3598     MachineOperand &Dest = MI.getOperand(0);
3599     MachineOperand &Src0 = MI.getOperand(1);
3600     MachineOperand &Src1 = MI.getOperand(2);
3601 
3602     Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3603     Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3604 
3605     MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3606      Src0, BoolRC, AMDGPU::sub0,
3607      &AMDGPU::SReg_32_XM0RegClass);
3608     MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3609       Src0, BoolRC, AMDGPU::sub1,
3610       &AMDGPU::SReg_32_XM0RegClass);
3611 
3612     MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3613       Src1, BoolRC, AMDGPU::sub0,
3614       &AMDGPU::SReg_32_XM0RegClass);
3615     MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3616       Src1, BoolRC, AMDGPU::sub1,
3617       &AMDGPU::SReg_32_XM0RegClass);
3618 
3619     bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3620 
3621     unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3622     unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3623     BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3624       .add(Src0Sub0)
3625       .add(Src1Sub0);
3626     BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3627       .add(Src0Sub1)
3628       .add(Src1Sub1);
3629     BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3630       .addReg(DestSub0)
3631       .addImm(AMDGPU::sub0)
3632       .addReg(DestSub1)
3633       .addImm(AMDGPU::sub1);
3634     MI.eraseFromParent();
3635     return BB;
3636   }
3637   case AMDGPU::SI_INIT_M0: {
3638     BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
3639             TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3640         .add(MI.getOperand(0));
3641     MI.eraseFromParent();
3642     return BB;
3643   }
3644   case AMDGPU::SI_INIT_EXEC:
3645     // This should be before all vector instructions.
3646     BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3647             AMDGPU::EXEC)
3648         .addImm(MI.getOperand(0).getImm());
3649     MI.eraseFromParent();
3650     return BB;
3651 
3652   case AMDGPU::SI_INIT_EXEC_LO:
3653     // This should be before all vector instructions.
3654     BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B32),
3655             AMDGPU::EXEC_LO)
3656         .addImm(MI.getOperand(0).getImm());
3657     MI.eraseFromParent();
3658     return BB;
3659 
3660   case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3661     // Extract the thread count from an SGPR input and set EXEC accordingly.
3662     // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3663     //
3664     // S_BFE_U32 count, input, {shift, 7}
3665     // S_BFM_B64 exec, count, 0
3666     // S_CMP_EQ_U32 count, 64
3667     // S_CMOV_B64 exec, -1
3668     MachineInstr *FirstMI = &*BB->begin();
3669     MachineRegisterInfo &MRI = MF->getRegInfo();
3670     Register InputReg = MI.getOperand(0).getReg();
3671     Register CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3672     bool Found = false;
3673 
3674     // Move the COPY of the input reg to the beginning, so that we can use it.
3675     for (auto I = BB->begin(); I != &MI; I++) {
3676       if (I->getOpcode() != TargetOpcode::COPY ||
3677           I->getOperand(0).getReg() != InputReg)
3678         continue;
3679 
3680       if (I == FirstMI) {
3681         FirstMI = &*++BB->begin();
3682       } else {
3683         I->removeFromParent();
3684         BB->insert(FirstMI, &*I);
3685       }
3686       Found = true;
3687       break;
3688     }
3689     assert(Found);
3690     (void)Found;
3691 
3692     // This should be before all vector instructions.
3693     unsigned Mask = (getSubtarget()->getWavefrontSize() << 1) - 1;
3694     bool isWave32 = getSubtarget()->isWave32();
3695     unsigned Exec = isWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3696     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3697         .addReg(InputReg)
3698         .addImm((MI.getOperand(1).getImm() & Mask) | 0x70000);
3699     BuildMI(*BB, FirstMI, DebugLoc(),
3700             TII->get(isWave32 ? AMDGPU::S_BFM_B32 : AMDGPU::S_BFM_B64),
3701             Exec)
3702         .addReg(CountReg)
3703         .addImm(0);
3704     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3705         .addReg(CountReg, RegState::Kill)
3706         .addImm(getSubtarget()->getWavefrontSize());
3707     BuildMI(*BB, FirstMI, DebugLoc(),
3708             TII->get(isWave32 ? AMDGPU::S_CMOV_B32 : AMDGPU::S_CMOV_B64),
3709             Exec)
3710         .addImm(-1);
3711     MI.eraseFromParent();
3712     return BB;
3713   }
3714 
3715   case AMDGPU::GET_GROUPSTATICSIZE: {
3716     assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA ||
3717            getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL);
3718     DebugLoc DL = MI.getDebugLoc();
3719     BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
3720         .add(MI.getOperand(0))
3721         .addImm(MFI->getLDSSize());
3722     MI.eraseFromParent();
3723     return BB;
3724   }
3725   case AMDGPU::SI_INDIRECT_SRC_V1:
3726   case AMDGPU::SI_INDIRECT_SRC_V2:
3727   case AMDGPU::SI_INDIRECT_SRC_V4:
3728   case AMDGPU::SI_INDIRECT_SRC_V8:
3729   case AMDGPU::SI_INDIRECT_SRC_V16:
3730     return emitIndirectSrc(MI, *BB, *getSubtarget());
3731   case AMDGPU::SI_INDIRECT_DST_V1:
3732   case AMDGPU::SI_INDIRECT_DST_V2:
3733   case AMDGPU::SI_INDIRECT_DST_V4:
3734   case AMDGPU::SI_INDIRECT_DST_V8:
3735   case AMDGPU::SI_INDIRECT_DST_V16:
3736     return emitIndirectDst(MI, *BB, *getSubtarget());
3737   case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3738   case AMDGPU::SI_KILL_I1_PSEUDO:
3739     return splitKillBlock(MI, BB);
3740   case AMDGPU::V_CNDMASK_B64_PSEUDO: {
3741     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3742     const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3743     const SIRegisterInfo *TRI = ST.getRegisterInfo();
3744 
3745     Register Dst = MI.getOperand(0).getReg();
3746     Register Src0 = MI.getOperand(1).getReg();
3747     Register Src1 = MI.getOperand(2).getReg();
3748     const DebugLoc &DL = MI.getDebugLoc();
3749     Register SrcCond = MI.getOperand(3).getReg();
3750 
3751     Register DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3752     Register DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3753     const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
3754     Register SrcCondCopy = MRI.createVirtualRegister(CondRC);
3755 
3756     BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3757       .addReg(SrcCond);
3758     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3759       .addImm(0)
3760       .addReg(Src0, 0, AMDGPU::sub0)
3761       .addImm(0)
3762       .addReg(Src1, 0, AMDGPU::sub0)
3763       .addReg(SrcCondCopy);
3764     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3765       .addImm(0)
3766       .addReg(Src0, 0, AMDGPU::sub1)
3767       .addImm(0)
3768       .addReg(Src1, 0, AMDGPU::sub1)
3769       .addReg(SrcCondCopy);
3770 
3771     BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3772       .addReg(DstLo)
3773       .addImm(AMDGPU::sub0)
3774       .addReg(DstHi)
3775       .addImm(AMDGPU::sub1);
3776     MI.eraseFromParent();
3777     return BB;
3778   }
3779   case AMDGPU::SI_BR_UNDEF: {
3780     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3781     const DebugLoc &DL = MI.getDebugLoc();
3782     MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3783                            .add(MI.getOperand(0));
3784     Br->getOperand(1).setIsUndef(true); // read undef SCC
3785     MI.eraseFromParent();
3786     return BB;
3787   }
3788   case AMDGPU::ADJCALLSTACKUP:
3789   case AMDGPU::ADJCALLSTACKDOWN: {
3790     const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3791     MachineInstrBuilder MIB(*MF, &MI);
3792 
    // Add an implicit use of the frame offset reg to prevent the restore copy
    // inserted after the call from being reordered after stack operations in
    // the caller's frame.
3796     MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
3797         .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit)
3798         .addReg(Info->getFrameOffsetReg(), RegState::Implicit);
3799     return BB;
3800   }
3801   case AMDGPU::SI_CALL_ISEL: {
3802     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3803     const DebugLoc &DL = MI.getDebugLoc();
3804 
3805     unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
3806 
3807     MachineInstrBuilder MIB;
3808     MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg);
3809 
3810     for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
3811       MIB.add(MI.getOperand(I));
3812 
3813     MIB.cloneMemRefs(MI);
3814     MI.eraseFromParent();
3815     return BB;
3816   }
3817   case AMDGPU::V_ADD_I32_e32:
3818   case AMDGPU::V_SUB_I32_e32:
3819   case AMDGPU::V_SUBREV_I32_e32: {
3820     // TODO: Define distinct V_*_I32_Pseudo instructions instead.
3821     const DebugLoc &DL = MI.getDebugLoc();
3822     unsigned Opc = MI.getOpcode();
3823 
3824     bool NeedClampOperand = false;
3825     if (TII->pseudoToMCOpcode(Opc) == -1) {
3826       Opc = AMDGPU::getVOPe64(Opc);
3827       NeedClampOperand = true;
3828     }
3829 
3830     auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg());
3831     if (TII->isVOP3(*I)) {
3832       const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3833       const SIRegisterInfo *TRI = ST.getRegisterInfo();
3834       I.addReg(TRI->getVCC(), RegState::Define);
3835     }
3836     I.add(MI.getOperand(1))
3837      .add(MI.getOperand(2));
3838     if (NeedClampOperand)
3839       I.addImm(0); // clamp bit for e64 encoding
3840 
3841     TII->legalizeOperands(*I);
3842 
3843     MI.eraseFromParent();
3844     return BB;
3845   }
3846   case AMDGPU::DS_GWS_INIT:
3847   case AMDGPU::DS_GWS_SEMA_V:
3848   case AMDGPU::DS_GWS_SEMA_BR:
3849   case AMDGPU::DS_GWS_SEMA_P:
3850   case AMDGPU::DS_GWS_SEMA_RELEASE_ALL:
3851   case AMDGPU::DS_GWS_BARRIER:
3852     // A s_waitcnt 0 is required to be the instruction immediately following.
3853     if (getSubtarget()->hasGWSAutoReplay()) {
3854       bundleInstWithWaitcnt(MI);
3855       return BB;
3856     }
3857 
3858     return emitGWSMemViolTestLoop(MI, BB);
3859   default:
3860     return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
3861   }
3862 }
3863 
3864 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3865   return isTypeLegal(VT.getScalarType());
3866 }
3867 
3868 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3869   // This currently forces unfolding various combinations of fsub into fma with
3870   // free fneg'd operands. As long as we have fast FMA (controlled by
3871   // isFMAFasterThanFMulAndFAdd), we should perform these.
3872 
3873   // When fma is quarter rate, for f64 where add / sub are at best half rate,
3874   // most of these combines appear to be cycle neutral but save on instruction
3875   // count / code size.
3876   return true;
3877 }
3878 
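// For example, a setcc of two v4f32 operands produces a v4i1 result, while a
// scalar compare produces plain i1.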
3879 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3880                                          EVT VT) const {
3881   if (!VT.isVector()) {
3882     return MVT::i1;
3883   }
3884   return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
3885 }
3886 
3887 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
  // TODO: Should i16 always be used if legal? For now it would force VALU
  // shifts.
3890   return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
3891 }
3892 
// Answering this is somewhat tricky and depends on the specific device, since
// different devices have different rates for fma and for f64 operations.
3895 //
3896 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3897 // regardless of which device (although the number of cycles differs between
3898 // devices), so it is always profitable for f64.
3899 //
3900 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3901 // only on full rate devices. Normally, we should prefer selecting v_mad_f32
3902 // which we can always do even without fused FP ops since it returns the same
3903 // result as the separate operations and since it is always full
3904 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
3905 // however does not support denormals, so we do report fma as faster if we have
3906 // a fast fma device and require denormals.
3907 //
3908 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3909   VT = VT.getScalarType();
3910 
3911   switch (VT.getSimpleVT().SimpleTy) {
3912   case MVT::f32: {
    // FMA is as fast as mul+add on some subtargets. However, full rate f32 mad
    // is always available, returns the same result as the separate operations,
    // and should be preferred over fma. We can't use mad if we want to support
    // denormals, so only report fma as faster in that case.
3917     if (Subtarget->hasFP32Denormals())
3918       return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
3919 
3920     // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
3921     return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
3922   }
3923   case MVT::f64:
3924     return true;
3925   case MVT::f16:
3926     return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
3927   default:
3928     break;
3929   }
3930 
3931   return false;
3932 }
3933 
3934 //===----------------------------------------------------------------------===//
3935 // Custom DAG Lowering Operations
3936 //===----------------------------------------------------------------------===//
3937 
3938 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3939 // wider vector type is legal.
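// For example, a v4f16 fcanonicalize is lowered as two v2f16 fcanonicalizes
// whose results are concatenated back into a v4f16.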
3940 SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
3941                                              SelectionDAG &DAG) const {
3942   unsigned Opc = Op.getOpcode();
3943   EVT VT = Op.getValueType();
3944   assert(VT == MVT::v4f16);
3945 
3946   SDValue Lo, Hi;
3947   std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3948 
3949   SDLoc SL(Op);
3950   SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
3951                              Op->getFlags());
3952   SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
3953                              Op->getFlags());
3954 
3955   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3956 }
3957 
3958 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3959 // wider vector type is legal.
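// For example, a v4i16 add becomes two v2i16 adds and a v4f16 fadd becomes
// two v2f16 fadds, concatenated back to the original width.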
3960 SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
3961                                               SelectionDAG &DAG) const {
3962   unsigned Opc = Op.getOpcode();
3963   EVT VT = Op.getValueType();
3964   assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3965 
3966   SDValue Lo0, Hi0;
3967   std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3968   SDValue Lo1, Hi1;
3969   std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3970 
3971   SDLoc SL(Op);
3972 
3973   SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
3974                              Op->getFlags());
3975   SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
3976                              Op->getFlags());
3977 
3978   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3979 }
3980 
3981 SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op,
3982                                               SelectionDAG &DAG) const {
3983   unsigned Opc = Op.getOpcode();
3984   EVT VT = Op.getValueType();
3985   assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3986 
3987   SDValue Lo0, Hi0;
3988   std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3989   SDValue Lo1, Hi1;
3990   std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3991   SDValue Lo2, Hi2;
3992   std::tie(Lo2, Hi2) = DAG.SplitVectorOperand(Op.getNode(), 2);
3993 
3994   SDLoc SL(Op);
3995 
3996   SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, Lo2,
3997                              Op->getFlags());
3998   SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, Hi2,
3999                              Op->getFlags());
4000 
4001   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
4002 }
4003 
4005 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
4006   switch (Op.getOpcode()) {
4007   default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
4008   case ISD::BRCOND: return LowerBRCOND(Op, DAG);
4009   case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
4010   case ISD::LOAD: {
4011     SDValue Result = LowerLOAD(Op, DAG);
4012     assert((!Result.getNode() ||
4013             Result.getNode()->getNumValues() == 2) &&
4014            "Load should return a value and a chain");
4015     return Result;
4016   }
4017 
4018   case ISD::FSIN:
4019   case ISD::FCOS:
4020     return LowerTrig(Op, DAG);
4021   case ISD::SELECT: return LowerSELECT(Op, DAG);
4022   case ISD::FDIV: return LowerFDIV(Op, DAG);
4023   case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
4024   case ISD::STORE: return LowerSTORE(Op, DAG);
4025   case ISD::GlobalAddress: {
4026     MachineFunction &MF = DAG.getMachineFunction();
4027     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
4028     return LowerGlobalAddress(MFI, Op, DAG);
4029   }
4030   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
4031   case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
4032   case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
4033   case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
4034   case ISD::INSERT_SUBVECTOR:
4035     return lowerINSERT_SUBVECTOR(Op, DAG);
4036   case ISD::INSERT_VECTOR_ELT:
4037     return lowerINSERT_VECTOR_ELT(Op, DAG);
4038   case ISD::EXTRACT_VECTOR_ELT:
4039     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
4040   case ISD::VECTOR_SHUFFLE:
4041     return lowerVECTOR_SHUFFLE(Op, DAG);
4042   case ISD::BUILD_VECTOR:
4043     return lowerBUILD_VECTOR(Op, DAG);
4044   case ISD::FP_ROUND:
4045     return lowerFP_ROUND(Op, DAG);
4046   case ISD::TRAP:
4047     return lowerTRAP(Op, DAG);
4048   case ISD::DEBUGTRAP:
4049     return lowerDEBUGTRAP(Op, DAG);
4050   case ISD::FABS:
4051   case ISD::FNEG:
4052   case ISD::FCANONICALIZE:
4053     return splitUnaryVectorOp(Op, DAG);
4054   case ISD::FMINNUM:
4055   case ISD::FMAXNUM:
4056     return lowerFMINNUM_FMAXNUM(Op, DAG);
4057   case ISD::FMA:
4058     return splitTernaryVectorOp(Op, DAG);
4059   case ISD::SHL:
4060   case ISD::SRA:
4061   case ISD::SRL:
4062   case ISD::ADD:
4063   case ISD::SUB:
4064   case ISD::MUL:
4065   case ISD::SMIN:
4066   case ISD::SMAX:
4067   case ISD::UMIN:
4068   case ISD::UMAX:
4069   case ISD::FADD:
4070   case ISD::FMUL:
4071   case ISD::FMINNUM_IEEE:
4072   case ISD::FMAXNUM_IEEE:
4073     return splitBinaryVectorOp(Op, DAG);
4074   }
4075   return SDValue();
4076 }
4077 
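// On subtargets with unpacked d16 memory instructions, a d16 vector load is
// returned with one 32-bit element per component (e.g. v2f16 comes back as
// v2i32). In that case this helper truncates each element to i16, rebuilds
// v2i16/v4i16, and bitcasts to the original v2f16/v4f16 type; on packed
// subtargets only the final bitcast is needed.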
4078 static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
4079                                        const SDLoc &DL,
4080                                        SelectionDAG &DAG, bool Unpacked) {
4081   if (!LoadVT.isVector())
4082     return Result;
4083 
4084   if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
4085     // Truncate to v2i16/v4i16.
4086     EVT IntLoadVT = LoadVT.changeTypeToInteger();
4087 
    // Work around the legalizer not scalarizing truncate after vector op
    // legalization by not creating an intermediate vector trunc.
4090     SmallVector<SDValue, 4> Elts;
4091     DAG.ExtractVectorElements(Result, Elts);
4092     for (SDValue &Elt : Elts)
4093       Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
4094 
4095     Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
4096 
4097     // Bitcast to original type (v2f16/v4f16).
4098     return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
4099   }
4100 
4101   // Cast back to the original packed type.
4102   return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
4103 }
4104 
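// Emit the memory intrinsic for a D16 load. On subtargets with unpacked D16
// VMEM the result type is widened to one i32 per element before building the
// node, and the loaded value is then repacked to the type of the original
// memory node.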
4105 SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
4106                                               MemSDNode *M,
4107                                               SelectionDAG &DAG,
4108                                               ArrayRef<SDValue> Ops,
4109                                               bool IsIntrinsic) const {
4110   SDLoc DL(M);
4111 
4112   bool Unpacked = Subtarget->hasUnpackedD16VMem();
4113   EVT LoadVT = M->getValueType(0);
4114 
4115   EVT EquivLoadVT = LoadVT;
  if (Unpacked && LoadVT.isVector()) {
    EquivLoadVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                   LoadVT.getVectorNumElements());
  }
4121 
4122   // Change from v4f16/v2f16 to EquivLoadVT.
4123   SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
4124 
4125   SDValue Load
4126     = DAG.getMemIntrinsicNode(
4127       IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
4128       VTList, Ops, M->getMemoryVT(),
4129       M->getMemOperand());
4130   if (!Unpacked) // Just adjusted the opcode.
4131     return Load;
4132 
4133   SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);
4134 
4135   return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
4136 }
4137 
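// Lower a buffer load intrinsic to the corresponding AMDGPUISD buffer-load
// node, handling D16 results, sub-dword (i8/i16) loads, and result types that
// are not legal and must be loaded as an equivalent legal type and bitcast.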
4138 SDValue SITargetLowering::lowerIntrinsicLoad(MemSDNode *M, bool IsFormat,
4139                                              SelectionDAG &DAG,
4140                                              ArrayRef<SDValue> Ops) const {
4141   SDLoc DL(M);
4142   EVT LoadVT = M->getValueType(0);
4143   EVT EltType = LoadVT.getScalarType();
4144   EVT IntVT = LoadVT.changeTypeToInteger();
4145 
4146   bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
4147 
4148   unsigned Opc =
4149       IsFormat ? AMDGPUISD::BUFFER_LOAD_FORMAT : AMDGPUISD::BUFFER_LOAD;
4150 
4151   if (IsD16) {
4152     return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, M, DAG, Ops);
4153   }
4154 
4155   // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
4156   if (!IsD16 && !LoadVT.isVector() && EltType.getSizeInBits() < 32)
4157     return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
4158 
4159   if (isTypeLegal(LoadVT)) {
4160     return getMemIntrinsicNode(Opc, DL, M->getVTList(), Ops, IntVT,
4161                                M->getMemOperand(), DAG);
4162   }
4163 
4164   EVT CastVT = getEquivalentMemType(*DAG.getContext(), LoadVT);
4165   SDVTList VTList = DAG.getVTList(CastVT, MVT::Other);
4166   SDValue MemNode = getMemIntrinsicNode(Opc, DL, VTList, Ops, CastVT,
4167                                         M->getMemOperand(), DAG);
4168   return DAG.getMergeValues(
4169       {DAG.getNode(ISD::BITCAST, DL, LoadVT, MemNode), MemNode.getValue(1)},
4170       DL);
4171 }
4172 
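// Lower llvm.amdgcn.icmp to an AMDGPUISD::SETCC that produces a
// wavefront-sized lane mask (e.g. i64 on wave64), which is then zero-extended
// or truncated to the intrinsic's declared result type. Out-of-range
// predicates fold to undef, and illegal i16 operands are promoted to i32.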
4173 static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
4174                                   SDNode *N, SelectionDAG &DAG) {
4175   EVT VT = N->getValueType(0);
4176   const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
4177   int CondCode = CD->getSExtValue();
4178   if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
4179       CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
4180     return DAG.getUNDEF(VT);
4181 
4182   ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
4183 
4184   SDValue LHS = N->getOperand(1);
4185   SDValue RHS = N->getOperand(2);
4186 
4187   SDLoc DL(N);
4188 
4189   EVT CmpVT = LHS.getValueType();
4190   if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
4191     unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
4192       ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
4193     LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
4194     RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
4195   }
4196 
4197   ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
4198 
4199   unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
4200   EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
4201 
4202   SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS,
4203                               DAG.getCondCode(CCOpcode));
4204   if (VT.bitsEq(CCVT))
4205     return SetCC;
4206   return DAG.getZExtOrTrunc(SetCC, DL, VT);
4207 }
4208 
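// Lower llvm.amdgcn.fcmp analogously to the icmp case: invalid predicates
// become undef, illegal f16 operands are extended to f32, and the compare is
// emitted as an AMDGPUISD::SETCC yielding a wavefront-sized lane mask.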
4209 static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
4210                                   SDNode *N, SelectionDAG &DAG) {
4211   EVT VT = N->getValueType(0);
4212   const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
4213 
4214   int CondCode = CD->getSExtValue();
4215   if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
4216       CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) {
4217     return DAG.getUNDEF(VT);
4218   }
4219 
4220   SDValue Src0 = N->getOperand(1);
4221   SDValue Src1 = N->getOperand(2);
4222   EVT CmpVT = Src0.getValueType();
4223   SDLoc SL(N);
4224 
4225   if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
4226     Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
4227     Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
4228   }
4229 
4230   FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
4231   ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
4232   unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
4233   EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
4234   SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0,
4235                               Src1, DAG.getCondCode(CCOpcode));
4236   if (VT.bitsEq(CCVT))
4237     return SetCC;
4238   return DAG.getZExtOrTrunc(SetCC, SL, VT);
4239 }
4240 
4241 void SITargetLowering::ReplaceNodeResults(SDNode *N,
4242                                           SmallVectorImpl<SDValue> &Results,
4243                                           SelectionDAG &DAG) const {
4244   switch (N->getOpcode()) {
4245   case ISD::INSERT_VECTOR_ELT: {
4246     if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
4247       Results.push_back(Res);
4248     return;
4249   }
4250   case ISD::EXTRACT_VECTOR_ELT: {
4251     if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
4252       Results.push_back(Res);
4253     return;
4254   }
4255   case ISD::INTRINSIC_WO_CHAIN: {
4256     unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
4257     switch (IID) {
4258     case Intrinsic::amdgcn_cvt_pkrtz: {
4259       SDValue Src0 = N->getOperand(1);
4260       SDValue Src1 = N->getOperand(2);
4261       SDLoc SL(N);
4262       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
4263                                 Src0, Src1);
4264       Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
4265       return;
4266     }
4267     case Intrinsic::amdgcn_cvt_pknorm_i16:
4268     case Intrinsic::amdgcn_cvt_pknorm_u16:
4269     case Intrinsic::amdgcn_cvt_pk_i16:
4270     case Intrinsic::amdgcn_cvt_pk_u16: {
4271       SDValue Src0 = N->getOperand(1);
4272       SDValue Src1 = N->getOperand(2);
4273       SDLoc SL(N);
4274       unsigned Opcode;
4275 
4276       if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
4277         Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
4278       else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
4279         Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
4280       else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
4281         Opcode = AMDGPUISD::CVT_PK_I16_I32;
4282       else
4283         Opcode = AMDGPUISD::CVT_PK_U16_U32;
4284 
4285       EVT VT = N->getValueType(0);
4286       if (isTypeLegal(VT))
4287         Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
4288       else {
4289         SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
4290         Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
4291       }
4292       return;
4293     }
4294     }
4295     break;
4296   }
4297   case ISD::INTRINSIC_W_CHAIN: {
4298     if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
4299       if (Res.getOpcode() == ISD::MERGE_VALUES) {
4300         // FIXME: Hacky
4301         Results.push_back(Res.getOperand(0));
4302         Results.push_back(Res.getOperand(1));
4303       } else {
4304         Results.push_back(Res);
4305         Results.push_back(Res.getValue(1));
4306       }
4307       return;
4308     }
4309 
4310     break;
4311   }
4312   case ISD::SELECT: {
4313     SDLoc SL(N);
4314     EVT VT = N->getValueType(0);
4315     EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
4316     SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
4317     SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
4318 
4319     EVT SelectVT = NewVT;
4320     if (NewVT.bitsLT(MVT::i32)) {
4321       LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
4322       RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
4323       SelectVT = MVT::i32;
4324     }
4325 
4326     SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
4327                                     N->getOperand(0), LHS, RHS);
4328 
4329     if (NewVT != SelectVT)
4330       NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
4331     Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
4332     return;
4333   }
4334   case ISD::FNEG: {
4335     if (N->getValueType(0) != MVT::v2f16)
4336       break;
4337 
4338     SDLoc SL(N);
4339     SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
4340 
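    // Flip the sign bit of both f16 halves at once by XORing the i32 bits
    // with 0x80008000.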
4341     SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
4342                              BC,
4343                              DAG.getConstant(0x80008000, SL, MVT::i32));
4344     Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
4345     return;
4346   }
4347   case ISD::FABS: {
4348     if (N->getValueType(0) != MVT::v2f16)
4349       break;
4350 
4351     SDLoc SL(N);
4352     SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
4353 
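    // Clear the sign bit of both f16 halves at once by ANDing the i32 bits
    // with 0x7fff7fff.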
4354     SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
4355                              BC,
4356                              DAG.getConstant(0x7fff7fff, SL, MVT::i32));
4357     Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
4358     return;
4359   }
4360   default:
4361     break;
4362   }
4363 }
4364 
/// Helper function for LowerBRCOND: return a user of \p Value with the given
/// \p Opcode, or null if there is none.
static SDNode *findUser(SDValue Value, unsigned Opcode) {
  SDNode *Parent = Value.getNode();
  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
       I != E; ++I) {
    if (I.getUse().get() != Value)
4373       continue;
4374 
4375     if (I->getOpcode() == Opcode)
4376       return *I;
4377   }
4378   return nullptr;
4379 }
4380 
4381 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
4382   if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
4383     switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
4384     case Intrinsic::amdgcn_if:
4385       return AMDGPUISD::IF;
4386     case Intrinsic::amdgcn_else:
4387       return AMDGPUISD::ELSE;
4388     case Intrinsic::amdgcn_loop:
4389       return AMDGPUISD::LOOP;
4390     case Intrinsic::amdgcn_end_cf:
4391       llvm_unreachable("should not occur");
4392     default:
4393       return 0;
4394     }
4395   }
4396 
4397   // break, if_break, else_break are all only used as inputs to loop, not
4398   // directly as branch conditions.
4399   return 0;
4400 }
4401 
4402 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
4403   const Triple &TT = getTargetMachine().getTargetTriple();
4404   return (GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4405           GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4406          AMDGPU::shouldEmitConstantsToTextSection(TT);
4407 }
4408 
4409 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
4410   // FIXME: Either avoid relying on address space here or change the default
4411   // address space for functions to avoid the explicit check.
4412   return (GV->getValueType()->isFunctionTy() ||
4413           GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
4414           GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4415           GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4416          !shouldEmitFixup(GV) &&
4417          !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
4418 }
4419 
4420 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
4421   return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
4422 }
4423 
/// This transforms the control flow intrinsics to get the branch destination
/// as their last parameter, and also switches the branch target with BR if
/// needed.
4426 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
4427                                       SelectionDAG &DAG) const {
4428   SDLoc DL(BRCOND);
4429 
4430   SDNode *Intr = BRCOND.getOperand(1).getNode();
4431   SDValue Target = BRCOND.getOperand(2);
4432   SDNode *BR = nullptr;
4433   SDNode *SetCC = nullptr;
4434 
4435   if (Intr->getOpcode() == ISD::SETCC) {
4436     // As long as we negate the condition everything is fine
4437     SetCC = Intr;
4438     Intr = SetCC->getOperand(0).getNode();
4439 
  } else {
    // Get the target from BR if we don't negate the condition.
    BR = findUser(BRCOND, ISD::BR);
    assert(BR && "brcond missing unconditional branch user");
    Target = BR->getOperand(1);
  }
4445 
4446   // FIXME: This changes the types of the intrinsics instead of introducing new
4447   // nodes with the correct types.
4448   // e.g. llvm.amdgcn.loop
4449 
  // e.g. i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
  // =>   t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3,
  //      BasicBlock:ch<bb1 0x7fee5286d088>
4452 
4453   unsigned CFNode = isCFIntrinsic(Intr);
4454   if (CFNode == 0) {
4455     // This is a uniform branch so we don't need to legalize.
4456     return BRCOND;
4457   }
4458 
4459   bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
4460                    Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
4461 
4462   assert(!SetCC ||
4463         (SetCC->getConstantOperandVal(1) == 1 &&
4464          cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
4465                                                              ISD::SETNE));
4466 
4467   // operands of the new intrinsic call
4468   SmallVector<SDValue, 4> Ops;
4469   if (HaveChain)
4470     Ops.push_back(BRCOND.getOperand(0));
4471 
4472   Ops.append(Intr->op_begin() + (HaveChain ?  2 : 1), Intr->op_end());
4473   Ops.push_back(Target);
4474 
4475   ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
4476 
4477   // build the new intrinsic call
4478   SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
4479 
4480   if (!HaveChain) {
4481     SDValue Ops[] =  {
4482       SDValue(Result, 0),
4483       BRCOND.getOperand(0)
4484     };
4485 
4486     Result = DAG.getMergeValues(Ops, DL).getNode();
4487   }
4488 
4489   if (BR) {
4490     // Give the branch instruction our target
4491     SDValue Ops[] = {
4492       BR->getOperand(0),
4493       BRCOND.getOperand(2)
4494     };
4495     SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
4496     DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
4497     BR = NewBR.getNode();
4498   }
4499 
4500   SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
4501 
4502   // Copy the intrinsic results to registers
4503   for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
4504     SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
4505     if (!CopyToReg)
4506       continue;
4507 
4508     Chain = DAG.getCopyToReg(
4509       Chain, DL,
4510       CopyToReg->getOperand(1),
4511       SDValue(Result, i - 1),
4512       SDValue());
4513 
4514     DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
4515   }
4516 
4517   // Remove the old intrinsic from the chain
4518   DAG.ReplaceAllUsesOfValueWith(
4519     SDValue(Intr, Intr->getNumValues() - 1),
4520     Intr->getOperand(0));
4521 
4522   return Chain;
4523 }
4524 
4525 SDValue SITargetLowering::LowerRETURNADDR(SDValue Op,
4526                                           SelectionDAG &DAG) const {
4527   MVT VT = Op.getSimpleValueType();
4528   SDLoc DL(Op);
  // Only a depth of zero (the current frame) is supported.
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0)
    return DAG.getConstant(0, DL, VT);
4532 
4533   MachineFunction &MF = DAG.getMachineFunction();
4534   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  // Entry functions (kernels and shaders) have no return address.
  if (Info->isEntryFunction())
    return DAG.getConstant(0, DL, VT);
4538 
4539   MachineFrameInfo &MFI = MF.getFrameInfo();
4540   // There is a call to @llvm.returnaddress in this function
4541   MFI.setReturnAddressIsTaken(true);
4542 
4543   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
  // Get the return address reg and mark it as an implicit live-in.
  unsigned Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                              getRegClassFor(VT, Op.getNode()->isDivergent()));
4546 
4547   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
4548 }
4549 
4550 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
4551                                             SDValue Op,
4552                                             const SDLoc &DL,
4553                                             EVT VT) const {
  // A narrowing conversion must use FP_ROUND; ISD::FTRUNC would be invalid.
  return Op.getValueType().bitsLE(VT) ?
      DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
      DAG.getNode(ISD::FP_ROUND, DL, VT, Op,
                  DAG.getTargetConstant(0, DL, MVT::i32));
4557 }
4558 
4559 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
4560   assert(Op.getValueType() == MVT::f16 &&
4561          "Do not know how to custom lower FP_ROUND for non-f16 type");
4562 
4563   SDValue Src = Op.getOperand(0);
4564   EVT SrcVT = Src.getValueType();
4565   if (SrcVT != MVT::f64)
4566     return Op;
4567 
4568   SDLoc DL(Op);
4569 
4570   SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
4571   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
4572   return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
4573 }
4574 
4575 SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
4576                                                SelectionDAG &DAG) const {
4577   EVT VT = Op.getValueType();
4578   const MachineFunction &MF = DAG.getMachineFunction();
4579   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4580   bool IsIEEEMode = Info->getMode().IEEE;
4581 
  // FIXME: Assert during selection that this is only selected for
  // ieee_mode. Currently a combine can produce the ieee version for non-ieee
  // mode functions, but this happens to be OK since it's only done in cases
  // where it is known there are no sNaNs.
4586   if (IsIEEEMode)
4587     return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);
4588 
4589   if (VT == MVT::v4f16)
4590     return splitBinaryVectorOp(Op, DAG);
4591   return Op;
4592 }
4593 
4594 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
4595   SDLoc SL(Op);
4596   SDValue Chain = Op.getOperand(0);
4597 
4598   if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4599       !Subtarget->isTrapHandlerEnabled())
4600     return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
4601 
4602   MachineFunction &MF = DAG.getMachineFunction();
4603   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4604   unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4605   assert(UserSGPR != AMDGPU::NoRegister);
4606   SDValue QueuePtr = CreateLiveInRegister(
4607     DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4608   SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
4609   SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
4610                                    QueuePtr, SDValue());
4611   SDValue Ops[] = {
4612     ToReg,
4613     DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16),
4614     SGPR01,
4615     ToReg.getValue(1)
4616   };
4617   return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4618 }
4619 
4620 SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
4621   SDLoc SL(Op);
4622   SDValue Chain = Op.getOperand(0);
4623   MachineFunction &MF = DAG.getMachineFunction();
4624 
4625   if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4626       !Subtarget->isTrapHandlerEnabled()) {
4627     DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
4628                                      "debugtrap handler not supported",
4629                                      Op.getDebugLoc(),
4630                                      DS_Warning);
4631     LLVMContext &Ctx = MF.getFunction().getContext();
4632     Ctx.diagnose(NoTrap);
4633     return Chain;
4634   }
4635 
4636   SDValue Ops[] = {
4637     Chain,
4638     DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16)
4639   };
4640   return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4641 }
4642 
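// Return the high 32 bits of the aperture base for the given LDS or private
// address space. Subtargets with aperture registers read it with s_getreg
// from the MEM_BASES hardware register; otherwise it is loaded from the
// group/private aperture fields of the amd_queue_t pointed to by the
// queue-pointer user SGPR.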
4643 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
4644                                              SelectionDAG &DAG) const {
4645   // FIXME: Use inline constants (src_{shared, private}_base) instead.
4646   if (Subtarget->hasApertureRegs()) {
4647     unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
4648         AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
4649         AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
4650     unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
4651         AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
4652         AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
4653     unsigned Encoding =
4654         AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
4655         Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
4656         WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
4657 
4658     SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
4659     SDValue ApertureReg = SDValue(
4660         DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
4661     SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
4662     return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
4663   }
4664 
4665   MachineFunction &MF = DAG.getMachineFunction();
4666   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4667   unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4668   assert(UserSGPR != AMDGPU::NoRegister);
4669 
4670   SDValue QueuePtr = CreateLiveInRegister(
4671     DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4672 
4673   // Offset into amd_queue_t for group_segment_aperture_base_hi /
4674   // private_segment_aperture_base_hi.
4675   uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
4676 
4677   SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
4678 
4679   // TODO: Use custom target PseudoSourceValue.
  // TODO: We should use the value from the IR intrinsic call, but it might not
  // be available here, and it is not obvious how to recover it.
4682   Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
4683                                               AMDGPUAS::CONSTANT_ADDRESS));
4684 
4685   MachinePointerInfo PtrInfo(V, StructOffset);
4686   return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
4687                      MinAlign(64, StructOffset),
4688                      MachineMemOperand::MODereferenceable |
4689                          MachineMemOperand::MOInvariant);
4690 }
4691 
4692 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
4693                                              SelectionDAG &DAG) const {
4694   SDLoc SL(Op);
4695   const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
4696 
4697   SDValue Src = ASC->getOperand(0);
4698   SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
4699 
4700   const AMDGPUTargetMachine &TM =
4701     static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
4702 
4703   // flat -> local/private
4704   if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
4705     unsigned DestAS = ASC->getDestAddressSpace();
4706 
4707     if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
4708         DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
4709       unsigned NullVal = TM.getNullPointerValue(DestAS);
4710       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4711       SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
4712       SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
4713 
4714       return DAG.getNode(ISD::SELECT, SL, MVT::i32,
4715                          NonNull, Ptr, SegmentNullPtr);
4716     }
4717   }
4718 
4719   // local/private -> flat
4720   if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
4721     unsigned SrcAS = ASC->getSrcAddressSpace();
4722 
4723     if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
4724         SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
4725       unsigned NullVal = TM.getNullPointerValue(SrcAS);
4726       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4727 
4728       SDValue NonNull
4729         = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
4730 
4731       SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
4732       SDValue CvtPtr
4733         = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
4734 
4735       return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
4736                          DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
4737                          FlatNullPtr);
4738     }
4739   }
4740 
4741   // global <-> flat are no-ops and never emitted.
4742 
4743   const MachineFunction &MF = DAG.getMachineFunction();
4744   DiagnosticInfoUnsupported InvalidAddrSpaceCast(
4745     MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
4746   DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4747 
4748   return DAG.getUNDEF(ASC->getValueType(0));
4749 }
4750 
4751 // This lowers an INSERT_SUBVECTOR by extracting the individual elements from
4752 // the small vector and inserting them into the big vector. That is better than
4753 // the default expansion of doing it via a stack slot. Even though the use of
4754 // the stack slot would be optimized away afterwards, the stack slot itself
4755 // remains.
4756 SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
4757                                                 SelectionDAG &DAG) const {
4758   SDValue Vec = Op.getOperand(0);
4759   SDValue Ins = Op.getOperand(1);
4760   SDValue Idx = Op.getOperand(2);
4761   EVT VecVT = Vec.getValueType();
4762   EVT InsVT = Ins.getValueType();
4763   EVT EltVT = VecVT.getVectorElementType();
4764   unsigned InsNumElts = InsVT.getVectorNumElements();
4765   unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
4766   SDLoc SL(Op);
4767 
4768   for (unsigned I = 0; I != InsNumElts; ++I) {
4769     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins,
4770                               DAG.getConstant(I, SL, MVT::i32));
4771     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt,
4772                       DAG.getConstant(IdxVal + I, SL, MVT::i32));
4773   }
4774   return Vec;
4775 }
4776 
4777 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4778                                                  SelectionDAG &DAG) const {
4779   SDValue Vec = Op.getOperand(0);
4780   SDValue InsVal = Op.getOperand(1);
4781   SDValue Idx = Op.getOperand(2);
4782   EVT VecVT = Vec.getValueType();
4783   EVT EltVT = VecVT.getVectorElementType();
4784   unsigned VecSize = VecVT.getSizeInBits();
4785   unsigned EltSize = EltVT.getSizeInBits();
4786 
4788   assert(VecSize <= 64);
4789 
4790   unsigned NumElts = VecVT.getVectorNumElements();
4791   SDLoc SL(Op);
4792   auto KIdx = dyn_cast<ConstantSDNode>(Idx);
4793 
4794   if (NumElts == 4 && EltSize == 16 && KIdx) {
4795     SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
4796 
4797     SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4798                                  DAG.getConstant(0, SL, MVT::i32));
4799     SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4800                                  DAG.getConstant(1, SL, MVT::i32));
4801 
4802     SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
4803     SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
4804 
4805     unsigned Idx = KIdx->getZExtValue();
4806     bool InsertLo = Idx < 2;
4807     SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
4808       InsertLo ? LoVec : HiVec,
4809       DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
4810       DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
4811 
4812     InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
4813 
4814     SDValue Concat = InsertLo ?
4815       DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
4816       DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
4817 
4818     return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
4819   }
4820 
4821   if (isa<ConstantSDNode>(Idx))
4822     return SDValue();
4823 
4824   MVT IntVT = MVT::getIntegerVT(VecSize);
4825 
4826   // Avoid stack access for dynamic indexing.
4827   // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
4828 
4829   // Create a congruent vector with the target value in each element so that
4830   // the required element can be masked and ORed into the target vector.
4831   SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT,
4832                                DAG.getSplatBuildVector(VecVT, SL, InsVal));
4833 
4834   assert(isPowerOf2_32(EltSize));
4835   SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4836 
4837   // Convert vector index to bit-index.
4838   SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
4839 
4840   SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4841   SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
4842                             DAG.getConstant(0xffff, SL, IntVT),
4843                             ScaledIdx);
4844 
4845   SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);
4846   SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT,
4847                             DAG.getNOT(SL, BFM, IntVT), BCVec);
4848 
4849   SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS);
4850   return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI);
4851 }
4852 
4853 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4854                                                   SelectionDAG &DAG) const {
4855   SDLoc SL(Op);
4856 
4857   EVT ResultVT = Op.getValueType();
4858   SDValue Vec = Op.getOperand(0);
4859   SDValue Idx = Op.getOperand(1);
4860   EVT VecVT = Vec.getValueType();
4861   unsigned VecSize = VecVT.getSizeInBits();
4862   EVT EltVT = VecVT.getVectorElementType();
4863   assert(VecSize <= 64);
4864 
4865   DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
4866 
4867   // Make sure we do any optimizations that will make it easier to fold
4868   // source modifiers before obscuring it with bit operations.
4869 
4870   // XXX - Why doesn't this get called when vector_shuffle is expanded?
4871   if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
4872     return Combined;
4873 
4874   unsigned EltSize = EltVT.getSizeInBits();
4875   assert(isPowerOf2_32(EltSize));
4876 
4877   MVT IntVT = MVT::getIntegerVT(VecSize);
4878   SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4879 
4880   // Convert vector index to bit-index (* EltSize)
4881   SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
4882 
4883   SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4884   SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx);
4885 
4886   if (ResultVT == MVT::f16) {
4887     SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt);
4888     return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4889   }
4890 
4891   return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT);
4892 }
4893 
4894 static bool elementPairIsContiguous(ArrayRef<int> Mask, int Elt) {
4895   assert(Elt % 2 == 0);
4896   return Mask[Elt + 1] == Mask[Elt] + 1 && (Mask[Elt] % 2 == 0);
4897 }
4898 
4899 SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
4900                                               SelectionDAG &DAG) const {
4901   SDLoc SL(Op);
4902   EVT ResultVT = Op.getValueType();
4903   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
4904 
4905   EVT PackVT = ResultVT.isInteger() ? MVT::v2i16 : MVT::v2f16;
4906   EVT EltVT = PackVT.getVectorElementType();
4907   int SrcNumElts = Op.getOperand(0).getValueType().getVectorNumElements();
4908 
4909   // vector_shuffle <0,1,6,7> lhs, rhs
4910   // -> concat_vectors (extract_subvector lhs, 0), (extract_subvector rhs, 2)
4911   //
4912   // vector_shuffle <6,7,2,3> lhs, rhs
4913   // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 2)
4914   //
4915   // vector_shuffle <6,7,0,1> lhs, rhs
4916   // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 0)
4917 
4918   // Avoid scalarizing when both halves are reading from consecutive elements.
4919   SmallVector<SDValue, 4> Pieces;
4920   for (int I = 0, N = ResultVT.getVectorNumElements(); I != N; I += 2) {
4921     if (elementPairIsContiguous(SVN->getMask(), I)) {
4922       const int Idx = SVN->getMaskElt(I);
4923       int VecIdx = Idx < SrcNumElts ? 0 : 1;
4924       int EltIdx = Idx < SrcNumElts ? Idx : Idx - SrcNumElts;
4925       SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL,
4926                                     PackVT, SVN->getOperand(VecIdx),
4927                                     DAG.getConstant(EltIdx, SL, MVT::i32));
4928       Pieces.push_back(SubVec);
4929     } else {
4930       const int Idx0 = SVN->getMaskElt(I);
4931       const int Idx1 = SVN->getMaskElt(I + 1);
4932       int VecIdx0 = Idx0 < SrcNumElts ? 0 : 1;
4933       int VecIdx1 = Idx1 < SrcNumElts ? 0 : 1;
4934       int EltIdx0 = Idx0 < SrcNumElts ? Idx0 : Idx0 - SrcNumElts;
4935       int EltIdx1 = Idx1 < SrcNumElts ? Idx1 : Idx1 - SrcNumElts;
4936 
4937       SDValue Vec0 = SVN->getOperand(VecIdx0);
4938       SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
4939                                  Vec0, DAG.getConstant(EltIdx0, SL, MVT::i32));
4940 
4941       SDValue Vec1 = SVN->getOperand(VecIdx1);
4942       SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
4943                                  Vec1, DAG.getConstant(EltIdx1, SL, MVT::i32));
4944       Pieces.push_back(DAG.getBuildVector(PackVT, SL, { Elt0, Elt1 }));
4945     }
4946   }
4947 
4948   return DAG.getNode(ISD::CONCAT_VECTORS, SL, ResultVT, Pieces);
4949 }
4950 
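// Lower BUILD_VECTOR of packed 16-bit element types. v4i16/v4f16 is split
// into two packed halves that are blended into a v2i32; v2i16/v2f16 (only
// reached when VOP3P is unavailable) is assembled in a 32-bit register with
// zero-extends, a shift and an OR.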
4951 SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
4952                                             SelectionDAG &DAG) const {
4953   SDLoc SL(Op);
4954   EVT VT = Op.getValueType();
4955 
4956   if (VT == MVT::v4i16 || VT == MVT::v4f16) {
4957     EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2);
4958 
4959     // Turn into pair of packed build_vectors.
4960     // TODO: Special case for constants that can be materialized with s_mov_b64.
4961     SDValue Lo = DAG.getBuildVector(HalfVT, SL,
4962                                     { Op.getOperand(0), Op.getOperand(1) });
4963     SDValue Hi = DAG.getBuildVector(HalfVT, SL,
4964                                     { Op.getOperand(2), Op.getOperand(3) });
4965 
4966     SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo);
4967     SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi);
4968 
4969     SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi });
4970     return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
4971   }
4972 
4973   assert(VT == MVT::v2f16 || VT == MVT::v2i16);
4974   assert(!Subtarget->hasVOP3PInsts() && "this should be legal");
4975 
4976   SDValue Lo = Op.getOperand(0);
4977   SDValue Hi = Op.getOperand(1);
4978 
4979   // Avoid adding defined bits with the zero_extend.
4980   if (Hi.isUndef()) {
4981     Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4982     SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo);
4983     return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo);
4984   }
4985 
4986   Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi);
4987   Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi);
4988 
4989   SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi,
4990                               DAG.getConstant(16, SL, MVT::i32));
4991   if (Lo.isUndef())
4992     return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi);
4993 
4994   Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4995   Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo);
4996 
4997   SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi);
4998   return DAG.getNode(ISD::BITCAST, SL, VT, Or);
4999 }
5000 
5001 bool
5002 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
5003   // We can fold offsets for anything that doesn't require a GOT relocation.
5004   return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
5005           GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
5006           GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
5007          !shouldEmitGOTReloc(GA->getGlobal());
5008 }
5009 
5010 static SDValue
5011 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
5012                         const SDLoc &DL, unsigned Offset, EVT PtrVT,
5013                         unsigned GAFlags = SIInstrInfo::MO_NONE) {
5014   // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
5015   // lowered to the following code sequence:
5016   //
5017   // For constant address space:
5018   //   s_getpc_b64 s[0:1]
5019   //   s_add_u32 s0, s0, $symbol
5020   //   s_addc_u32 s1, s1, 0
5021   //
5022   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
5023   //   a fixup or relocation is emitted to replace $symbol with a literal
5024   //   constant, which is a pc-relative offset from the encoding of the $symbol
5025   //   operand to the global variable.
5026   //
5027   // For global address space:
5028   //   s_getpc_b64 s[0:1]
5029   //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
5030   //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
5031   //
5032   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
5033   //   fixups or relocations are emitted to replace $symbol@*@lo and
5034   //   $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
5035   //   which is a 64-bit pc-relative offset from the encoding of the $symbol
5036   //   operand to the global variable.
5037   //
5038   // What we want here is an offset from the value returned by s_getpc
5039   // (which is the address of the s_add_u32 instruction) to the global
5040   // variable, but since the encoding of $symbol starts 4 bytes after the start
5041   // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
5042   // small. This requires us to add 4 to the global variable offset in order to
5043   // compute the correct address.
5044   SDValue PtrLo =
5045       DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags);
5046   SDValue PtrHi;
5047   if (GAFlags == SIInstrInfo::MO_NONE) {
5048     PtrHi = DAG.getTargetConstant(0, DL, MVT::i32);
5049   } else {
5050     PtrHi =
5051         DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags + 1);
5052   }
5053   return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
5054 }
5055 
5056 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
5057                                              SDValue Op,
5058                                              SelectionDAG &DAG) const {
5059   GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
5060   const GlobalValue *GV = GSD->getGlobal();
5061   if ((GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
5062        (!GV->hasExternalLinkage() ||
5063         getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA ||
5064         getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL)) ||
5065       GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS ||
5066       GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS)
5067     return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
5068 
5069   SDLoc DL(GSD);
5070   EVT PtrVT = Op.getValueType();
5071 
5072   if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
5073     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, GSD->getOffset(),
5074                                             SIInstrInfo::MO_ABS32_LO);
5075     return DAG.getNode(AMDGPUISD::LDS, DL, MVT::i32, GA);
5076   }
5077 
5078   if (shouldEmitFixup(GV))
5079     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
5080   else if (shouldEmitPCReloc(GV))
5081     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
5082                                    SIInstrInfo::MO_REL32);
5083 
5084   SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
5085                                             SIInstrInfo::MO_GOTPCREL32);
5086 
5087   Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
5088   PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
5089   const DataLayout &DataLayout = DAG.getDataLayout();
5090   unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
5091   MachinePointerInfo PtrInfo
5092     = MachinePointerInfo::getGOT(DAG.getMachineFunction());
5093 
5094   return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
5095                      MachineMemOperand::MODereferenceable |
5096                          MachineMemOperand::MOInvariant);
5097 }
5098 
5099 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
5100                                    const SDLoc &DL, SDValue V) const {
5101   // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
5102   // the destination register.
5103   //
5104   // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
5105   // so we will end up with redundant moves to m0.
5106   //
5107   // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
5108 
5109   // A Null SDValue creates a glue result.
5110   SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
5111                                   V, Chain);
5112   return SDValue(M0, 0);
5113 }
5114 
5115 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
5116                                                  SDValue Op,
5117                                                  MVT VT,
5118                                                  unsigned Offset) const {
5119   SDLoc SL(Op);
5120   SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
5121                                            DAG.getEntryNode(), Offset, 4, false);
5122   // The local size values will have the hi 16-bits as zero.
5123   return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
5124                      DAG.getValueType(VT));
5125 }
5126 
5127 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
5128                                         EVT VT) {
5129   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
5130                                       "non-hsa intrinsic with hsa target",
5131                                       DL.getDebugLoc());
5132   DAG.getContext()->diagnose(BadIntrin);
5133   return DAG.getUNDEF(VT);
5134 }
5135 
5136 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
5137                                          EVT VT) {
5138   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
5139                                       "intrinsic not supported on subtarget",
5140                                       DL.getDebugLoc());
5141   DAG.getContext()->diagnose(BadIntrin);
5142   return DAG.getUNDEF(VT);
5143 }
5144 
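// Pack a list of dword-sized operands into the smallest f32 vector that can
// hold them (1, 2, 4, 8 or 16 elements), bitcasting each operand to f32 and
// filling unused lanes with undef. Used to build e.g. the vaddr operand of
// MIMG instructions.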
5145 static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
5146                                     ArrayRef<SDValue> Elts) {
5147   assert(!Elts.empty());
5148   MVT Type;
5149   unsigned NumElts;
5150 
5151   if (Elts.size() == 1) {
5152     Type = MVT::f32;
5153     NumElts = 1;
5154   } else if (Elts.size() == 2) {
5155     Type = MVT::v2f32;
5156     NumElts = 2;
5157   } else if (Elts.size() <= 4) {
5158     Type = MVT::v4f32;
5159     NumElts = 4;
5160   } else if (Elts.size() <= 8) {
5161     Type = MVT::v8f32;
5162     NumElts = 8;
5163   } else {
5164     assert(Elts.size() <= 16);
5165     Type = MVT::v16f32;
5166     NumElts = 16;
5167   }
5168 
5169   SmallVector<SDValue, 16> VecElts(NumElts);
5170   for (unsigned i = 0; i < Elts.size(); ++i) {
5171     SDValue Elt = Elts[i];
5172     if (Elt.getValueType() != MVT::f32)
5173       Elt = DAG.getBitcast(MVT::f32, Elt);
5174     VecElts[i] = Elt;
5175   }
5176   for (unsigned i = Elts.size(); i < NumElts; ++i)
5177     VecElts[i] = DAG.getUNDEF(MVT::f32);
5178 
5179   if (NumElts == 1)
5180     return VecElts[0];
5181   return DAG.getBuildVector(Type, DL, VecElts);
5182 }
5183 
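// Decode the cachepolicy immediate of a buffer/image intrinsic into glc/slc/
// dlc target constants for the requested outputs. Returns true only if every
// set bit was consumed, so callers can reject unsupported cache policy bits.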
5184 static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG,
5185                              SDValue *GLC, SDValue *SLC, SDValue *DLC) {
5186   auto CachePolicyConst = cast<ConstantSDNode>(CachePolicy.getNode());
5187 
5188   uint64_t Value = CachePolicyConst->getZExtValue();
5189   SDLoc DL(CachePolicy);
5190   if (GLC) {
5191     *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
5192     Value &= ~(uint64_t)0x1;
5193   }
5194   if (SLC) {
5195     *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
5196     Value &= ~(uint64_t)0x2;
5197   }
5198   if (DLC) {
5199     *DLC = DAG.getTargetConstant((Value & 0x4) ? 1 : 0, DL, MVT::i32);
5200     Value &= ~(uint64_t)0x4;
5201   }
5202 
5203   return Value == 0;
5204 }
5205 
// Re-construct the required return value for an image load intrinsic.
// This is more complicated due to the optional use of TexFailCtrl, which means
// the required return type is an aggregate.
5209 static SDValue constructRetValue(SelectionDAG &DAG,
5210                                  MachineSDNode *Result,
5211                                  ArrayRef<EVT> ResultTypes,
5212                                  bool IsTexFail, bool Unpacked, bool IsD16,
5213                                  int DMaskPop, int NumVDataDwords,
5214                                  const SDLoc &DL, LLVMContext &Context) {
  // Determine the required return type. This is the same regardless of the
  // IsTexFail flag.
5216   EVT ReqRetVT = ResultTypes[0];
5217   EVT ReqRetEltVT = ReqRetVT.isVector() ? ReqRetVT.getVectorElementType() : ReqRetVT;
5218   int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1;
5219   EVT AdjEltVT = Unpacked && IsD16 ? MVT::i32 : ReqRetEltVT;
5220   EVT AdjVT = Unpacked ? ReqRetNumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, ReqRetNumElts)
5221                                            : AdjEltVT
5222                        : ReqRetVT;
5223 
5224   // Extract data part of the result
5225   // Bitcast the result to the same type as the required return type
5226   int NumElts;
5227   if (IsD16 && !Unpacked)
5228     NumElts = NumVDataDwords << 1;
5229   else
5230     NumElts = NumVDataDwords;
5231 
5232   EVT CastVT = NumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, NumElts)
5233                            : AdjEltVT;
5234 
5235   // Special case for v6f16. Rather than add support for this, use v3i32 to
5236   // extract the data elements
5237   bool V6F16Special = false;
5238   if (NumElts == 6) {
5239     CastVT = EVT::getVectorVT(Context, MVT::i32, NumElts / 2);
5240     DMaskPop >>= 1;
5241     ReqRetNumElts >>= 1;
5242     V6F16Special = true;
5243     AdjVT = MVT::v2i32;
5244   }
5245 
5246   SDValue N = SDValue(Result, 0);
5247   SDValue CastRes = DAG.getNode(ISD::BITCAST, DL, CastVT, N);
5248 
5249   // Iterate over the result
5250   SmallVector<SDValue, 4> BVElts;
5251 
5252   if (CastVT.isVector()) {
5253     DAG.ExtractVectorElements(CastRes, BVElts, 0, DMaskPop);
5254   } else {
5255     BVElts.push_back(CastRes);
5256   }
5257   int ExtraElts = ReqRetNumElts - DMaskPop;
  while (ExtraElts--)
5259     BVElts.push_back(DAG.getUNDEF(AdjEltVT));
5260 
5261   SDValue PreTFCRes;
5262   if (ReqRetNumElts > 1) {
5263     SDValue NewVec = DAG.getBuildVector(AdjVT, DL, BVElts);
5264     if (IsD16 && Unpacked)
5265       PreTFCRes = adjustLoadValueTypeImpl(NewVec, ReqRetVT, DL, DAG, Unpacked);
5266     else
5267       PreTFCRes = NewVec;
5268   } else {
5269     PreTFCRes = BVElts[0];
5270   }
5271 
5272   if (V6F16Special)
5273     PreTFCRes = DAG.getNode(ISD::BITCAST, DL, MVT::v4f16, PreTFCRes);
5274 
5275   if (!IsTexFail) {
5276     if (Result->getNumValues() > 1)
5277       return DAG.getMergeValues({PreTFCRes, SDValue(Result, 1)}, DL);
5278     else
5279       return PreTFCRes;
5280   }
5281 
5282   // Extract the TexFail result and insert into aggregate return
5283   SmallVector<SDValue, 1> TFCElt;
5284   DAG.ExtractVectorElements(N, TFCElt, DMaskPop, 1);
5285   SDValue TFCRes = DAG.getNode(ISD::BITCAST, DL, ResultTypes[1], TFCElt[0]);
5286   return DAG.getMergeValues({PreTFCRes, TFCRes, SDValue(Result, 1)}, DL);
5287 }
5288 
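// Decode the texfailctrl immediate into tfe/lwe target constants and record
// in IsTexFail whether any bit was set. Returns true only if no unknown bits
// remain.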
5289 static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE,
5290                          SDValue *LWE, bool &IsTexFail) {
5291   auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode());
5292 
5293   uint64_t Value = TexFailCtrlConst->getZExtValue();
5294   if (Value) {
5295     IsTexFail = true;
5296   }
5297 
5298   SDLoc DL(TexFailCtrlConst);
5299   *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
5300   Value &= ~(uint64_t)0x1;
5301   *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
5302   Value &= ~(uint64_t)0x2;
5303 
5304   return Value == 0;
5305 }
5306 
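// Lower an image (MIMG) intrinsic. This works out the vdata operand and dmask
// for atomics and stores, the number of result dwords for loads, folds _L to
// _LZ and drops _mip when the lod is a known zero, packs 16-bit addresses
// when the subtarget supports A16, chooses between the NSA encoding and a
// single contiguous vaddr vector, and parses the unorm and texfailctrl
// operands before building the final instruction.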
5307 SDValue SITargetLowering::lowerImage(SDValue Op,
5308                                      const AMDGPU::ImageDimIntrinsicInfo *Intr,
5309                                      SelectionDAG &DAG) const {
5310   SDLoc DL(Op);
5311   MachineFunction &MF = DAG.getMachineFunction();
5312   const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>();
5313   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
5314       AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
5315   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
5316   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
5317       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
5318   const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
5319       AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
5320   unsigned IntrOpcode = Intr->BaseOpcode;
5321   bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10;
5322 
5323   SmallVector<EVT, 3> ResultTypes(Op->value_begin(), Op->value_end());
5324   SmallVector<EVT, 3> OrigResultTypes(Op->value_begin(), Op->value_end());
5325   bool IsD16 = false;
5326   bool IsA16 = false;
5327   SDValue VData;
5328   int NumVDataDwords;
5329   bool AdjustRetType = false;
5330 
5331   unsigned AddrIdx; // Index of first address argument
5332   unsigned DMask;
5333   unsigned DMaskLanes = 0;
5334 
5335   if (BaseOpcode->Atomic) {
5336     VData = Op.getOperand(2);
5337 
5338     bool Is64Bit = VData.getValueType() == MVT::i64;
5339     if (BaseOpcode->AtomicX2) {
5340       SDValue VData2 = Op.getOperand(3);
5341       VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL,
5342                                  {VData, VData2});
5343       if (Is64Bit)
5344         VData = DAG.getBitcast(MVT::v4i32, VData);
5345 
5346       ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32;
5347       DMask = Is64Bit ? 0xf : 0x3;
5348       NumVDataDwords = Is64Bit ? 4 : 2;
5349       AddrIdx = 4;
5350     } else {
5351       DMask = Is64Bit ? 0x3 : 0x1;
5352       NumVDataDwords = Is64Bit ? 2 : 1;
5353       AddrIdx = 3;
5354     }
5355   } else {
5356     unsigned DMaskIdx = BaseOpcode->Store ? 3 : isa<MemSDNode>(Op) ? 2 : 1;
5357     auto DMaskConst = cast<ConstantSDNode>(Op.getOperand(DMaskIdx));
5358     DMask = DMaskConst->getZExtValue();
5359     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
5360 
5361     if (BaseOpcode->Store) {
5362       VData = Op.getOperand(2);
5363 
5364       MVT StoreVT = VData.getSimpleValueType();
5365       if (StoreVT.getScalarType() == MVT::f16) {
5366         if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
5367           return Op; // D16 is unsupported for this instruction
5368 
5369         IsD16 = true;
5370         VData = handleD16VData(VData, DAG);
5371       }
5372 
5373       NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32;
5374     } else {
      // Work out the number of dwords based on the dmask popcount, the
      // underlying type, and whether packing is supported.
5377       MVT LoadVT = ResultTypes[0].getSimpleVT();
5378       if (LoadVT.getScalarType() == MVT::f16) {
5379         if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
5380           return Op; // D16 is unsupported for this instruction
5381 
5382         IsD16 = true;
5383       }
5384 
5385       // Confirm that the return type is large enough for the dmask specified
5386       if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) ||
5387           (!LoadVT.isVector() && DMaskLanes > 1))
5388           return Op;
5389 
5390       if (IsD16 && !Subtarget->hasUnpackedD16VMem())
5391         NumVDataDwords = (DMaskLanes + 1) / 2;
5392       else
5393         NumVDataDwords = DMaskLanes;
5394 
5395       AdjustRetType = true;
5396     }
5397 
5398     AddrIdx = DMaskIdx + 1;
5399   }
5400 
5401   unsigned NumGradients = BaseOpcode->Gradients ? DimInfo->NumGradients : 0;
5402   unsigned NumCoords = BaseOpcode->Coordinates ? DimInfo->NumCoords : 0;
5403   unsigned NumLCM = BaseOpcode->LodOrClampOrMip ? 1 : 0;
5404   unsigned NumVAddrs = BaseOpcode->NumExtraArgs + NumGradients +
5405                        NumCoords + NumLCM;
5406   unsigned NumMIVAddrs = NumVAddrs;
5407 
5408   SmallVector<SDValue, 4> VAddrs;
5409 
5410   // Optimize _L to _LZ when _L is zero
5411   if (LZMappingInfo) {
5412     if (auto ConstantLod =
5413          dyn_cast<ConstantFPSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
5414       if (ConstantLod->isZero() || ConstantLod->isNegative()) {
5415         IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
5416         NumMIVAddrs--;               // remove 'lod'
5417       }
5418     }
5419   }
5420 
  // Optimize _mip away when 'lod' is zero.
5422   if (MIPMappingInfo) {
5423     if (auto ConstantLod =
5424          dyn_cast<ConstantSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
5425       if (ConstantLod->isNullValue()) {
5426         IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
5427         NumMIVAddrs--;               // remove 'lod'
5428       }
5429     }
5430   }
5431 
5432   // Check for 16 bit addresses and pack if true.
5433   unsigned DimIdx = AddrIdx + BaseOpcode->NumExtraArgs;
5434   MVT VAddrVT = Op.getOperand(DimIdx).getSimpleValueType();
5435   const MVT VAddrScalarVT = VAddrVT.getScalarType();
5436   if (((VAddrScalarVT == MVT::f16) || (VAddrScalarVT == MVT::i16)) &&
5437       ST->hasFeature(AMDGPU::FeatureR128A16)) {
5438     IsA16 = true;
5439     const MVT VectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16;
5440     for (unsigned i = AddrIdx; i < (AddrIdx + NumMIVAddrs); ++i) {
5441       SDValue AddrLo, AddrHi;
5442       // Push back extra arguments.
5443       if (i < DimIdx) {
5444         AddrLo = Op.getOperand(i);
5445       } else {
5446         AddrLo = Op.getOperand(i);
        // dz/dh, dz/dv and the last odd coord are packed with undef. Also,
        // in 1D, derivatives dx/dh and dx/dv are packed with undef.
5449         if (((i + 1) >= (AddrIdx + NumMIVAddrs)) ||
5450             ((NumGradients / 2) % 2 == 1 &&
5451             (i == DimIdx + (NumGradients / 2) - 1 ||
5452              i == DimIdx + NumGradients - 1))) {
5453           AddrHi = DAG.getUNDEF(MVT::f16);
5454         } else {
5455           AddrHi = Op.getOperand(i + 1);
5456           i++;
5457         }
5458         AddrLo = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VectorVT,
5459                              {AddrLo, AddrHi});
5460         AddrLo = DAG.getBitcast(MVT::i32, AddrLo);
5461       }
5462       VAddrs.push_back(AddrLo);
5463     }
5464   } else {
5465     for (unsigned i = 0; i < NumMIVAddrs; ++i)
5466       VAddrs.push_back(Op.getOperand(AddrIdx + i));
5467   }
5468 
5469   // If the register allocator cannot place the address registers contiguously
5470   // without introducing moves, then using the non-sequential address encoding
5471   // is always preferable, since it saves VALU instructions and is usually a
5472   // wash in terms of code size or even better.
5473   //
5474   // However, we currently have no way of hinting to the register allocator that
5475   // MIMG addresses should be placed contiguously when it is possible to do so,
5476   // so force non-NSA for the common 2-address case as a heuristic.
5477   //
5478   // SIShrinkInstructions will convert NSA encodings to non-NSA after register
5479   // allocation when possible.
5480   bool UseNSA =
5481       ST->hasFeature(AMDGPU::FeatureNSAEncoding) && VAddrs.size() >= 3;
5482   SDValue VAddr;
5483   if (!UseNSA)
5484     VAddr = getBuildDwordsVector(DAG, DL, VAddrs);
5485 
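  // i1 immediates used to fill in the boolean MIMG operands below.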
5486   SDValue True = DAG.getTargetConstant(1, DL, MVT::i1);
5487   SDValue False = DAG.getTargetConstant(0, DL, MVT::i1);
5488   unsigned CtrlIdx; // Index of texfailctrl argument
5489   SDValue Unorm;
5490   if (!BaseOpcode->Sampler) {
5491     Unorm = True;
5492     CtrlIdx = AddrIdx + NumVAddrs + 1;
5493   } else {
5494     auto UnormConst =
5495         cast<ConstantSDNode>(Op.getOperand(AddrIdx + NumVAddrs + 2));
5496 
5497     Unorm = UnormConst->getZExtValue() ? True : False;
5498     CtrlIdx = AddrIdx + NumVAddrs + 3;
5499   }
5500 
5501   SDValue TFE;
5502   SDValue LWE;
5503   SDValue TexFail = Op.getOperand(CtrlIdx);
5504   bool IsTexFail = false;
5505   if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail))
5506     return Op;
5507 
5508   if (IsTexFail) {
5509     if (!DMaskLanes) {
      // Expecting to get an error flag since TFC is on and dmask is 0.
      // Force dmask to be at least 1, otherwise the instruction will fail.
5512       DMask = 0x1;
5513       DMaskLanes = 1;
5514       NumVDataDwords = 1;
5515     }
5516     NumVDataDwords += 1;
5517     AdjustRetType = true;
5518   }
5519 
  // Something earlier has tagged the return type as needing adjustment.
  // This happens if the instruction is a load or has set TexFailCtrl flags.
5522   if (AdjustRetType) {
5523     // NumVDataDwords reflects the true number of dwords required in the return type
5524     if (DMaskLanes == 0 && !BaseOpcode->Store) {
      // This is a no-op load. It can be eliminated.
5526       SDValue Undef = DAG.getUNDEF(Op.getValueType());
5527       if (isa<MemSDNode>(Op))
5528         return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
5529       return Undef;
5530     }
5531 
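    // Build the adjusted return type: one f32 per result dword, as a vector
    // when more than one dword is required.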
5532     EVT NewVT = NumVDataDwords > 1 ?
5533                   EVT::getVectorVT(*DAG.getContext(), MVT::f32, NumVDataDwords)
5534                 : MVT::f32;
5535 
5536     ResultTypes[0] = NewVT;
5537     if (ResultTypes.size() == 3) {
      // The original result was an aggregate type used for TexFailCtrl
      // results. The actual instruction returns a vector type, which has now
      // been created. Remove the aggregate result.
5541       ResultTypes.erase(&ResultTypes[1]);
5542     }
5543   }
5544 
5545   SDValue GLC;
5546   SDValue SLC;
5547   SDValue DLC;
5548   if (BaseOpcode->Atomic) {
5549     GLC = True; // TODO no-return optimization
5550     if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, nullptr, &SLC,
5551                           IsGFX10 ? &DLC : nullptr))
5552       return Op;
5553   } else {
5554     if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, &GLC, &SLC,
5555                           IsGFX10 ? &DLC : nullptr))
5556       return Op;
5557   }
5558 
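  // Assemble the operand list in the order expected by the MIMG machine
  // instruction definitions.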
5559   SmallVector<SDValue, 26> Ops;
5560   if (BaseOpcode->Store || BaseOpcode->Atomic)
5561     Ops.push_back(VData); // vdata
5562   if (UseNSA) {
5563     for (const SDValue &Addr : VAddrs)
5564       Ops.push_back(Addr);
5565   } else {
5566     Ops.push_back(VAddr);
5567   }
5568   Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs)); // rsrc
5569   if (BaseOpcode->Sampler)
5570     Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs + 1)); // sampler
5571   Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32));
5572   if (IsGFX10)
5573     Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32));
5574   Ops.push_back(Unorm);
5575   if (IsGFX10)
5576     Ops.push_back(DLC);
5577   Ops.push_back(GLC);
5578   Ops.push_back(SLC);
5579   Ops.push_back(IsA16 &&  // a16 or r128
5580                 ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False);
5581   Ops.push_back(TFE); // tfe
5582   Ops.push_back(LWE); // lwe
5583   if (!IsGFX10)
5584     Ops.push_back(DimInfo->DA ? True : False);
5585   if (BaseOpcode->HasD16)
5586     Ops.push_back(IsD16 ? True : False);
5587   if (isa<MemSDNode>(Op))
5588     Ops.push_back(Op.getOperand(0)); // chain
5589 
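  // Pick the concrete MIMG opcode for the target encoding and the computed
  // data/address dword counts. GFX10 chooses between the NSA and default
  // encodings; older targets try the GFX8 encoding first and fall back to
  // GFX6.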
5590   int NumVAddrDwords =
5591       UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32;
5592   int Opcode = -1;
5593 
5594   if (IsGFX10) {
5595     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
5596                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
5597                                           : AMDGPU::MIMGEncGfx10Default,
5598                                    NumVDataDwords, NumVAddrDwords);
5599   } else {
5600     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5601       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
5602                                      NumVDataDwords, NumVAddrDwords);
5603     if (Opcode == -1)
5604       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
5605                                      NumVDataDwords, NumVAddrDwords);
5606   }
5607   assert(Opcode != -1);
5608 
5609   MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops);
5610   if (auto MemOp = dyn_cast<MemSDNode>(Op)) {
5611     MachineMemOperand *MemRef = MemOp->getMemOperand();
5612     DAG.setNodeMemRefs(NewNode, {MemRef});
5613   }
5614 
5615   if (BaseOpcode->AtomicX2) {
5616     SmallVector<SDValue, 1> Elt;
5617     DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1);
5618     return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL);
5619   } else if (!BaseOpcode->Store) {
5620     return constructRetValue(DAG, NewNode,
5621                              OrigResultTypes, IsTexFail,
5622                              Subtarget->hasUnpackedD16VMem(), IsD16,
5623                              DMaskLanes, NumVDataDwords, DL,
5624                              *DAG.getContext());
5625   }
5626 
5627   return SDValue(NewNode, 0);
5628 }
5629 
5630 SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
5631                                        SDValue Offset, SDValue GLC, SDValue DLC,
5632                                        SelectionDAG &DAG) const {
5633   MachineFunction &MF = DAG.getMachineFunction();
5634   MachineMemOperand *MMO = MF.getMachineMemOperand(
5635       MachinePointerInfo(),
5636       MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
5637           MachineMemOperand::MOInvariant,
5638       VT.getStoreSize(), VT.getStoreSize());
5639 
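  // A uniform (non-divergent) offset can be lowered directly to a scalar
  // buffer load.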
5640   if (!Offset->isDivergent()) {
5641     SDValue Ops[] = {
5642         Rsrc,
5643         Offset, // Offset
5644         GLC,
5645         DLC,
5646     };
5647     return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL,
5648                                    DAG.getVTList(VT), Ops, VT, MMO);
5649   }
5650 
5651   // We have a divergent offset. Emit a MUBUF buffer load instead. We can
5652   // assume that the buffer is unswizzled.
5653   SmallVector<SDValue, 4> Loads;
5654   unsigned NumLoads = 1;
5655   MVT LoadVT = VT.getSimpleVT();
5656   unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1;
5657   assert((LoadVT.getScalarType() == MVT::i32 ||
5658           LoadVT.getScalarType() == MVT::f32) &&
5659          isPowerOf2_32(NumElts));
5660 
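  // Split 8- and 16-element loads into multiple dwordx4 MUBUF loads and
  // concatenate the results below.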
5661   if (NumElts == 8 || NumElts == 16) {
5662     NumLoads = NumElts == 16 ? 4 : 2;
5663     LoadVT = MVT::v4i32;
5664   }
5665 
5666   SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue});
5667   unsigned CachePolicy = cast<ConstantSDNode>(GLC)->getZExtValue();
5668   SDValue Ops[] = {
5669       DAG.getEntryNode(),                               // Chain
5670       Rsrc,                                             // rsrc
5671       DAG.getConstant(0, DL, MVT::i32),                 // vindex
5672       {},                                               // voffset
5673       {},                                               // soffset
5674       {},                                               // offset
5675       DAG.getTargetConstant(CachePolicy, DL, MVT::i32), // cachepolicy
5676       DAG.getTargetConstant(0, DL, MVT::i1),            // idxen
5677   };
5678 
5679   // Use the alignment to ensure that the required offsets will fit into the
5680   // immediate offsets.
5681   setBufferOffsets(Offset, DAG, &Ops[3], NumLoads > 1 ? 16 * NumLoads : 4);
5682 
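  // Emit the loads, advancing the immediate offset by 16 bytes per load.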
5683   uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
5684   for (unsigned i = 0; i < NumLoads; ++i) {
5685     Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32);
5686     Loads.push_back(DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList,
5687                                             Ops, LoadVT, MMO));
5688   }
5689 
5690   if (VT == MVT::v8i32 || VT == MVT::v16i32)
5691     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads);
5692 
5693   return Loads[0];
5694 }
5695 
5696 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
5697                                                   SelectionDAG &DAG) const {
5698   MachineFunction &MF = DAG.getMachineFunction();
5699   auto MFI = MF.getInfo<SIMachineFunctionInfo>();
5700 
5701   EVT VT = Op.getValueType();
5702   SDLoc DL(Op);
5703   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5704 
5705   // TODO: Should this propagate fast-math-flags?
5706 
5707   switch (IntrinsicID) {
5708   case Intrinsic::amdgcn_implicit_buffer_ptr: {
5709     if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction()))
5710       return emitNonHSAIntrinsicError(DAG, DL, VT);
5711     return getPreloadedValue(DAG, *MFI, VT,
5712                              AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
5713   }
5714   case Intrinsic::amdgcn_dispatch_ptr:
5715   case Intrinsic::amdgcn_queue_ptr: {
5716     if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) {
5717       DiagnosticInfoUnsupported BadIntrin(
5718           MF.getFunction(), "unsupported hsa intrinsic without hsa target",
5719           DL.getDebugLoc());
5720       DAG.getContext()->diagnose(BadIntrin);
5721       return DAG.getUNDEF(VT);
5722     }
5723 
5724     auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
5725       AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
5726     return getPreloadedValue(DAG, *MFI, VT, RegID);
5727   }
5728   case Intrinsic::amdgcn_implicitarg_ptr: {
5729     if (MFI->isEntryFunction())
5730       return getImplicitArgPtr(DAG, DL);
5731     return getPreloadedValue(DAG, *MFI, VT,
5732                              AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
5733   }
5734   case Intrinsic::amdgcn_kernarg_segment_ptr: {
5735     return getPreloadedValue(DAG, *MFI, VT,
5736                              AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
5737   }
5738   case Intrinsic::amdgcn_dispatch_id: {
5739     return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
5740   }
5741   case Intrinsic::amdgcn_rcp:
5742     return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
5743   case Intrinsic::amdgcn_rsq:
5744     return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
5745   case Intrinsic::amdgcn_rsq_legacy:
5746     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5747       return emitRemovedIntrinsicError(DAG, DL, VT);
5748 
5749     return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
5750   case Intrinsic::amdgcn_rcp_legacy:
5751     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5752       return emitRemovedIntrinsicError(DAG, DL, VT);
5753     return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
5754   case Intrinsic::amdgcn_rsq_clamp: {
5755     if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
5756       return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
5757 
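    // RSQ_CLAMP is not available on VI and newer; emulate it by clamping the
    // RSQ result to +/- the largest finite value of the type.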
5758     Type *Type = VT.getTypeForEVT(*DAG.getContext());
5759     APFloat Max = APFloat::getLargest(Type->getFltSemantics());
5760     APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
5761 
5762     SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
5763     SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
5764                               DAG.getConstantFP(Max, DL, VT));
5765     return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
5766                        DAG.getConstantFP(Min, DL, VT));
5767   }
5768   case Intrinsic::r600_read_ngroups_x:
5769     if (Subtarget->isAmdHsaOS())
5770       return emitNonHSAIntrinsicError(DAG, DL, VT);
5771 
5772     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5773                                     SI::KernelInputOffsets::NGROUPS_X, 4, false);
5774   case Intrinsic::r600_read_ngroups_y:
5775     if (Subtarget->isAmdHsaOS())
5776       return emitNonHSAIntrinsicError(DAG, DL, VT);
5777 
5778     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5779                                     SI::KernelInputOffsets::NGROUPS_Y, 4, false);
5780   case Intrinsic::r600_read_ngroups_z:
5781     if (Subtarget->isAmdHsaOS())
5782       return emitNonHSAIntrinsicError(DAG, DL, VT);
5783 
5784     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5785                                     SI::KernelInputOffsets::NGROUPS_Z, 4, false);
5786   case Intrinsic::r600_read_global_size_x:
5787     if (Subtarget->isAmdHsaOS())
5788       return emitNonHSAIntrinsicError(DAG, DL, VT);
5789 
5790     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5791                                     SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false);
5792   case Intrinsic::r600_read_global_size_y:
5793     if (Subtarget->isAmdHsaOS())
5794       return emitNonHSAIntrinsicError(DAG, DL, VT);
5795 
5796     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5797                                     SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false);
5798   case Intrinsic::r600_read_global_size_z:
5799     if (Subtarget->isAmdHsaOS())
5800       return emitNonHSAIntrinsicError(DAG, DL, VT);
5801 
5802     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5803                                     SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false);
5804   case Intrinsic::r600_read_local_size_x:
5805     if (Subtarget->isAmdHsaOS())
5806       return emitNonHSAIntrinsicError(DAG, DL, VT);
5807 
5808     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5809                                   SI::KernelInputOffsets::LOCAL_SIZE_X);
5810   case Intrinsic::r600_read_local_size_y:
5811     if (Subtarget->isAmdHsaOS())
5812       return emitNonHSAIntrinsicError(DAG, DL, VT);
5813 
5814     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5815                                   SI::KernelInputOffsets::LOCAL_SIZE_Y);
5816   case Intrinsic::r600_read_local_size_z:
5817     if (Subtarget->isAmdHsaOS())
5818       return emitNonHSAIntrinsicError(DAG, DL, VT);
5819 
5820     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5821                                   SI::KernelInputOffsets::LOCAL_SIZE_Z);
5822   case Intrinsic::amdgcn_workgroup_id_x:
5823   case Intrinsic::r600_read_tgid_x:
5824     return getPreloadedValue(DAG, *MFI, VT,
5825                              AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
5826   case Intrinsic::amdgcn_workgroup_id_y:
5827   case Intrinsic::r600_read_tgid_y:
5828     return getPreloadedValue(DAG, *MFI, VT,
5829                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
5830   case Intrinsic::amdgcn_workgroup_id_z:
5831   case Intrinsic::r600_read_tgid_z:
5832     return getPreloadedValue(DAG, *MFI, VT,
5833                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
5834   case Intrinsic::amdgcn_workitem_id_x:
5835   case Intrinsic::r600_read_tidig_x:
5836     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5837                           SDLoc(DAG.getEntryNode()),
5838                           MFI->getArgInfo().WorkItemIDX);
5839   case Intrinsic::amdgcn_workitem_id_y:
5840   case Intrinsic::r600_read_tidig_y:
5841     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5842                           SDLoc(DAG.getEntryNode()),
5843                           MFI->getArgInfo().WorkItemIDY);
5844   case Intrinsic::amdgcn_workitem_id_z:
5845   case Intrinsic::r600_read_tidig_z:
5846     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5847                           SDLoc(DAG.getEntryNode()),
5848                           MFI->getArgInfo().WorkItemIDZ);
5849   case Intrinsic::amdgcn_wavefrontsize:
5850     return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(),
5851                            SDLoc(Op), MVT::i32);
5852   case Intrinsic::amdgcn_s_buffer_load: {
5853     bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10;
5854     SDValue GLC;
5855     SDValue DLC = DAG.getTargetConstant(0, DL, MVT::i1);
5856     if (!parseCachePolicy(Op.getOperand(3), DAG, &GLC, nullptr,
5857                           IsGFX10 ? &DLC : nullptr))
5858       return Op;
5859     return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2), GLC, DLC,
5860                         DAG);
5861   }
5862   case Intrinsic::amdgcn_fdiv_fast:
5863     return lowerFDIV_FAST(Op, DAG);
5864   case Intrinsic::amdgcn_interp_mov: {
5865     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
5866     SDValue Glue = M0.getValue(1);
5867     return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
5868                        Op.getOperand(2), Op.getOperand(3), Glue);
5869   }
5870   case Intrinsic::amdgcn_interp_p1: {
5871     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
5872     SDValue Glue = M0.getValue(1);
5873     return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
5874                        Op.getOperand(2), Op.getOperand(3), Glue);
5875   }
5876   case Intrinsic::amdgcn_interp_p2: {
5877     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
5878     SDValue Glue = SDValue(M0.getNode(), 1);
5879     return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
5880                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
5881                        Glue);
5882   }
5883   case Intrinsic::amdgcn_interp_p1_f16: {
5884     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
5885     SDValue Glue = M0.getValue(1);
5886     if (getSubtarget()->getLDSBankCount() == 16) {
5887       // 16 bank LDS
5888       SDValue S = DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32,
5889                               DAG.getConstant(2, DL, MVT::i32), // P0
5890                               Op.getOperand(2), // Attrchan
5891                               Op.getOperand(3), // Attr
5892                               Glue);
5893       SDValue Ops[] = {
5894         Op.getOperand(1), // Src0
5895         Op.getOperand(2), // Attrchan
5896         Op.getOperand(3), // Attr
5897         DAG.getTargetConstant(0, DL, MVT::i32), // $src0_modifiers
5898         S, // Src2 - holds two f16 values selected by high
5899         DAG.getTargetConstant(0, DL, MVT::i32), // $src2_modifiers
5900         Op.getOperand(4), // high
5901         DAG.getTargetConstant(0, DL, MVT::i1), // $clamp
5902         DAG.getTargetConstant(0, DL, MVT::i32) // $omod
5903       };
5904       return DAG.getNode(AMDGPUISD::INTERP_P1LV_F16, DL, MVT::f32, Ops);
5905     } else {
5906       // 32 bank LDS
5907       SDValue Ops[] = {
5908         Op.getOperand(1), // Src0
5909         Op.getOperand(2), // Attrchan
5910         Op.getOperand(3), // Attr
5911         DAG.getTargetConstant(0, DL, MVT::i32), // $src0_modifiers
5912         Op.getOperand(4), // high
5913         DAG.getTargetConstant(0, DL, MVT::i1), // $clamp
5914         DAG.getTargetConstant(0, DL, MVT::i32), // $omod
5915         Glue
5916       };
5917       return DAG.getNode(AMDGPUISD::INTERP_P1LL_F16, DL, MVT::f32, Ops);
5918     }
5919   }
5920   case Intrinsic::amdgcn_interp_p2_f16: {
5921     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(6));
5922     SDValue Glue = SDValue(M0.getNode(), 1);
5923     SDValue Ops[] = {
5924       Op.getOperand(2), // Src0
5925       Op.getOperand(3), // Attrchan
5926       Op.getOperand(4), // Attr
5927       DAG.getTargetConstant(0, DL, MVT::i32), // $src0_modifiers
5928       Op.getOperand(1), // Src2
5929       DAG.getTargetConstant(0, DL, MVT::i32), // $src2_modifiers
5930       Op.getOperand(5), // high
5931       DAG.getTargetConstant(0, DL, MVT::i1), // $clamp
5932       Glue
5933     };
5934     return DAG.getNode(AMDGPUISD::INTERP_P2_F16, DL, MVT::f16, Ops);
5935   }
5936   case Intrinsic::amdgcn_sin:
5937     return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
5938 
5939   case Intrinsic::amdgcn_cos:
5940     return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
5941 
5942   case Intrinsic::amdgcn_mul_u24:
5943     return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT, Op.getOperand(1), Op.getOperand(2));
5944   case Intrinsic::amdgcn_mul_i24:
5945     return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT, Op.getOperand(1), Op.getOperand(2));
5946 
5947   case Intrinsic::amdgcn_log_clamp: {
5948     if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
5949       return SDValue();
5950 
5951     DiagnosticInfoUnsupported BadIntrin(
5952       MF.getFunction(), "intrinsic not supported on subtarget",
5953       DL.getDebugLoc());
    DAG.getContext()->diagnose(BadIntrin);
    return DAG.getUNDEF(VT);
5956   }
5957   case Intrinsic::amdgcn_ldexp:
5958     return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
5959                        Op.getOperand(1), Op.getOperand(2));
5960 
5961   case Intrinsic::amdgcn_fract:
5962     return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
5963 
5964   case Intrinsic::amdgcn_class:
5965     return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
5966                        Op.getOperand(1), Op.getOperand(2));
5967   case Intrinsic::amdgcn_div_fmas:
5968     return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
5969                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5970                        Op.getOperand(4));
5971 
5972   case Intrinsic::amdgcn_div_fixup:
5973     return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
5974                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5975 
5976   case Intrinsic::amdgcn_trig_preop:
5977     return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
5978                        Op.getOperand(1), Op.getOperand(2));
5979   case Intrinsic::amdgcn_div_scale: {
5980     const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3));
5981 
    // Translate to the operands expected by the machine instruction. The
    // first operand must be the same as either the numerator or the
    // denominator, as selected by the constant parameter below.
5984     SDValue Numerator = Op.getOperand(1);
5985     SDValue Denominator = Op.getOperand(2);
5986 
    // Note this order is the opposite of the machine instruction's operand
    // order, which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator.
    // The intrinsic has the numerator as the first operand to match a normal
    // division operation.
5991 
5992     SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
5993 
5994     return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
5995                        Denominator, Numerator);
5996   }
5997   case Intrinsic::amdgcn_icmp: {
5998     // There is a Pat that handles this variant, so return it as-is.
5999     if (Op.getOperand(1).getValueType() == MVT::i1 &&
6000         Op.getConstantOperandVal(2) == 0 &&
6001         Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE)
6002       return Op;
6003     return lowerICMPIntrinsic(*this, Op.getNode(), DAG);
6004   }
6005   case Intrinsic::amdgcn_fcmp: {
6006     return lowerFCMPIntrinsic(*this, Op.getNode(), DAG);
6007   }
6008   case Intrinsic::amdgcn_fmed3:
6009     return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
6010                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
6011   case Intrinsic::amdgcn_fdot2:
6012     return DAG.getNode(AMDGPUISD::FDOT2, DL, VT,
6013                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
6014                        Op.getOperand(4));
6015   case Intrinsic::amdgcn_fmul_legacy:
6016     return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
6017                        Op.getOperand(1), Op.getOperand(2));
6018   case Intrinsic::amdgcn_sffbh:
6019     return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
6020   case Intrinsic::amdgcn_sbfe:
6021     return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
6022                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
6023   case Intrinsic::amdgcn_ubfe:
6024     return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
6025                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
6026   case Intrinsic::amdgcn_cvt_pkrtz:
6027   case Intrinsic::amdgcn_cvt_pknorm_i16:
6028   case Intrinsic::amdgcn_cvt_pknorm_u16:
6029   case Intrinsic::amdgcn_cvt_pk_i16:
6030   case Intrinsic::amdgcn_cvt_pk_u16: {
6031     // FIXME: Stop adding cast if v2f16/v2i16 are legal.
6032     EVT VT = Op.getValueType();
6033     unsigned Opcode;
6034 
6035     if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
6036       Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
6037     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
6038       Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
6039     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
6040       Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
6041     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
6042       Opcode = AMDGPUISD::CVT_PK_I16_I32;
6043     else
6044       Opcode = AMDGPUISD::CVT_PK_U16_U32;
6045 
6046     if (isTypeLegal(VT))
6047       return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2));
6048 
6049     SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
6050                                Op.getOperand(1), Op.getOperand(2));
6051     return DAG.getNode(ISD::BITCAST, DL, VT, Node);
6052   }
6053   case Intrinsic::amdgcn_fmad_ftz:
6054     return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1),
6055                        Op.getOperand(2), Op.getOperand(3));
6056 
6057   case Intrinsic::amdgcn_if_break:
6058     return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT,
6059                                       Op->getOperand(1), Op->getOperand(2)), 0);
6060 
6061   case Intrinsic::amdgcn_groupstaticsize: {
6062     Triple::OSType OS = getTargetMachine().getTargetTriple().getOS();
6063     if (OS == Triple::AMDHSA || OS == Triple::AMDPAL)
6064       return Op;
6065 
6066     const Module *M = MF.getFunction().getParent();
6067     const GlobalValue *GV =
6068         M->getNamedValue(Intrinsic::getName(Intrinsic::amdgcn_groupstaticsize));
6069     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0,
6070                                             SIInstrInfo::MO_ABS32_LO);
6071     return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0};
6072   }
6073   case Intrinsic::amdgcn_is_shared:
6074   case Intrinsic::amdgcn_is_private: {
6075     SDLoc SL(Op);
6076     unsigned AS = (IntrinsicID == Intrinsic::amdgcn_is_shared) ?
6077       AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
6078     SDValue Aperture = getSegmentAperture(AS, SL, DAG);
6079     SDValue SrcVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32,
6080                                  Op.getOperand(1));
6081 
6082     SDValue SrcHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, SrcVec,
6083                                 DAG.getConstant(1, SL, MVT::i32));
6084     return DAG.getSetCC(SL, MVT::i1, SrcHi, Aperture, ISD::SETEQ);
6085   }
6086   default:
6087     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6088             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
6089       return lowerImage(Op, ImageDimIntr, DAG);
6090 
6091     return Op;
6092   }
6093 }
6094 
6095 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
6096                                                  SelectionDAG &DAG) const {
6097   unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
6098   SDLoc DL(Op);
6099 
6100   switch (IntrID) {
6101   case Intrinsic::amdgcn_ds_ordered_add:
6102   case Intrinsic::amdgcn_ds_ordered_swap: {
6103     MemSDNode *M = cast<MemSDNode>(Op);
6104     SDValue Chain = M->getOperand(0);
6105     SDValue M0 = M->getOperand(2);
6106     SDValue Value = M->getOperand(3);
6107     unsigned IndexOperand = M->getConstantOperandVal(7);
6108     unsigned WaveRelease = M->getConstantOperandVal(8);
6109     unsigned WaveDone = M->getConstantOperandVal(9);
6110     unsigned ShaderType;
6111     unsigned Instruction;
6112 
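    // Decode the packed index operand: bits [5:0] hold the ordered count
    // index and, on GFX10, bits [27:24] hold the dword count.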
6113     unsigned OrderedCountIndex = IndexOperand & 0x3f;
6114     IndexOperand &= ~0x3f;
6115     unsigned CountDw = 0;
6116 
6117     if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) {
6118       CountDw = (IndexOperand >> 24) & 0xf;
6119       IndexOperand &= ~(0xf << 24);
6120 
6121       if (CountDw < 1 || CountDw > 4) {
6122         report_fatal_error(
6123             "ds_ordered_count: dword count must be between 1 and 4");
6124       }
6125     }
6126 
6127     if (IndexOperand)
6128       report_fatal_error("ds_ordered_count: bad index operand");
6129 
6130     switch (IntrID) {
6131     case Intrinsic::amdgcn_ds_ordered_add:
6132       Instruction = 0;
6133       break;
6134     case Intrinsic::amdgcn_ds_ordered_swap:
6135       Instruction = 1;
6136       break;
6137     }
6138 
6139     if (WaveDone && !WaveRelease)
6140       report_fatal_error("ds_ordered_count: wave_done requires wave_release");
6141 
6142     switch (DAG.getMachineFunction().getFunction().getCallingConv()) {
6143     case CallingConv::AMDGPU_CS:
6144     case CallingConv::AMDGPU_KERNEL:
6145       ShaderType = 0;
6146       break;
6147     case CallingConv::AMDGPU_PS:
6148       ShaderType = 1;
6149       break;
6150     case CallingConv::AMDGPU_VS:
6151       ShaderType = 2;
6152       break;
6153     case CallingConv::AMDGPU_GS:
6154       ShaderType = 3;
6155       break;
6156     default:
6157       report_fatal_error("ds_ordered_count unsupported for this calling conv");
6158     }
6159 
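    // Pack the ds_ordered_count offset fields: offset0 holds the counter's
    // byte offset, offset1 holds the wave_release, wave_done, shader type and
    // instruction bits (plus the dword count on GFX10).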
6160     unsigned Offset0 = OrderedCountIndex << 2;
6161     unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
6162                        (Instruction << 4);
6163 
6164     if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
6165       Offset1 |= (CountDw - 1) << 6;
6166 
6167     unsigned Offset = Offset0 | (Offset1 << 8);
6168 
6169     SDValue Ops[] = {
6170       Chain,
6171       Value,
6172       DAG.getTargetConstant(Offset, DL, MVT::i16),
6173       copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue
6174     };
6175     return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL,
6176                                    M->getVTList(), Ops, M->getMemoryVT(),
6177                                    M->getMemOperand());
6178   }
6179   case Intrinsic::amdgcn_ds_fadd: {
6180     MemSDNode *M = cast<MemSDNode>(Op);
6181     unsigned Opc;
6182     switch (IntrID) {
6183     case Intrinsic::amdgcn_ds_fadd:
6184       Opc = ISD::ATOMIC_LOAD_FADD;
6185       break;
6186     }
6187 
6188     return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(),
6189                          M->getOperand(0), M->getOperand(2), M->getOperand(3),
6190                          M->getMemOperand());
6191   }
6192   case Intrinsic::amdgcn_atomic_inc:
6193   case Intrinsic::amdgcn_atomic_dec:
6194   case Intrinsic::amdgcn_ds_fmin:
6195   case Intrinsic::amdgcn_ds_fmax: {
6196     MemSDNode *M = cast<MemSDNode>(Op);
6197     unsigned Opc;
6198     switch (IntrID) {
6199     case Intrinsic::amdgcn_atomic_inc:
6200       Opc = AMDGPUISD::ATOMIC_INC;
6201       break;
6202     case Intrinsic::amdgcn_atomic_dec:
6203       Opc = AMDGPUISD::ATOMIC_DEC;
6204       break;
6205     case Intrinsic::amdgcn_ds_fmin:
6206       Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
6207       break;
6208     case Intrinsic::amdgcn_ds_fmax:
6209       Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
6210       break;
6211     default:
6212       llvm_unreachable("Unknown intrinsic!");
6213     }
6214     SDValue Ops[] = {
6215       M->getOperand(0), // Chain
6216       M->getOperand(2), // Ptr
6217       M->getOperand(3)  // Value
6218     };
6219 
6220     return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
6221                                    M->getMemoryVT(), M->getMemOperand());
6222   }
6223   case Intrinsic::amdgcn_buffer_load:
6224   case Intrinsic::amdgcn_buffer_load_format: {
6225     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
6226     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6227     unsigned IdxEn = 1;
6228     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
6229       IdxEn = Idx->getZExtValue() != 0;
6230     SDValue Ops[] = {
6231       Op.getOperand(0), // Chain
6232       Op.getOperand(2), // rsrc
6233       Op.getOperand(3), // vindex
6234       SDValue(),        // voffset -- will be set by setBufferOffsets
6235       SDValue(),        // soffset -- will be set by setBufferOffsets
6236       SDValue(),        // offset -- will be set by setBufferOffsets
6237       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6238       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6239     };
6240 
6241     setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]);
6242     unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
6243         AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
6244 
6245     EVT VT = Op.getValueType();
6246     EVT IntVT = VT.changeTypeToInteger();
6247     auto *M = cast<MemSDNode>(Op);
6248     EVT LoadVT = Op.getValueType();
6249 
6250     if (LoadVT.getScalarType() == MVT::f16)
6251       return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
6252                                  M, DAG, Ops);
6253 
6254     // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
6255     if (LoadVT.getScalarType() == MVT::i8 ||
6256         LoadVT.getScalarType() == MVT::i16)
6257       return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
6258 
6259     return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
6260                                M->getMemOperand(), DAG);
6261   }
6262   case Intrinsic::amdgcn_raw_buffer_load:
6263   case Intrinsic::amdgcn_raw_buffer_load_format: {
6264     const bool IsFormat = IntrID == Intrinsic::amdgcn_raw_buffer_load_format;
6265 
6266     auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
6267     SDValue Ops[] = {
6268       Op.getOperand(0), // Chain
6269       Op.getOperand(2), // rsrc
6270       DAG.getConstant(0, DL, MVT::i32), // vindex
6271       Offsets.first,    // voffset
6272       Op.getOperand(4), // soffset
6273       Offsets.second,   // offset
6274       Op.getOperand(5), // cachepolicy
6275       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6276     };
6277 
6278     return lowerIntrinsicLoad(cast<MemSDNode>(Op), IsFormat, DAG, Ops);
6279   }
6280   case Intrinsic::amdgcn_struct_buffer_load:
6281   case Intrinsic::amdgcn_struct_buffer_load_format: {
6282     const bool IsFormat = IntrID == Intrinsic::amdgcn_struct_buffer_load_format;
6283 
6284     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6285     SDValue Ops[] = {
6286       Op.getOperand(0), // Chain
6287       Op.getOperand(2), // rsrc
6288       Op.getOperand(3), // vindex
6289       Offsets.first,    // voffset
6290       Op.getOperand(5), // soffset
6291       Offsets.second,   // offset
6292       Op.getOperand(6), // cachepolicy
6293       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6294     };
6295 
6296     return lowerIntrinsicLoad(cast<MemSDNode>(Op), IsFormat, DAG, Ops);
6297   }
6298   case Intrinsic::amdgcn_tbuffer_load: {
6299     MemSDNode *M = cast<MemSDNode>(Op);
6300     EVT LoadVT = Op.getValueType();
6301 
6302     unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6303     unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
6304     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
6305     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
6306     unsigned IdxEn = 1;
6307     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
6308       IdxEn = Idx->getZExtValue() != 0;
6309     SDValue Ops[] = {
6310       Op.getOperand(0),  // Chain
6311       Op.getOperand(2),  // rsrc
6312       Op.getOperand(3),  // vindex
6313       Op.getOperand(4),  // voffset
6314       Op.getOperand(5),  // soffset
6315       Op.getOperand(6),  // offset
6316       DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
6317       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6318       DAG.getTargetConstant(IdxEn, DL, MVT::i1) // idxen
6319     };
6320 
6321     if (LoadVT.getScalarType() == MVT::f16)
6322       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
6323                                  M, DAG, Ops);
6324     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
6325                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
6326                                DAG);
6327   }
6328   case Intrinsic::amdgcn_raw_tbuffer_load: {
6329     MemSDNode *M = cast<MemSDNode>(Op);
6330     EVT LoadVT = Op.getValueType();
6331     auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
6332 
6333     SDValue Ops[] = {
6334       Op.getOperand(0),  // Chain
6335       Op.getOperand(2),  // rsrc
6336       DAG.getConstant(0, DL, MVT::i32), // vindex
6337       Offsets.first,     // voffset
6338       Op.getOperand(4),  // soffset
6339       Offsets.second,    // offset
6340       Op.getOperand(5),  // format
6341       Op.getOperand(6),  // cachepolicy
6342       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6343     };
6344 
6345     if (LoadVT.getScalarType() == MVT::f16)
6346       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
6347                                  M, DAG, Ops);
6348     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
6349                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
6350                                DAG);
6351   }
6352   case Intrinsic::amdgcn_struct_tbuffer_load: {
6353     MemSDNode *M = cast<MemSDNode>(Op);
6354     EVT LoadVT = Op.getValueType();
6355     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6356 
6357     SDValue Ops[] = {
6358       Op.getOperand(0),  // Chain
6359       Op.getOperand(2),  // rsrc
6360       Op.getOperand(3),  // vindex
6361       Offsets.first,     // voffset
6362       Op.getOperand(5),  // soffset
6363       Offsets.second,    // offset
6364       Op.getOperand(6),  // format
6365       Op.getOperand(7),  // cachepolicy
6366       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6367     };
6368 
6369     if (LoadVT.getScalarType() == MVT::f16)
6370       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
6371                                  M, DAG, Ops);
6372     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
6373                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
6374                                DAG);
6375   }
6376   case Intrinsic::amdgcn_buffer_atomic_swap:
6377   case Intrinsic::amdgcn_buffer_atomic_add:
6378   case Intrinsic::amdgcn_buffer_atomic_sub:
6379   case Intrinsic::amdgcn_buffer_atomic_smin:
6380   case Intrinsic::amdgcn_buffer_atomic_umin:
6381   case Intrinsic::amdgcn_buffer_atomic_smax:
6382   case Intrinsic::amdgcn_buffer_atomic_umax:
6383   case Intrinsic::amdgcn_buffer_atomic_and:
6384   case Intrinsic::amdgcn_buffer_atomic_or:
6385   case Intrinsic::amdgcn_buffer_atomic_xor: {
6386     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6387     unsigned IdxEn = 1;
6388     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6389       IdxEn = Idx->getZExtValue() != 0;
6390     SDValue Ops[] = {
6391       Op.getOperand(0), // Chain
6392       Op.getOperand(2), // vdata
6393       Op.getOperand(3), // rsrc
6394       Op.getOperand(4), // vindex
6395       SDValue(),        // voffset -- will be set by setBufferOffsets
6396       SDValue(),        // soffset -- will be set by setBufferOffsets
6397       SDValue(),        // offset -- will be set by setBufferOffsets
6398       DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
6399       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6400     };
6401     setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
6402     EVT VT = Op.getValueType();
6403 
6404     auto *M = cast<MemSDNode>(Op);
6405     unsigned Opcode = 0;
6406 
6407     switch (IntrID) {
6408     case Intrinsic::amdgcn_buffer_atomic_swap:
6409       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6410       break;
6411     case Intrinsic::amdgcn_buffer_atomic_add:
6412       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6413       break;
6414     case Intrinsic::amdgcn_buffer_atomic_sub:
6415       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6416       break;
6417     case Intrinsic::amdgcn_buffer_atomic_smin:
6418       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6419       break;
6420     case Intrinsic::amdgcn_buffer_atomic_umin:
6421       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6422       break;
6423     case Intrinsic::amdgcn_buffer_atomic_smax:
6424       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6425       break;
6426     case Intrinsic::amdgcn_buffer_atomic_umax:
6427       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6428       break;
6429     case Intrinsic::amdgcn_buffer_atomic_and:
6430       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6431       break;
6432     case Intrinsic::amdgcn_buffer_atomic_or:
6433       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6434       break;
6435     case Intrinsic::amdgcn_buffer_atomic_xor:
6436       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6437       break;
6438     default:
6439       llvm_unreachable("unhandled atomic opcode");
6440     }
6441 
6442     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6443                                    M->getMemOperand());
6444   }
6445   case Intrinsic::amdgcn_raw_buffer_atomic_swap:
6446   case Intrinsic::amdgcn_raw_buffer_atomic_add:
6447   case Intrinsic::amdgcn_raw_buffer_atomic_sub:
6448   case Intrinsic::amdgcn_raw_buffer_atomic_smin:
6449   case Intrinsic::amdgcn_raw_buffer_atomic_umin:
6450   case Intrinsic::amdgcn_raw_buffer_atomic_smax:
6451   case Intrinsic::amdgcn_raw_buffer_atomic_umax:
6452   case Intrinsic::amdgcn_raw_buffer_atomic_and:
6453   case Intrinsic::amdgcn_raw_buffer_atomic_or:
6454   case Intrinsic::amdgcn_raw_buffer_atomic_xor:
6455   case Intrinsic::amdgcn_raw_buffer_atomic_inc:
6456   case Intrinsic::amdgcn_raw_buffer_atomic_dec: {
6457     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6458     SDValue Ops[] = {
6459       Op.getOperand(0), // Chain
6460       Op.getOperand(2), // vdata
6461       Op.getOperand(3), // rsrc
6462       DAG.getConstant(0, DL, MVT::i32), // vindex
6463       Offsets.first,    // voffset
6464       Op.getOperand(5), // soffset
6465       Offsets.second,   // offset
6466       Op.getOperand(6), // cachepolicy
6467       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6468     };
6469     EVT VT = Op.getValueType();
6470 
6471     auto *M = cast<MemSDNode>(Op);
6472     unsigned Opcode = 0;
6473 
6474     switch (IntrID) {
6475     case Intrinsic::amdgcn_raw_buffer_atomic_swap:
6476       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6477       break;
6478     case Intrinsic::amdgcn_raw_buffer_atomic_add:
6479       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6480       break;
6481     case Intrinsic::amdgcn_raw_buffer_atomic_sub:
6482       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6483       break;
6484     case Intrinsic::amdgcn_raw_buffer_atomic_smin:
6485       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6486       break;
6487     case Intrinsic::amdgcn_raw_buffer_atomic_umin:
6488       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6489       break;
6490     case Intrinsic::amdgcn_raw_buffer_atomic_smax:
6491       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6492       break;
6493     case Intrinsic::amdgcn_raw_buffer_atomic_umax:
6494       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6495       break;
6496     case Intrinsic::amdgcn_raw_buffer_atomic_and:
6497       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6498       break;
6499     case Intrinsic::amdgcn_raw_buffer_atomic_or:
6500       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6501       break;
6502     case Intrinsic::amdgcn_raw_buffer_atomic_xor:
6503       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6504       break;
6505     case Intrinsic::amdgcn_raw_buffer_atomic_inc:
6506       Opcode = AMDGPUISD::BUFFER_ATOMIC_INC;
6507       break;
6508     case Intrinsic::amdgcn_raw_buffer_atomic_dec:
6509       Opcode = AMDGPUISD::BUFFER_ATOMIC_DEC;
6510       break;
6511     default:
6512       llvm_unreachable("unhandled atomic opcode");
6513     }
6514 
6515     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6516                                    M->getMemOperand());
6517   }
6518   case Intrinsic::amdgcn_struct_buffer_atomic_swap:
6519   case Intrinsic::amdgcn_struct_buffer_atomic_add:
6520   case Intrinsic::amdgcn_struct_buffer_atomic_sub:
6521   case Intrinsic::amdgcn_struct_buffer_atomic_smin:
6522   case Intrinsic::amdgcn_struct_buffer_atomic_umin:
6523   case Intrinsic::amdgcn_struct_buffer_atomic_smax:
6524   case Intrinsic::amdgcn_struct_buffer_atomic_umax:
6525   case Intrinsic::amdgcn_struct_buffer_atomic_and:
6526   case Intrinsic::amdgcn_struct_buffer_atomic_or:
6527   case Intrinsic::amdgcn_struct_buffer_atomic_xor:
6528   case Intrinsic::amdgcn_struct_buffer_atomic_inc:
6529   case Intrinsic::amdgcn_struct_buffer_atomic_dec: {
6530     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6531     SDValue Ops[] = {
6532       Op.getOperand(0), // Chain
6533       Op.getOperand(2), // vdata
6534       Op.getOperand(3), // rsrc
6535       Op.getOperand(4), // vindex
6536       Offsets.first,    // voffset
6537       Op.getOperand(6), // soffset
6538       Offsets.second,   // offset
6539       Op.getOperand(7), // cachepolicy
6540       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6541     };
6542     EVT VT = Op.getValueType();
6543 
6544     auto *M = cast<MemSDNode>(Op);
6545     unsigned Opcode = 0;
6546 
6547     switch (IntrID) {
6548     case Intrinsic::amdgcn_struct_buffer_atomic_swap:
6549       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6550       break;
6551     case Intrinsic::amdgcn_struct_buffer_atomic_add:
6552       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6553       break;
6554     case Intrinsic::amdgcn_struct_buffer_atomic_sub:
6555       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6556       break;
6557     case Intrinsic::amdgcn_struct_buffer_atomic_smin:
6558       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6559       break;
6560     case Intrinsic::amdgcn_struct_buffer_atomic_umin:
6561       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6562       break;
6563     case Intrinsic::amdgcn_struct_buffer_atomic_smax:
6564       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6565       break;
6566     case Intrinsic::amdgcn_struct_buffer_atomic_umax:
6567       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6568       break;
6569     case Intrinsic::amdgcn_struct_buffer_atomic_and:
6570       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6571       break;
6572     case Intrinsic::amdgcn_struct_buffer_atomic_or:
6573       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6574       break;
6575     case Intrinsic::amdgcn_struct_buffer_atomic_xor:
6576       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6577       break;
6578     case Intrinsic::amdgcn_struct_buffer_atomic_inc:
6579       Opcode = AMDGPUISD::BUFFER_ATOMIC_INC;
6580       break;
6581     case Intrinsic::amdgcn_struct_buffer_atomic_dec:
6582       Opcode = AMDGPUISD::BUFFER_ATOMIC_DEC;
6583       break;
6584     default:
6585       llvm_unreachable("unhandled atomic opcode");
6586     }
6587 
6588     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6589                                    M->getMemOperand());
6590   }
6591   case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
6592     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6593     unsigned IdxEn = 1;
6594     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(5)))
6595       IdxEn = Idx->getZExtValue() != 0;
6596     SDValue Ops[] = {
6597       Op.getOperand(0), // Chain
6598       Op.getOperand(2), // src
6599       Op.getOperand(3), // cmp
6600       Op.getOperand(4), // rsrc
6601       Op.getOperand(5), // vindex
6602       SDValue(),        // voffset -- will be set by setBufferOffsets
6603       SDValue(),        // soffset -- will be set by setBufferOffsets
6604       SDValue(),        // offset -- will be set by setBufferOffsets
6605       DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
6606       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6607     };
6608     setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]);
6609     EVT VT = Op.getValueType();
6610     auto *M = cast<MemSDNode>(Op);
6611 
6612     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6613                                    Op->getVTList(), Ops, VT, M->getMemOperand());
6614   }
6615   case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: {
6616     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6617     SDValue Ops[] = {
6618       Op.getOperand(0), // Chain
6619       Op.getOperand(2), // src
6620       Op.getOperand(3), // cmp
6621       Op.getOperand(4), // rsrc
6622       DAG.getConstant(0, DL, MVT::i32), // vindex
6623       Offsets.first,    // voffset
6624       Op.getOperand(6), // soffset
6625       Offsets.second,   // offset
6626       Op.getOperand(7), // cachepolicy
6627       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6628     };
6629     EVT VT = Op.getValueType();
6630     auto *M = cast<MemSDNode>(Op);
6631 
6632     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6633                                    Op->getVTList(), Ops, VT, M->getMemOperand());
6634   }
6635   case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: {
6636     auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG);
6637     SDValue Ops[] = {
6638       Op.getOperand(0), // Chain
6639       Op.getOperand(2), // src
6640       Op.getOperand(3), // cmp
6641       Op.getOperand(4), // rsrc
6642       Op.getOperand(5), // vindex
6643       Offsets.first,    // voffset
6644       Op.getOperand(7), // soffset
6645       Offsets.second,   // offset
6646       Op.getOperand(8), // cachepolicy
6647       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6648     };
6649     EVT VT = Op.getValueType();
6650     auto *M = cast<MemSDNode>(Op);
6651 
6652     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6653                                    Op->getVTList(), Ops, VT, M->getMemOperand());
6654   }
6655 
6656   default:
6657     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6658             AMDGPU::getImageDimIntrinsicInfo(IntrID))
6659       return lowerImage(Op, ImageDimIntr, DAG);
6660 
6661     return SDValue();
6662   }
6663 }
6664 
// Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to
// dwordx4 on subtargets without dwordx3 load/store support.
6667 SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL,
6668                                               SDVTList VTList,
6669                                               ArrayRef<SDValue> Ops, EVT MemVT,
6670                                               MachineMemOperand *MMO,
6671                                               SelectionDAG &DAG) const {
6672   EVT VT = VTList.VTs[0];
6673   EVT WidenedVT = VT;
6674   EVT WidenedMemVT = MemVT;
6675   if (!Subtarget->hasDwordx3LoadStores() &&
6676       (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) {
6677     WidenedVT = EVT::getVectorVT(*DAG.getContext(),
6678                                  WidenedVT.getVectorElementType(), 4);
6679     WidenedMemVT = EVT::getVectorVT(*DAG.getContext(),
6680                                     WidenedMemVT.getVectorElementType(), 4);
6681     MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16);
6682   }
6683 
6684   assert(VTList.NumVTs == 2);
6685   SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]);
6686 
6687   auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops,
6688                                        WidenedMemVT, MMO);
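  // If the type was widened, extract the original narrower vector from the
  // result and re-merge it with the chain.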
6689   if (WidenedVT != VT) {
6690     auto Extract = DAG.getNode(
6691         ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp,
6692         DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
6693     NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL);
6694   }
6695   return NewOp;
6696 }
6697 
6698 SDValue SITargetLowering::handleD16VData(SDValue VData,
6699                                          SelectionDAG &DAG) const {
6700   EVT StoreVT = VData.getValueType();
6701 
6702   // No change for f16 and legal vector D16 types.
6703   if (!StoreVT.isVector())
6704     return VData;
6705 
6706   SDLoc DL(VData);
6707   assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16");
6708 
6709   if (Subtarget->hasUnpackedD16VMem()) {
6710     // We need to unpack the packed data to store.
6711     EVT IntStoreVT = StoreVT.changeTypeToInteger();
6712     SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
6713 
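    // Zero-extend each 16-bit element into its own 32-bit lane so every
    // component occupies a full dword.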
6714     EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
6715                                         StoreVT.getVectorNumElements());
6716     SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
6717     return DAG.UnrollVectorOp(ZExt.getNode());
6718   }
6719 
6720   assert(isTypeLegal(StoreVT));
6721   return VData;
6722 }
6723 
6724 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
6725                                               SelectionDAG &DAG) const {
6726   SDLoc DL(Op);
6727   SDValue Chain = Op.getOperand(0);
6728   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
6729   MachineFunction &MF = DAG.getMachineFunction();
6730 
6731   switch (IntrinsicID) {
6732   case Intrinsic::amdgcn_exp: {
6733     const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
6734     const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
6735     const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
6736     const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));
6737 
6738     const SDValue Ops[] = {
6739       Chain,
6740       DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
6741       DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
6742       Op.getOperand(4), // src0
6743       Op.getOperand(5), // src1
6744       Op.getOperand(6), // src2
6745       Op.getOperand(7), // src3
6746       DAG.getTargetConstant(0, DL, MVT::i1), // compr
6747       DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
6748     };
6749 
6750     unsigned Opc = Done->isNullValue() ?
6751       AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
6752     return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
6753   }
6754   case Intrinsic::amdgcn_exp_compr: {
6755     const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
6756     const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
6757     SDValue Src0 = Op.getOperand(4);
6758     SDValue Src1 = Op.getOperand(5);
6759     const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
6760     const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));
6761 
6762     SDValue Undef = DAG.getUNDEF(MVT::f32);
6763     const SDValue Ops[] = {
6764       Chain,
6765       DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
6766       DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
6767       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
6768       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
6769       Undef, // src2
6770       Undef, // src3
6771       DAG.getTargetConstant(1, DL, MVT::i1), // compr
6772       DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
6773     };
6774 
6775     unsigned Opc = Done->isNullValue() ?
6776       AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
6777     return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
6778   }
6779   case Intrinsic::amdgcn_s_barrier: {
6780     if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
6781       const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
6782       unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
6783       if (WGSize <= ST.getWavefrontSize())
6784         return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
6785                                           Op.getOperand(0)), 0);
6786     }
6787     return SDValue();
6788   }
6789   case Intrinsic::amdgcn_tbuffer_store: {
6790     SDValue VData = Op.getOperand(2);
6791     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6792     if (IsD16)
6793       VData = handleD16VData(VData, DAG);
6794     unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
6795     unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
6796     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
6797     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue();
6798     unsigned IdxEn = 1;
6799     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6800       IdxEn = Idx->getZExtValue() != 0;
6801     SDValue Ops[] = {
6802       Chain,
6803       VData,             // vdata
6804       Op.getOperand(3),  // rsrc
6805       Op.getOperand(4),  // vindex
6806       Op.getOperand(5),  // voffset
6807       Op.getOperand(6),  // soffset
6808       Op.getOperand(7),  // offset
6809       DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
6810       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6811       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6812     };
6813     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6814                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6815     MemSDNode *M = cast<MemSDNode>(Op);
6816     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6817                                    M->getMemoryVT(), M->getMemOperand());
6818   }
6819 
6820   case Intrinsic::amdgcn_struct_tbuffer_store: {
6821     SDValue VData = Op.getOperand(2);
6822     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6823     if (IsD16)
6824       VData = handleD16VData(VData, DAG);
6825     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6826     SDValue Ops[] = {
6827       Chain,
6828       VData,             // vdata
6829       Op.getOperand(3),  // rsrc
6830       Op.getOperand(4),  // vindex
6831       Offsets.first,     // voffset
6832       Op.getOperand(6),  // soffset
6833       Offsets.second,    // offset
6834       Op.getOperand(7),  // format
6835       Op.getOperand(8),  // cachepolicy
6836       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6837     };
6838     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6839                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6840     MemSDNode *M = cast<MemSDNode>(Op);
6841     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6842                                    M->getMemoryVT(), M->getMemOperand());
6843   }
6844 
6845   case Intrinsic::amdgcn_raw_tbuffer_store: {
6846     SDValue VData = Op.getOperand(2);
6847     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6848     if (IsD16)
6849       VData = handleD16VData(VData, DAG);
6850     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6851     SDValue Ops[] = {
6852       Chain,
6853       VData,             // vdata
6854       Op.getOperand(3),  // rsrc
6855       DAG.getConstant(0, DL, MVT::i32), // vindex
6856       Offsets.first,     // voffset
6857       Op.getOperand(5),  // soffset
6858       Offsets.second,    // offset
6859       Op.getOperand(6),  // format
6860       Op.getOperand(7),  // cachepolicy
6861       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6862     };
6863     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6864                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6865     MemSDNode *M = cast<MemSDNode>(Op);
6866     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6867                                    M->getMemoryVT(), M->getMemOperand());
6868   }
6869 
6870   case Intrinsic::amdgcn_buffer_store:
6871   case Intrinsic::amdgcn_buffer_store_format: {
6872     SDValue VData = Op.getOperand(2);
6873     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6874     if (IsD16)
6875       VData = handleD16VData(VData, DAG);
6876     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6877     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6878     unsigned IdxEn = 1;
6879     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6880       IdxEn = Idx->getZExtValue() != 0;
6881     SDValue Ops[] = {
6882       Chain,
6883       VData,
6884       Op.getOperand(3), // rsrc
6885       Op.getOperand(4), // vindex
6886       SDValue(), // voffset -- will be set by setBufferOffsets
6887       SDValue(), // soffset -- will be set by setBufferOffsets
6888       SDValue(), // offset -- will be set by setBufferOffsets
6889       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6890       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6891     };
6892     setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
6893     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
6894                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6895     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6896     MemSDNode *M = cast<MemSDNode>(Op);
6897 
6898     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6899     EVT VDataType = VData.getValueType().getScalarType();
6900     if (VDataType == MVT::i8 || VDataType == MVT::i16)
6901       return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6902 
6903     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6904                                    M->getMemoryVT(), M->getMemOperand());
6905   }
6906 
6907   case Intrinsic::amdgcn_raw_buffer_store:
6908   case Intrinsic::amdgcn_raw_buffer_store_format: {
6909     const bool IsFormat =
6910         IntrinsicID == Intrinsic::amdgcn_raw_buffer_store_format;
6911 
6912     SDValue VData = Op.getOperand(2);
6913     EVT VDataVT = VData.getValueType();
6914     EVT EltType = VDataVT.getScalarType();
6915     bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
6916     if (IsD16)
6917       VData = handleD16VData(VData, DAG);
6918 
6919     if (!isTypeLegal(VDataVT)) {
6920       VData =
6921           DAG.getNode(ISD::BITCAST, DL,
6922                       getEquivalentMemType(*DAG.getContext(), VDataVT), VData);
6923     }
6924 
6925     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6926     SDValue Ops[] = {
6927       Chain,
6928       VData,
6929       Op.getOperand(3), // rsrc
6930       DAG.getConstant(0, DL, MVT::i32), // vindex
6931       Offsets.first,    // voffset
6932       Op.getOperand(5), // soffset
6933       Offsets.second,   // offset
6934       Op.getOperand(6), // cachepolicy
6935       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6936     };
6937     unsigned Opc =
6938         IsFormat ? AMDGPUISD::BUFFER_STORE_FORMAT : AMDGPUISD::BUFFER_STORE;
6939     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6940     MemSDNode *M = cast<MemSDNode>(Op);
6941 
6942     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6943     if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32)
6944       return handleByteShortBufferStores(DAG, VDataVT, DL, Ops, M);
6945 
6946     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6947                                    M->getMemoryVT(), M->getMemOperand());
6948   }
6949 
6950   case Intrinsic::amdgcn_struct_buffer_store:
6951   case Intrinsic::amdgcn_struct_buffer_store_format: {
6952     const bool IsFormat =
6953         IntrinsicID == Intrinsic::amdgcn_struct_buffer_store_format;
6954 
6955     SDValue VData = Op.getOperand(2);
6956     EVT VDataVT = VData.getValueType();
6957     EVT EltType = VDataVT.getScalarType();
6958     bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
6959 
6960     if (IsD16)
6961       VData = handleD16VData(VData, DAG);
6962 
6963     if (!isTypeLegal(VDataVT)) {
6964       VData =
6965           DAG.getNode(ISD::BITCAST, DL,
6966                       getEquivalentMemType(*DAG.getContext(), VDataVT), VData);
6967     }
6968 
6969     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6970     SDValue Ops[] = {
6971       Chain,
6972       VData,
6973       Op.getOperand(3), // rsrc
6974       Op.getOperand(4), // vindex
6975       Offsets.first,    // voffset
6976       Op.getOperand(6), // soffset
6977       Offsets.second,   // offset
6978       Op.getOperand(7), // cachepolicy
6979       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6980     };
6981     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ?
6982                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6983     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6984     MemSDNode *M = cast<MemSDNode>(Op);
6985 
6986     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6987     EVT VDataType = VData.getValueType().getScalarType();
6988     if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32)
6989       return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6990 
6991     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6992                                    M->getMemoryVT(), M->getMemOperand());
6993   }
6994 
6995   case Intrinsic::amdgcn_buffer_atomic_fadd: {
6996     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6997     unsigned IdxEn = 1;
6998     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6999       IdxEn = Idx->getZExtValue() != 0;
7000     SDValue Ops[] = {
7001       Chain,
7002       Op.getOperand(2), // vdata
7003       Op.getOperand(3), // rsrc
7004       Op.getOperand(4), // vindex
7005       SDValue(),        // voffset -- will be set by setBufferOffsets
7006       SDValue(),        // soffset -- will be set by setBufferOffsets
7007       SDValue(),        // offset -- will be set by setBufferOffsets
7008       DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
7009       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
7010     };
7011     setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
7012     EVT VT = Op.getOperand(2).getValueType();
7013 
7014     auto *M = cast<MemSDNode>(Op);
7015     unsigned Opcode = VT.isVector() ? AMDGPUISD::BUFFER_ATOMIC_PK_FADD
7016                                     : AMDGPUISD::BUFFER_ATOMIC_FADD;
7017 
7018     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
7019                                    M->getMemOperand());
7020   }
7021 
7022   case Intrinsic::amdgcn_global_atomic_fadd: {
7023     SDValue Ops[] = {
7024       Chain,
7025       Op.getOperand(2), // ptr
7026       Op.getOperand(3)  // vdata
7027     };
7028     EVT VT = Op.getOperand(3).getValueType();
7029 
7030     auto *M = cast<MemSDNode>(Op);
7031     unsigned Opcode = VT.isVector() ? AMDGPUISD::ATOMIC_PK_FADD
7032                                     : AMDGPUISD::ATOMIC_FADD;
7033 
7034     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
7035                                    M->getMemOperand());
7036   }
7037 
7038   case Intrinsic::amdgcn_end_cf:
7039     return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other,
7040                                       Op->getOperand(2), Chain), 0);
7041 
7042   default: {
7043     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
7044             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
7045       return lowerImage(Op, ImageDimIntr, DAG);
7046 
7047     return Op;
7048   }
7049   }
7050 }
7051 
7052 // The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args:
7053 // offset (the offset that is included in bounds checking and swizzling, to be
7054 // split between the instruction's voffset and immoffset fields) and soffset
7055 // (the offset that is excluded from bounds checking and swizzling, to go in
7056 // the instruction's soffset field).  This function takes the first kind of
7057 // offset and figures out how to split it between voffset and immoffset.
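// For example, a constant combined offset of 5000 comes back as a voffset of
// 4096 and an immoffset of 904, following the rounding rule described below.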
7058 std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets(
7059     SDValue Offset, SelectionDAG &DAG) const {
7060   SDLoc DL(Offset);
7061   const unsigned MaxImm = 4095;
7062   SDValue N0 = Offset;
7063   ConstantSDNode *C1 = nullptr;
7064 
7065   if ((C1 = dyn_cast<ConstantSDNode>(N0)))
7066     N0 = SDValue();
7067   else if (DAG.isBaseWithConstantOffset(N0)) {
7068     C1 = cast<ConstantSDNode>(N0.getOperand(1));
7069     N0 = N0.getOperand(0);
7070   }
7071 
7072   if (C1) {
7073     unsigned ImmOffset = C1->getZExtValue();
7074     // If the immediate value is too big for the immoffset field, put the value
7075     // and -4096 into the immoffset field so that the value that is copied/added
7076     // for the voffset field is a multiple of 4096, and it stands more chance
7077     // of being CSEd with the copy/add for another similar load/store.
7078     // However, do not do that rounding down to a multiple of 4096 if that is a
7079     // negative number, as it appears to be illegal to have a negative offset
7080     // in the vgpr, even if adding the immediate offset makes it positive.
7081     unsigned Overflow = ImmOffset & ~MaxImm;
7082     ImmOffset -= Overflow;
7083     if ((int32_t)Overflow < 0) {
7084       Overflow += ImmOffset;
7085       ImmOffset = 0;
7086     }
7087     C1 = cast<ConstantSDNode>(DAG.getTargetConstant(ImmOffset, DL, MVT::i32));
7088     if (Overflow) {
7089       auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32);
7090       if (!N0)
7091         N0 = OverflowVal;
7092       else {
7093         SDValue Ops[] = { N0, OverflowVal };
7094         N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops);
7095       }
7096     }
7097   }
7098   if (!N0)
7099     N0 = DAG.getConstant(0, DL, MVT::i32);
7100   if (!C1)
7101     C1 = cast<ConstantSDNode>(DAG.getTargetConstant(0, DL, MVT::i32));
7102   return {N0, SDValue(C1, 0)};
7103 }
7104 
7105 // Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the
7106 // three offsets (voffset, soffset and instoffset) into the SDValue[3] array
7107 // pointed to by Offsets.
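// Callers pass &Ops[4] of a buffer intrinsic's operand array, so the three
// results land in the node's voffset, soffset and offset slots.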
7108 void SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
7109                                         SelectionDAG &DAG, SDValue *Offsets,
7110                                         unsigned Align) const {
7111   SDLoc DL(CombinedOffset);
7112   if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) {
7113     uint32_t Imm = C->getZExtValue();
7114     uint32_t SOffset, ImmOffset;
7115     if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, Align)) {
7116       Offsets[0] = DAG.getConstant(0, DL, MVT::i32);
7117       Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
7118       Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32);
7119       return;
7120     }
7121   }
7122   if (DAG.isBaseWithConstantOffset(CombinedOffset)) {
7123     SDValue N0 = CombinedOffset.getOperand(0);
7124     SDValue N1 = CombinedOffset.getOperand(1);
7125     uint32_t SOffset, ImmOffset;
7126     int Offset = cast<ConstantSDNode>(N1)->getSExtValue();
7127     if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset,
7128                                                 Subtarget, Align)) {
7129       Offsets[0] = N0;
7130       Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
7131       Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32);
7132       return;
7133     }
7134   }
7135   Offsets[0] = CombinedOffset;
7136   Offsets[1] = DAG.getConstant(0, DL, MVT::i32);
7137   Offsets[2] = DAG.getTargetConstant(0, DL, MVT::i32);
7138 }
7139 
7140 // Handle 8 bit and 16 bit buffer loads
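// The access is emitted as BUFFER_LOAD_UBYTE/USHORT producing an i32, which is
// then truncated to the requested width and bitcast back to LoadVT (e.g. f16).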
7141 SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG,
7142                                                      EVT LoadVT, SDLoc DL,
7143                                                      ArrayRef<SDValue> Ops,
7144                                                      MemSDNode *M) const {
7145   EVT IntVT = LoadVT.changeTypeToInteger();
7146   unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ?
7147          AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT;
7148 
7149   SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other);
7150   SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList,
7151                                                Ops, IntVT,
7152                                                M->getMemOperand());
7153   SDValue LoadVal = DAG.getNode(ISD::TRUNCATE, DL, IntVT, BufferLoad);
7154   LoadVal = DAG.getNode(ISD::BITCAST, DL, LoadVT, LoadVal);
7155 
7156   return DAG.getMergeValues({LoadVal, BufferLoad.getValue(1)}, DL);
7157 }
7158 
7159 // Handle 8 bit and 16 bit buffer stores
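// f16 data is bitcast to i16 first; the value is then any-extended to i32 so it
// can be stored with BUFFER_STORE_BYTE/SHORT.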
7160 SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG,
7161                                                       EVT VDataType, SDLoc DL,
7162                                                       SDValue Ops[],
7163                                                       MemSDNode *M) const {
7164   if (VDataType == MVT::f16)
7165     Ops[1] = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Ops[1]);
7166 
7167   SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]);
7168   Ops[1] = BufferStoreExt;
7169   unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE :
7170                                  AMDGPUISD::BUFFER_STORE_SHORT;
7171   ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9);
7172   return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType,
7173                                      M->getMemOperand());
7174 }
7175 
7176 static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
7177                                  ISD::LoadExtType ExtType, SDValue Op,
7178                                  const SDLoc &SL, EVT VT) {
7179   if (VT.bitsLT(Op.getValueType()))
7180     return DAG.getNode(ISD::TRUNCATE, SL, VT, Op);
7181 
7182   switch (ExtType) {
7183   case ISD::SEXTLOAD:
7184     return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op);
7185   case ISD::ZEXTLOAD:
7186     return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op);
7187   case ISD::EXTLOAD:
7188     return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op);
7189   case ISD::NON_EXTLOAD:
7190     return Op;
7191   }
7192 
7193   llvm_unreachable("invalid ext type");
7194 }
7195 
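// Widen a sufficiently aligned, uniform sub-dword load from a constant or
// invariant global address into an i32 load, then truncate/extend the result
// back to the original type.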
7196 SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
7197   SelectionDAG &DAG = DCI.DAG;
7198   if (Ld->getAlignment() < 4 || Ld->isDivergent())
7199     return SDValue();
7200 
7201   // FIXME: Constant loads should all be marked invariant.
7202   unsigned AS = Ld->getAddressSpace();
7203   if (AS != AMDGPUAS::CONSTANT_ADDRESS &&
7204       AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
7205       (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant()))
7206     return SDValue();
7207 
7208   // Don't do this early, since it may interfere with adjacent load merging for
7209   // illegal types. We can avoid losing alignment information for exotic types
7210   // pre-legalize.
7211   EVT MemVT = Ld->getMemoryVT();
7212   if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) ||
7213       MemVT.getSizeInBits() >= 32)
7214     return SDValue();
7215 
7216   SDLoc SL(Ld);
7217 
7218   assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
7219          "unexpected vector extload");
7220 
7221   // TODO: Drop only high part of range.
7222   SDValue Ptr = Ld->getBasePtr();
7223   SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
7224                                 MVT::i32, SL, Ld->getChain(), Ptr,
7225                                 Ld->getOffset(),
7226                                 Ld->getPointerInfo(), MVT::i32,
7227                                 Ld->getAlignment(),
7228                                 Ld->getMemOperand()->getFlags(),
7229                                 Ld->getAAInfo(),
7230                                 nullptr); // Drop ranges
7231 
7232   EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
7233   if (MemVT.isFloatingPoint()) {
7234     assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
7235            "unexpected fp extload");
7236     TruncVT = MemVT.changeTypeToInteger();
7237   }
7238 
7239   SDValue Cvt = NewLoad;
7240   if (Ld->getExtensionType() == ISD::SEXTLOAD) {
7241     Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad,
7242                       DAG.getValueType(TruncVT));
7243   } else if (Ld->getExtensionType() == ISD::ZEXTLOAD ||
7244              Ld->getExtensionType() == ISD::NON_EXTLOAD) {
7245     Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT);
7246   } else {
7247     assert(Ld->getExtensionType() == ISD::EXTLOAD);
7248   }
7249 
7250   EVT VT = Ld->getValueType(0);
7251   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
7252 
7253   DCI.AddToWorklist(Cvt.getNode());
7254 
7255   // We may need to handle exotic cases, such as i16->i64 extloads, so insert
7256   // the appropriate extension from the 32-bit load.
7257   Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT);
7258   DCI.AddToWorklist(Cvt.getNode());
7259 
7260   // Handle conversion back to floating point if necessary.
7261   Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt);
7262 
7263   return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL);
7264 }
7265 
7266 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7267   SDLoc DL(Op);
7268   LoadSDNode *Load = cast<LoadSDNode>(Op);
7269   ISD::LoadExtType ExtType = Load->getExtensionType();
7270   EVT MemVT = Load->getMemoryVT();
7271 
7272   if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
7273     if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
7274       return SDValue();
7275 
7276     // FIXME: Copied from PPC
7277     // First, load into 32 bits, then truncate to the result type.
7278 
7279     SDValue Chain = Load->getChain();
7280     SDValue BasePtr = Load->getBasePtr();
7281     MachineMemOperand *MMO = Load->getMemOperand();
7282 
7283     EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
7284 
7285     SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
7286                                    BasePtr, RealMemVT, MMO);
7287 
7288     if (!MemVT.isVector()) {
7289       SDValue Ops[] = {
7290         DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
7291         NewLD.getValue(1)
7292       };
7293 
7294       return DAG.getMergeValues(Ops, DL);
7295     }
7296 
7297     SmallVector<SDValue, 3> Elts;
7298     for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) {
7299       SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD,
7300                                 DAG.getConstant(I, DL, MVT::i32));
7301 
7302       Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt));
7303     }
7304 
7305     SDValue Ops[] = {
7306       DAG.getBuildVector(MemVT, DL, Elts),
7307       NewLD.getValue(1)
7308     };
7309 
7310     return DAG.getMergeValues(Ops, DL);
7311   }
7312 
7313   if (!MemVT.isVector())
7314     return SDValue();
7315 
7316   assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
7317          "Custom lowering for non-i32 vectors hasn't been implemented.");
7318 
7319   if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
7320                           *Load->getMemOperand())) {
7321     SDValue Ops[2];
7322     std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
7323     return DAG.getMergeValues(Ops, DL);
7324   }
7325 
7326   unsigned Alignment = Load->getAlignment();
7327   unsigned AS = Load->getAddressSpace();
7328   if (Subtarget->hasLDSMisalignedBug() &&
7329       AS == AMDGPUAS::FLAT_ADDRESS &&
7330       Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) {
7331     return SplitVectorLoad(Op, DAG);
7332   }
7333 
7334   MachineFunction &MF = DAG.getMachineFunction();
7335   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
7336   // If there is a possibility that flat instructions access scratch memory
7337   // then we need to use the same legalization rules we use for private.
7338   if (AS == AMDGPUAS::FLAT_ADDRESS)
7339     AS = MFI->hasFlatScratchInit() ?
7340          AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
7341 
7342   unsigned NumElements = MemVT.getVectorNumElements();
7343 
7344   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
7345       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
7346     if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) {
7347       if (MemVT.isPow2VectorType())
7348         return SDValue();
7349       if (NumElements == 3)
7350         return WidenVectorLoad(Op, DAG);
7351       return SplitVectorLoad(Op, DAG);
7352     }
7353     // Non-uniform loads will be selected to MUBUF instructions, so they
7354     // have the same legalization requirements as global and private
7355     // loads.
7356     //
7357   }
7358 
7359   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
7360       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
7361       AS == AMDGPUAS::GLOBAL_ADDRESS) {
7362     if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
7363         !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) &&
7364         Alignment >= 4 && NumElements < 32) {
7365       if (MemVT.isPow2VectorType())
7366         return SDValue();
7367       if (NumElements == 3)
7368         return WidenVectorLoad(Op, DAG);
7369       return SplitVectorLoad(Op, DAG);
7370     }
7371     // Non-uniform loads will be selected to MUBUF instructions, so they
7372     // have the same legalization requirements as global and private
7373     // loads.
7374     //
7375   }
7376   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
7377       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
7378       AS == AMDGPUAS::GLOBAL_ADDRESS ||
7379       AS == AMDGPUAS::FLAT_ADDRESS) {
7380     if (NumElements > 4)
7381       return SplitVectorLoad(Op, DAG);
7382     // v3 loads not supported on SI.
7383     if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7384       return WidenVectorLoad(Op, DAG);
7385     // v3 and v4 loads are supported for private and global memory.
7386     return SDValue();
7387   }
7388   if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
7389     // Depending on the setting of the private_element_size field in the
7390     // resource descriptor, we can only make private accesses up to a certain
7391     // size.
7392     switch (Subtarget->getMaxPrivateElementSize()) {
7393     case 4:
7394       return scalarizeVectorLoad(Load, DAG);
7395     case 8:
7396       if (NumElements > 2)
7397         return SplitVectorLoad(Op, DAG);
7398       return SDValue();
7399     case 16:
7400       // Same as global/flat
7401       if (NumElements > 4)
7402         return SplitVectorLoad(Op, DAG);
7403       // v3 loads not supported on SI.
7404       if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7405         return WidenVectorLoad(Op, DAG);
7406       return SDValue();
7407     default:
7408       llvm_unreachable("unsupported private_element_size");
7409     }
7410   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
7411     // Use ds_read_b128 if possible.
7412     if (Subtarget->useDS128() && Load->getAlignment() >= 16 &&
7413         MemVT.getStoreSize() == 16)
7414       return SDValue();
7415 
7416     if (NumElements > 2)
7417       return SplitVectorLoad(Op, DAG);
7418 
7419     // SI has a hardware bug in the LDS / GDS bounds checking: if the base
7420     // address is negative, then the instruction is incorrectly treated as
7421     // out-of-bounds even if base + offset is in bounds. Split vectorized
7422     // loads here to avoid emitting ds_read2_b32. We may re-combine the
7423     // load later in the SILoadStoreOptimizer.
7424     if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
7425         NumElements == 2 && MemVT.getStoreSize() == 8 &&
7426         Load->getAlignment() < 8) {
7427       return SplitVectorLoad(Op, DAG);
7428     }
7429   }
7430   return SDValue();
7431 }
7432 
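// Lower a 64-bit select by bitcasting the operands to v2i32 and selecting the
// low and high halves with two 32-bit selects, then reassembling the result.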
7433 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
7434   EVT VT = Op.getValueType();
7435   assert(VT.getSizeInBits() == 64);
7436 
7437   SDLoc DL(Op);
7438   SDValue Cond = Op.getOperand(0);
7439 
7440   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
7441   SDValue One = DAG.getConstant(1, DL, MVT::i32);
7442 
7443   SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
7444   SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
7445 
7446   SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
7447   SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
7448 
7449   SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
7450 
7451   SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
7452   SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
7453 
7454   SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
7455 
7456   SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
7457   return DAG.getNode(ISD::BITCAST, DL, VT, Res);
7458 }
7459 
7460 // Catch division cases where we can use shortcuts with rcp and rsq
7461 // instructions.
7462 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
7463                                               SelectionDAG &DAG) const {
7464   SDLoc SL(Op);
7465   SDValue LHS = Op.getOperand(0);
7466   SDValue RHS = Op.getOperand(1);
7467   EVT VT = Op.getValueType();
7468   const SDNodeFlags Flags = Op->getFlags();
7469   bool Unsafe = DAG.getTarget().Options.UnsafeFPMath || Flags.hasAllowReciprocal();
7470 
7471   if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
7472     return SDValue();
7473 
7474   if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
7475     if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
7476       if (CLHS->isExactlyValue(1.0)) {
7477         // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
7478         // the CI documentation they have a worst-case error of 1 ulp.
7479         // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
7480         // use them as long as we aren't trying to use denormals.
7481         //
7482         // v_rcp_f16 and v_rsq_f16 DO support denormals.
7483 
7484         // 1.0 / sqrt(x) -> rsq(x)
7485 
7486         // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
7487         // error seems really high at 2^29 ULP.
7488         if (RHS.getOpcode() == ISD::FSQRT)
7489           return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
7490 
7491         // 1.0 / x -> rcp(x)
7492         return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
7493       }
7494 
7495       // Same as for 1.0, but expand the sign out of the constant.
7496       if (CLHS->isExactlyValue(-1.0)) {
7497         // -1.0 / x -> rcp (fneg x)
7498         SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
7499         return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
7500       }
7501     }
7502   }
7503 
7504   if (Unsafe) {
7505     // Turn into multiply by the reciprocal.
7506     // x / y -> x * (1.0 / y)
7507     SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
7508     return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
7509   }
7510 
7511   return SDValue();
7512 }
7513 
7514 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
7515                           EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
7516   if (GlueChain->getNumValues() <= 1) {
7517     return DAG.getNode(Opcode, SL, VT, A, B);
7518   }
7519 
7520   assert(GlueChain->getNumValues() == 3);
7521 
7522   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
7523   switch (Opcode) {
7524   default: llvm_unreachable("no chain equivalent for opcode");
7525   case ISD::FMUL:
7526     Opcode = AMDGPUISD::FMUL_W_CHAIN;
7527     break;
7528   }
7529 
7530   return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
7531                      GlueChain.getValue(2));
7532 }
7533 
7534 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
7535                            EVT VT, SDValue A, SDValue B, SDValue C,
7536                            SDValue GlueChain) {
7537   if (GlueChain->getNumValues() <= 1) {
7538     return DAG.getNode(Opcode, SL, VT, A, B, C);
7539   }
7540 
7541   assert(GlueChain->getNumValues() == 3);
7542 
7543   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
7544   switch (Opcode) {
7545   default: llvm_unreachable("no chain equivalent for opcode");
7546   case ISD::FMA:
7547     Opcode = AMDGPUISD::FMA_W_CHAIN;
7548     break;
7549   }
7550 
7551   return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
7552                      GlueChain.getValue(2));
7553 }
7554 
7555 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
7556   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
7557     return FastLowered;
7558 
7559   SDLoc SL(Op);
7560   SDValue Src0 = Op.getOperand(0);
7561   SDValue Src1 = Op.getOperand(1);
7562 
7563   SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
7564   SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
7565 
7566   SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
7567   SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
7568 
7569   SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
7570   SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
7571 
7572   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
7573 }
7574 
7575 // Faster 2.5 ULP division that does not support denormals.
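// If |RHS| exceeds 2^96 (K0), the denominator is pre-scaled by 2^-32 (K1) so
// its reciprocal does not flush to zero, and the quotient is multiplied by the
// same 2^-32 factor afterwards to compensate.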
7576 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
7577   SDLoc SL(Op);
7578   SDValue LHS = Op.getOperand(1);
7579   SDValue RHS = Op.getOperand(2);
7580 
7581   SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
7582 
7583   const APFloat K0Val(BitsToFloat(0x6f800000));
7584   const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
7585 
7586   const APFloat K1Val(BitsToFloat(0x2f800000));
7587   const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
7588 
7589   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
7590 
7591   EVT SetCCVT =
7592     getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
7593 
7594   SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
7595 
7596   SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
7597 
7598   // TODO: Should this propagate fast-math-flags?
7599   r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
7600 
7601   // rcp does not support denormals.
7602   SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
7603 
7604   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
7605 
7606   return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
7607 }
7608 
7609 // Returns immediate value for setting the F32 denorm mode when using the
7610 // S_DENORM_MODE instruction.
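// Bits [1:0] of the result hold the requested single-precision mode and bits
// [3:2] preserve the subtarget's default double-precision mode.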
7611 static const SDValue getSPDenormModeValue(int SPDenormMode, SelectionDAG &DAG,
7612                                           const SDLoc &SL, const GCNSubtarget *ST) {
7613   assert(ST->hasDenormModeInst() && "Requires S_DENORM_MODE");
7614   int DPDenormModeDefault = ST->hasFP64Denormals()
7615                                 ? FP_DENORM_FLUSH_NONE
7616                                 : FP_DENORM_FLUSH_IN_FLUSH_OUT;
7617 
7618   int Mode = SPDenormMode | (DPDenormModeDefault << 2);
7619   return DAG.getTargetConstant(Mode, SL, MVT::i32);
7620 }
7621 
7622 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
7623   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
7624     return FastLowered;
7625 
7626   SDLoc SL(Op);
7627   SDValue LHS = Op.getOperand(0);
7628   SDValue RHS = Op.getOperand(1);
7629 
7630   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
7631 
7632   SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
7633 
7634   SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
7635                                           RHS, RHS, LHS);
7636   SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
7637                                         LHS, RHS, LHS);
7638 
7639   // Denominator is scaled to not be denormal, so using rcp is ok.
7640   SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
7641                                   DenominatorScaled);
7642   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
7643                                      DenominatorScaled);
7644 
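  // hwreg encoding of the 2-bit MODE register field at offset 4 (WIDTH_M1 = 1),
  // i.e. the single-precision denormal controls toggled below.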
7645   const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
7646                                (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
7647                                (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
7648   const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);
7649 
7650   if (!Subtarget->hasFP32Denormals()) {
7651     SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
7652 
7653     SDValue EnableDenorm;
7654     if (Subtarget->hasDenormModeInst()) {
7655       const SDValue EnableDenormValue =
7656           getSPDenormModeValue(FP_DENORM_FLUSH_NONE, DAG, SL, Subtarget);
7657 
7658       EnableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, BindParamVTs,
7659                                  DAG.getEntryNode(), EnableDenormValue);
7660     } else {
7661       const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
7662                                                         SL, MVT::i32);
7663       EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
7664                                  DAG.getEntryNode(), EnableDenormValue,
7665                                  BitField);
7666     }
7667 
7668     SDValue Ops[3] = {
7669       NegDivScale0,
7670       EnableDenorm.getValue(0),
7671       EnableDenorm.getValue(1)
7672     };
7673 
7674     NegDivScale0 = DAG.getMergeValues(Ops, SL);
7675   }
7676 
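  // Refine the reciprocal estimate with a Newton-Raphson step (Fma0/Fma1),
  // form a quotient estimate (Mul), then refine it and compute its residual
  // (Fma2..Fma4) for the DIV_FMAS/DIV_FIXUP fixup below.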
7677   SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
7678                              ApproxRcp, One, NegDivScale0);
7679 
7680   SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
7681                              ApproxRcp, Fma0);
7682 
7683   SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
7684                            Fma1, Fma1);
7685 
7686   SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
7687                              NumeratorScaled, Mul);
7688 
7689   SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul, Fma2);
7690 
7691   SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
7692                              NumeratorScaled, Fma3);
7693 
7694   if (!Subtarget->hasFP32Denormals()) {
7695 
7696     SDValue DisableDenorm;
7697     if (Subtarget->hasDenormModeInst()) {
7698       const SDValue DisableDenormValue =
7699           getSPDenormModeValue(FP_DENORM_FLUSH_IN_FLUSH_OUT, DAG, SL, Subtarget);
7700 
7701       DisableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, MVT::Other,
7702                                   Fma4.getValue(1), DisableDenormValue,
7703                                   Fma4.getValue(2));
7704     } else {
7705       const SDValue DisableDenormValue =
7706           DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
7707 
7708       DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
7709                                   Fma4.getValue(1), DisableDenormValue,
7710                                   BitField, Fma4.getValue(2));
7711     }
7712 
7713     SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
7714                                       DisableDenorm, DAG.getRoot());
7715     DAG.setRoot(OutputChain);
7716   }
7717 
7718   SDValue Scale = NumeratorScaled.getValue(1);
7719   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
7720                              Fma4, Fma1, Fma3, Scale);
7721 
7722   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
7723 }
7724 
7725 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
7726   if (DAG.getTarget().Options.UnsafeFPMath)
7727     return lowerFastUnsafeFDIV(Op, DAG);
7728 
7729   SDLoc SL(Op);
7730   SDValue X = Op.getOperand(0);
7731   SDValue Y = Op.getOperand(1);
7732 
7733   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
7734 
7735   SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
7736 
7737   SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
7738 
7739   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
7740 
7741   SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
7742 
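  // Two Newton-Raphson refinements of the reciprocal (Fma0..Fma3), then a
  // quotient estimate (Mul) and its residual (Fma4) feed DIV_FMAS/DIV_FIXUP.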
7743   SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
7744 
7745   SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
7746 
7747   SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
7748 
7749   SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
7750 
7751   SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
7752   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
7753 
7754   SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
7755                              NegDivScale0, Mul, DivScale1);
7756 
7757   SDValue Scale;
7758 
7759   if (!Subtarget->hasUsableDivScaleConditionOutput()) {
7760     // Workaround a hardware bug on SI where the condition output from div_scale
7761     // is not usable.
7762 
7763     const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
7764 
7765     // Figure out which scale to use for div_fmas.
7766     SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
7767     SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
7768     SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
7769     SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
7770 
7771     SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
7772     SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
7773 
7774     SDValue Scale0Hi
7775       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
7776     SDValue Scale1Hi
7777       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
7778 
7779     SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
7780     SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
7781     Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
7782   } else {
7783     Scale = DivScale1.getValue(1);
7784   }
7785 
7786   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
7787                              Fma4, Fma3, Mul, Scale);
7788 
7789   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
7790 }
7791 
7792 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
7793   EVT VT = Op.getValueType();
7794 
7795   if (VT == MVT::f32)
7796     return LowerFDIV32(Op, DAG);
7797 
7798   if (VT == MVT::f64)
7799     return LowerFDIV64(Op, DAG);
7800 
7801   if (VT == MVT::f16)
7802     return LowerFDIV16(Op, DAG);
7803 
7804   llvm_unreachable("Unexpected type for fdiv");
7805 }
7806 
7807 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7808   SDLoc DL(Op);
7809   StoreSDNode *Store = cast<StoreSDNode>(Op);
7810   EVT VT = Store->getMemoryVT();
7811 
7812   if (VT == MVT::i1) {
7813     return DAG.getTruncStore(Store->getChain(), DL,
7814        DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
7815        Store->getBasePtr(), MVT::i1, Store->getMemOperand());
7816   }
7817 
7818   assert(VT.isVector() &&
7819          Store->getValue().getValueType().getScalarType() == MVT::i32);
7820 
7821   if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
7822                           *Store->getMemOperand())) {
7823     return expandUnalignedStore(Store, DAG);
7824   }
7825 
7826   unsigned AS = Store->getAddressSpace();
7827   if (Subtarget->hasLDSMisalignedBug() &&
7828       AS == AMDGPUAS::FLAT_ADDRESS &&
7829       Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) {
7830     return SplitVectorStore(Op, DAG);
7831   }
7832 
7833   MachineFunction &MF = DAG.getMachineFunction();
7834   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
7835   // If there is a possibility that flat instructions access scratch memory
7836   // then we need to use the same legalization rules we use for private.
7837   if (AS == AMDGPUAS::FLAT_ADDRESS)
7838     AS = MFI->hasFlatScratchInit() ?
7839          AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
7840 
7841   unsigned NumElements = VT.getVectorNumElements();
7842   if (AS == AMDGPUAS::GLOBAL_ADDRESS ||
7843       AS == AMDGPUAS::FLAT_ADDRESS) {
7844     if (NumElements > 4)
7845       return SplitVectorStore(Op, DAG);
7846     // v3 stores not supported on SI.
7847     if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7848       return SplitVectorStore(Op, DAG);
7849     return SDValue();
7850   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
7851     switch (Subtarget->getMaxPrivateElementSize()) {
7852     case 4:
7853       return scalarizeVectorStore(Store, DAG);
7854     case 8:
7855       if (NumElements > 2)
7856         return SplitVectorStore(Op, DAG);
7857       return SDValue();
7858     case 16:
7859       if (NumElements > 4 || NumElements == 3)
7860         return SplitVectorStore(Op, DAG);
7861       return SDValue();
7862     default:
7863       llvm_unreachable("unsupported private_element_size");
7864     }
7865   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
7866     // Use ds_write_b128 if possible.
7867     if (Subtarget->useDS128() && Store->getAlignment() >= 16 &&
7868         VT.getStoreSize() == 16 && NumElements != 3)
7869       return SDValue();
7870 
7871     if (NumElements > 2)
7872       return SplitVectorStore(Op, DAG);
7873 
7874     // SI has a hardware bug in the LDS / GDS bounds checking: if the base
7875     // address is negative, then the instruction is incorrectly treated as
7876     // out-of-bounds even if base + offset is in bounds. Split vectorized
7877     // stores here to avoid emitting ds_write2_b32. We may re-combine the
7878     // store later in the SILoadStoreOptimizer.
7879     if (!Subtarget->hasUsableDSOffset() &&
7880         NumElements == 2 && VT.getStoreSize() == 8 &&
7881         Store->getAlignment() < 8) {
7882       return SplitVectorStore(Op, DAG);
7883     }
7884 
7885     return SDValue();
7886   } else {
7887     llvm_unreachable("unhandled address space");
7888   }
7889 }
7890 
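// Lower FSIN/FCOS: the argument is pre-scaled by 1/(2*pi) for the hardware
// SIN/COS instructions; on subtargets with a reduced input range the scaled
// value is first wrapped into [0, 1) with FRACT.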
7891 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
7892   SDLoc DL(Op);
7893   EVT VT = Op.getValueType();
7894   SDValue Arg = Op.getOperand(0);
7895   SDValue TrigVal;
7896 
7897   // TODO: Should this propagate fast-math-flags?
7898 
7899   SDValue OneOver2Pi = DAG.getConstantFP(0.5 / M_PI, DL, VT);
7900 
7901   if (Subtarget->hasTrigReducedRange()) {
7902     SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
7903     TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal);
7904   } else {
7905     TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
7906   }
7907 
7908   switch (Op.getOpcode()) {
7909   case ISD::FCOS:
7910     return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal);
7911   case ISD::FSIN:
7912     return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal);
7913   default:
7914     llvm_unreachable("Wrong trig opcode");
7915   }
7916 }
7917 
7918 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
7919   AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
7920   assert(AtomicNode->isCompareAndSwap());
7921   unsigned AS = AtomicNode->getAddressSpace();
7922 
7923   // No custom lowering required for local address space
7924   if (!isFlatGlobalAddrSpace(AS))
7925     return Op;
7926 
7927   // Non-local address space requires custom lowering for atomic compare
7928   // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2
7929   SDLoc DL(Op);
7930   SDValue ChainIn = Op.getOperand(0);
7931   SDValue Addr = Op.getOperand(1);
7932   SDValue Old = Op.getOperand(2);
7933   SDValue New = Op.getOperand(3);
7934   EVT VT = Op.getValueType();
7935   MVT SimpleVT = VT.getSimpleVT();
7936   MVT VecType = MVT::getVectorVT(SimpleVT, 2);
7937 
7938   SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
7939   SDValue Ops[] = { ChainIn, Addr, NewOld };
7940 
7941   return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
7942                                  Ops, VT, AtomicNode->getMemOperand());
7943 }
7944 
7945 //===----------------------------------------------------------------------===//
7946 // Custom DAG optimizations
7947 //===----------------------------------------------------------------------===//
7948 
7949 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
7950                                                      DAGCombinerInfo &DCI) const {
7951   EVT VT = N->getValueType(0);
7952   EVT ScalarVT = VT.getScalarType();
7953   if (ScalarVT != MVT::f32)
7954     return SDValue();
7955 
7956   SelectionDAG &DAG = DCI.DAG;
7957   SDLoc DL(N);
7958 
7959   SDValue Src = N->getOperand(0);
7960   EVT SrcVT = Src.getValueType();
7961 
7962   // TODO: We could try to match extracting the higher bytes, which would be
7963   // easier if i8 vectors weren't promoted to i32 vectors, particularly after
7964   // types are legalized. v4i8 -> v4f32 is probably the only case to worry
7965   // about in practice.
7966   if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
7967     if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
7968       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
7969       DCI.AddToWorklist(Cvt.getNode());
7970       return Cvt;
7971     }
7972   }
7973 
7974   return SDValue();
7975 }
7976 
7977 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
7978 
7979 // This is a variant of
7980 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
7981 //
7982 // The normal DAG combiner will do this, but only if the add has one use since
7983 // that would increase the number of instructions.
7984 //
7985 // This prevents us from seeing a constant offset that can be folded into a
7986 // memory instruction's addressing mode. If we know the resulting add offset of
7987 // a pointer can be folded into an addressing offset, we can replace the pointer
7988 // operand with the add of new constant offset. This eliminates one of the uses,
7989 // and may allow the remaining use to also be simplified.
7990 //
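// For example, (shl (add ptr, 16), 2) with multiple users of the add becomes
// (add (shl ptr, 2), 64), and the 64 can then be folded into each memory
// instruction's immediate offset.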
7991 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
7992                                                unsigned AddrSpace,
7993                                                EVT MemVT,
7994                                                DAGCombinerInfo &DCI) const {
7995   SDValue N0 = N->getOperand(0);
7996   SDValue N1 = N->getOperand(1);
7997 
7998   // We only do this to handle cases where it's profitable when there are
7999   // multiple uses of the add, so defer to the standard combine.
8000   if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
8001       N0->hasOneUse())
8002     return SDValue();
8003 
8004   const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
8005   if (!CN1)
8006     return SDValue();
8007 
8008   const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
8009   if (!CAdd)
8010     return SDValue();
8011 
8012   // If the resulting offset is too large, we can't fold it into the addressing
8013   // mode offset.
8014   APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
8015   Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());
8016 
8017   AddrMode AM;
8018   AM.HasBaseReg = true;
8019   AM.BaseOffs = Offset.getSExtValue();
8020   if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
8021     return SDValue();
8022 
8023   SelectionDAG &DAG = DCI.DAG;
8024   SDLoc SL(N);
8025   EVT VT = N->getValueType(0);
8026 
8027   SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
8028   SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
8029 
8030   SDNodeFlags Flags;
8031   Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
8032                           (N0.getOpcode() == ISD::OR ||
8033                            N0->getFlags().hasNoUnsignedWrap()));
8034 
8035   return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
8036 }
8037 
8038 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
8039                                                   DAGCombinerInfo &DCI) const {
8040   SDValue Ptr = N->getBasePtr();
8041   SelectionDAG &DAG = DCI.DAG;
8042   SDLoc SL(N);
8043 
8044   // TODO: We could also do this for multiplies.
8045   if (Ptr.getOpcode() == ISD::SHL) {
8046     SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(),  N->getAddressSpace(),
8047                                           N->getMemoryVT(), DCI);
8048     if (NewPtr) {
8049       SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
8050 
8051       NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
8052       return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
8053     }
8054   }
8055 
8056   return SDValue();
8057 }
8058 
8059 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
8060   return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
8061          (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
8062          (Opc == ISD::XOR && Val == 0);
8063 }
8064 
8065 // Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This
8066 // will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit
8067 // integer combine opportunities since most 64-bit operations are decomposed
8068 // this way.  TODO: We won't want this for SALU especially if it is an inline
8069 // immediate.
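// For example, (and i64:x, 0xffff00000000ffff) becomes a 32-bit and of the low
// half of x with 0x0000ffff and a 32-bit and of the high half with 0xffff0000
// when the constant has a single use, since a 64-bit constant that is not an
// inline immediate would have to be materialized in two halves anyway.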
8070 SDValue SITargetLowering::splitBinaryBitConstantOp(
8071   DAGCombinerInfo &DCI,
8072   const SDLoc &SL,
8073   unsigned Opc, SDValue LHS,
8074   const ConstantSDNode *CRHS) const {
8075   uint64_t Val = CRHS->getZExtValue();
8076   uint32_t ValLo = Lo_32(Val);
8077   uint32_t ValHi = Hi_32(Val);
8078   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8079 
  if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
       bitOpWithConstantIsReducible(Opc, ValHi)) ||
      (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
8083     // If we need to materialize a 64-bit immediate, it will be split up later
8084     // anyway. Avoid creating the harder to understand 64-bit immediate
8085     // materialization.
8086     return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
8087   }
8088 
8089   return SDValue();
8090 }
8091 
// Returns true if the argument is a boolean value that is not serialized into
// memory or a function argument and does not require a v_cndmask_b32 to be
// deserialized.
8094 static bool isBoolSGPR(SDValue V) {
8095   if (V.getValueType() != MVT::i1)
8096     return false;
8097   switch (V.getOpcode()) {
8098   default: break;
8099   case ISD::SETCC:
8100   case ISD::AND:
8101   case ISD::OR:
8102   case ISD::XOR:
8103   case AMDGPUISD::FP_CLASS:
8104     return true;
8105   }
8106   return false;
8107 }
8108 
8109 // If a constant has all zeroes or all ones within each byte return it.
8110 // Otherwise return 0.
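// For example, 0x00ff00ff and 0xff000000 are returned unchanged, while
// 0x00f000ff returns 0 because byte 2 selects only part of its bits.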
8111 static uint32_t getConstantPermuteMask(uint32_t C) {
8112   // 0xff for any zero byte in the mask
8113   uint32_t ZeroByteMask = 0;
8114   if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
8115   if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
8116   if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
8117   if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
8118   uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte
8119   if ((NonZeroByteMask & C) != NonZeroByteMask)
8120     return 0; // Partial bytes selected.
8121   return C;
8122 }
8123 
// Check if a node selects whole bytes from its operand 0 starting at a byte
// boundary while masking the rest. Returns the select mask in v_perm_b32
// encoding, or ~0 (all bits set) if the node does not match.
8127 // Note byte select encoding:
8128 // value 0-3 selects corresponding source byte;
8129 // value 0xc selects zero;
8130 // value 0xff selects 0xff.
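// For example, (and x, 0x0000ffff) yields the mask 0x0c0c0100, (shl x, 16)
// yields 0x01000c0c, and (srl x, 16) yields 0x0c0c0302.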
8131 static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
8132   assert(V.getValueSizeInBits() == 32);
8133 
8134   if (V.getNumOperands() != 2)
8135     return ~0;
8136 
8137   ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
8138   if (!N1)
8139     return ~0;
8140 
8141   uint32_t C = N1->getZExtValue();
8142 
8143   switch (V.getOpcode()) {
8144   default:
8145     break;
8146   case ISD::AND:
8147     if (uint32_t ConstMask = getConstantPermuteMask(C)) {
8148       return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
8149     }
8150     break;
8151 
8152   case ISD::OR:
8153     if (uint32_t ConstMask = getConstantPermuteMask(C)) {
8154       return (0x03020100 & ~ConstMask) | ConstMask;
8155     }
8156     break;
8157 
8158   case ISD::SHL:
8159     if (C % 8)
8160       return ~0;
8161 
8162     return uint32_t((0x030201000c0c0c0cull << C) >> 32);
8163 
8164   case ISD::SRL:
8165     if (C % 8)
8166       return ~0;
8167 
8168     return uint32_t(0x0c0c0c0c03020100ull >> C);
8169   }
8170 
8171   return ~0;
8172 }
8173 
8174 SDValue SITargetLowering::performAndCombine(SDNode *N,
8175                                             DAGCombinerInfo &DCI) const {
8176   if (DCI.isBeforeLegalize())
8177     return SDValue();
8178 
8179   SelectionDAG &DAG = DCI.DAG;
8180   EVT VT = N->getValueType(0);
8181   SDValue LHS = N->getOperand(0);
8182   SDValue RHS = N->getOperand(1);
8183 
8185   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
8186   if (VT == MVT::i64 && CRHS) {
8187     if (SDValue Split
8188         = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
8189       return Split;
8190   }
8191 
8192   if (CRHS && VT == MVT::i32) {
    // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
    // where nb = number of trailing zeroes in mask.
    // This can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
8196     // given that we are selecting 8 or 16 bit fields starting at byte boundary.
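    // For example, (and (srl x, 8), 0xff00) becomes
    // (shl (AssertZext i8 (bfe_u32 x, 16, 8)), 8), i.e. bits 16-23 of x are
    // extracted and placed at bit 8, a form the SDWA peephole can pick up.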
8197     uint64_t Mask = CRHS->getZExtValue();
8198     unsigned Bits = countPopulation(Mask);
8199     if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
8200         (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
8201       if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
8202         unsigned Shift = CShift->getZExtValue();
8203         unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
8204         unsigned Offset = NB + Shift;
8205         if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
8206           SDLoc SL(N);
8207           SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
8208                                     LHS->getOperand(0),
8209                                     DAG.getConstant(Offset, SL, MVT::i32),
8210                                     DAG.getConstant(Bits, SL, MVT::i32));
8211           EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
8212           SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
8213                                     DAG.getValueType(NarrowVT));
8214           SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
8215                                     DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
8216           return Shl;
8217         }
8218       }
8219     }
8220 
8221     // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
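    // For example, (and (perm x, y, 0x07060504), 0x0000ffff) becomes
    // (perm x, y, 0x0c0c0504): the two high bytes are forced to zero (0x0c)
    // and the two low bytes keep their original selectors.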
8222     if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM &&
8223         isa<ConstantSDNode>(LHS.getOperand(2))) {
8224       uint32_t Sel = getConstantPermuteMask(Mask);
8225       if (!Sel)
8226         return SDValue();
8227 
8228       // Select 0xc for all zero bytes
8229       Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c);
8230       SDLoc DL(N);
8231       return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
8232                          LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
8233     }
8234   }
8235 
8236   // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
8237   // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
8238   if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
8239     ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8240     ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
8241 
8242     SDValue X = LHS.getOperand(0);
8243     SDValue Y = RHS.getOperand(0);
8244     if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
8245       return SDValue();
8246 
8247     if (LCC == ISD::SETO) {
8248       if (X != LHS.getOperand(1))
8249         return SDValue();
8250 
8251       if (RCC == ISD::SETUNE) {
8252         const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
8253         if (!C1 || !C1->isInfinity() || C1->isNegative())
8254           return SDValue();
8255 
8256         const uint32_t Mask = SIInstrFlags::N_NORMAL |
8257                               SIInstrFlags::N_SUBNORMAL |
8258                               SIInstrFlags::N_ZERO |
8259                               SIInstrFlags::P_ZERO |
8260                               SIInstrFlags::P_SUBNORMAL |
8261                               SIInstrFlags::P_NORMAL;
8262 
8263         static_assert(((~(SIInstrFlags::S_NAN |
8264                           SIInstrFlags::Q_NAN |
8265                           SIInstrFlags::N_INFINITY |
8266                           SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
8267                       "mask not equal");
8268 
8269         SDLoc DL(N);
8270         return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
8271                            X, DAG.getConstant(Mask, DL, MVT::i32));
8272       }
8273     }
8274   }
8275 
8276   if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS)
8277     std::swap(LHS, RHS);
8278 
8279   if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS &&
8280       RHS.hasOneUse()) {
8281     ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8282     // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan)
8283     // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan)
8284     const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
8285     if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask &&
8286         (RHS.getOperand(0) == LHS.getOperand(0) &&
8287          LHS.getOperand(0) == LHS.getOperand(1))) {
8288       const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN;
8289       unsigned NewMask = LCC == ISD::SETO ?
8290         Mask->getZExtValue() & ~OrdMask :
8291         Mask->getZExtValue() & OrdMask;
8292 
8293       SDLoc DL(N);
8294       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0),
8295                          DAG.getConstant(NewMask, DL, MVT::i32));
8296     }
8297   }
8298 
8299   if (VT == MVT::i32 &&
8300       (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
8301     // and x, (sext cc from i1) => select cc, x, 0
8302     if (RHS.getOpcode() != ISD::SIGN_EXTEND)
8303       std::swap(LHS, RHS);
8304     if (isBoolSGPR(RHS.getOperand(0)))
8305       return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
8306                            LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
8307   }
8308 
8309   // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
8310   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8311   if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
8312       N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
8313     uint32_t LHSMask = getPermuteMask(DAG, LHS);
8314     uint32_t RHSMask = getPermuteMask(DAG, RHS);
8315     if (LHSMask != ~0u && RHSMask != ~0u) {
8316       // Canonicalize the expression in an attempt to have fewer unique masks
8317       // and therefore fewer registers used to hold the masks.
8318       if (LHSMask > RHSMask) {
8319         std::swap(LHSMask, RHSMask);
8320         std::swap(LHS, RHS);
8321       }
8322 
      // Select 0xc for each byte that takes a lane from its source operand. In
      // the masks, a zero byte is encoded as 0xc, an all-ones byte as 0xff, and
      // actual source lanes are in the 0-3 range.
8325       uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8326       uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8327 
      // Check if we need to combine values from two sources within a byte.
8329       if (!(LHSUsedLanes & RHSUsedLanes) &&
          // If we select the high word from one source and the low word from
          // the other, keep it for SDWA.
          // TODO: teach SDWA to work with v_perm_b32 and remove the check.
8332           !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
        // Each byte in each mask is either a lane selector (0-3), 0xff (the
        // byte is all ones), or 0x0c (the byte is zero). If either mask has
        // 0x0c in a byte, the combined byte must be 0x0c; otherwise the mask
        // that is not 0xff wins. ANDing the two masks gives the correct result
        // for every byte except where one mask has 0x0c and the other has a
        // lane selector, so force those bytes back to exactly 0x0c.
        uint32_t Mask = LHSMask & RHSMask;
        for (unsigned I = 0; I < 32; I += 8) {
          uint32_t ByteSel = 0xffu << I;
          if ((LHSMask & ByteSel) == (0x0cu << I) ||
              (RHSMask & ByteSel) == (0x0cu << I))
            Mask = (Mask & ~ByteSel) | (0x0cu << I);
        }
8344 
8345         // Add 4 to each active LHS lane. It will not affect any existing 0xff
8346         // or 0x0c.
8347         uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404);
8348         SDLoc DL(N);
8349 
8350         return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
8351                            LHS.getOperand(0), RHS.getOperand(0),
8352                            DAG.getConstant(Sel, DL, MVT::i32));
8353       }
8354     }
8355   }
8356 
8357   return SDValue();
8358 }
8359 
8360 SDValue SITargetLowering::performOrCombine(SDNode *N,
8361                                            DAGCombinerInfo &DCI) const {
8362   SelectionDAG &DAG = DCI.DAG;
8363   SDValue LHS = N->getOperand(0);
8364   SDValue RHS = N->getOperand(1);
8365 
8366   EVT VT = N->getValueType(0);
8367   if (VT == MVT::i1) {
8368     // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
8369     if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
8370         RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
8371       SDValue Src = LHS.getOperand(0);
8372       if (Src != RHS.getOperand(0))
8373         return SDValue();
8374 
8375       const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
8376       const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
8377       if (!CLHS || !CRHS)
8378         return SDValue();
8379 
8380       // Only 10 bits are used.
8381       static const uint32_t MaxMask = 0x3ff;
8382 
8383       uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
8384       SDLoc DL(N);
8385       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
8386                          Src, DAG.getConstant(NewMask, DL, MVT::i32));
8387     }
8388 
8389     return SDValue();
8390   }
8391 
8392   // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
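  // For example, (or (perm x, y, 0x00010203), 0xff000000) becomes
  // (perm x, y, 0xff010203): byte 3 now selects the constant 0xff instead of
  // a source byte.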
8393   if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() &&
8394       LHS.getOpcode() == AMDGPUISD::PERM &&
8395       isa<ConstantSDNode>(LHS.getOperand(2))) {
8396     uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1));
8397     if (!Sel)
8398       return SDValue();
8399 
8400     Sel |= LHS.getConstantOperandVal(2);
8401     SDLoc DL(N);
8402     return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
8403                        LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
8404   }
8405 
8406   // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
8407   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8408   if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
8409       N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
8410     uint32_t LHSMask = getPermuteMask(DAG, LHS);
8411     uint32_t RHSMask = getPermuteMask(DAG, RHS);
8412     if (LHSMask != ~0u && RHSMask != ~0u) {
8413       // Canonicalize the expression in an attempt to have fewer unique masks
8414       // and therefore fewer registers used to hold the masks.
8415       if (LHSMask > RHSMask) {
8416         std::swap(LHSMask, RHSMask);
8417         std::swap(LHS, RHS);
8418       }
8419 
      // Select 0xc for each byte that takes a lane from its source operand. In
      // the masks, a zero byte is encoded as 0xc, an all-ones byte as 0xff, and
      // actual source lanes are in the 0-3 range.
8422       uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8423       uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8424 
      // Check if we need to combine values from two sources within a byte.
8426       if (!(LHSUsedLanes & RHSUsedLanes) &&
          // If we select the high word from one source and the low word from
          // the other, keep it for SDWA.
8429           !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
        // Kill zero bytes selected by the other mask. A zero byte is encoded
        // as 0xc.
8431         LHSMask &= ~RHSUsedLanes;
8432         RHSMask &= ~LHSUsedLanes;
8433         // Add 4 to each active LHS lane
8434         LHSMask |= LHSUsedLanes & 0x04040404;
8435         // Combine masks
8436         uint32_t Sel = LHSMask | RHSMask;
8437         SDLoc DL(N);
8438 
8439         return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
8440                            LHS.getOperand(0), RHS.getOperand(0),
8441                            DAG.getConstant(Sel, DL, MVT::i32));
8442       }
8443     }
8444   }
8445 
8446   if (VT != MVT::i64)
8447     return SDValue();
8448 
8449   // TODO: This could be a generic combine with a predicate for extracting the
8450   // high half of an integer being free.
8451 
8452   // (or i64:x, (zero_extend i32:y)) ->
8453   //   i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
8454   if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
8455       RHS.getOpcode() != ISD::ZERO_EXTEND)
8456     std::swap(LHS, RHS);
8457 
8458   if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
8459     SDValue ExtSrc = RHS.getOperand(0);
8460     EVT SrcVT = ExtSrc.getValueType();
8461     if (SrcVT == MVT::i32) {
8462       SDLoc SL(N);
8463       SDValue LowLHS, HiBits;
8464       std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
8465       SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
8466 
8467       DCI.AddToWorklist(LowOr.getNode());
8468       DCI.AddToWorklist(HiBits.getNode());
8469 
8470       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
8471                                 LowOr, HiBits);
8472       return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
8473     }
8474   }
8475 
8476   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
8477   if (CRHS) {
8478     if (SDValue Split
8479           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
8480       return Split;
8481   }
8482 
8483   return SDValue();
8484 }
8485 
8486 SDValue SITargetLowering::performXorCombine(SDNode *N,
8487                                             DAGCombinerInfo &DCI) const {
8488   EVT VT = N->getValueType(0);
8489   if (VT != MVT::i64)
8490     return SDValue();
8491 
8492   SDValue LHS = N->getOperand(0);
8493   SDValue RHS = N->getOperand(1);
8494 
8495   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
8496   if (CRHS) {
8497     if (SDValue Split
8498           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
8499       return Split;
8500   }
8501 
8502   return SDValue();
8503 }
8504 
8505 // Instructions that will be lowered with a final instruction that zeros the
8506 // high result bits.
8507 // XXX - probably only need to list legal operations.
8508 static bool fp16SrcZerosHighBits(unsigned Opc) {
8509   switch (Opc) {
8510   case ISD::FADD:
8511   case ISD::FSUB:
8512   case ISD::FMUL:
8513   case ISD::FDIV:
8514   case ISD::FREM:
8515   case ISD::FMA:
8516   case ISD::FMAD:
8517   case ISD::FCANONICALIZE:
8518   case ISD::FP_ROUND:
8519   case ISD::UINT_TO_FP:
8520   case ISD::SINT_TO_FP:
8521   case ISD::FABS:
8522     // Fabs is lowered to a bit operation, but it's an and which will clear the
8523     // high bits anyway.
8524   case ISD::FSQRT:
8525   case ISD::FSIN:
8526   case ISD::FCOS:
8527   case ISD::FPOWI:
8528   case ISD::FPOW:
8529   case ISD::FLOG:
8530   case ISD::FLOG2:
8531   case ISD::FLOG10:
8532   case ISD::FEXP:
8533   case ISD::FEXP2:
8534   case ISD::FCEIL:
8535   case ISD::FTRUNC:
8536   case ISD::FRINT:
8537   case ISD::FNEARBYINT:
8538   case ISD::FROUND:
8539   case ISD::FFLOOR:
8540   case ISD::FMINNUM:
8541   case ISD::FMAXNUM:
8542   case AMDGPUISD::FRACT:
8543   case AMDGPUISD::CLAMP:
8544   case AMDGPUISD::COS_HW:
8545   case AMDGPUISD::SIN_HW:
8546   case AMDGPUISD::FMIN3:
8547   case AMDGPUISD::FMAX3:
8548   case AMDGPUISD::FMED3:
8549   case AMDGPUISD::FMAD_FTZ:
8550   case AMDGPUISD::RCP:
8551   case AMDGPUISD::RSQ:
8552   case AMDGPUISD::RCP_IFLAG:
8553   case AMDGPUISD::LDEXP:
8554     return true;
8555   default:
8556     // fcopysign, select and others may be lowered to 32-bit bit operations
8557     // which don't zero the high bits.
8558     return false;
8559   }
8560 }
8561 
8562 SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
8563                                                    DAGCombinerInfo &DCI) const {
8564   if (!Subtarget->has16BitInsts() ||
8565       DCI.getDAGCombineLevel() < AfterLegalizeDAG)
8566     return SDValue();
8567 
8568   EVT VT = N->getValueType(0);
8569   if (VT != MVT::i32)
8570     return SDValue();
8571 
8572   SDValue Src = N->getOperand(0);
8573   if (Src.getValueType() != MVT::i16)
8574     return SDValue();
8575 
8576   // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
8577   // FIXME: It is not universally true that the high bits are zeroed on gfx9.
8578   if (Src.getOpcode() == ISD::BITCAST) {
8579     SDValue BCSrc = Src.getOperand(0);
8580     if (BCSrc.getValueType() == MVT::f16 &&
8581         fp16SrcZerosHighBits(BCSrc.getOpcode()))
8582       return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
8583   }
8584 
8585   return SDValue();
8586 }
8587 
8588 SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N,
8589                                                         DAGCombinerInfo &DCI)
8590                                                         const {
8591   SDValue Src = N->getOperand(0);
8592   auto *VTSign = cast<VTSDNode>(N->getOperand(1));
8593 
8594   if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE &&
8595       VTSign->getVT() == MVT::i8) ||
8596       (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT &&
8597       VTSign->getVT() == MVT::i16)) &&
8598       Src.hasOneUse()) {
8599     auto *M = cast<MemSDNode>(Src);
8600     SDValue Ops[] = {
8601       Src.getOperand(0), // Chain
8602       Src.getOperand(1), // rsrc
8603       Src.getOperand(2), // vindex
8604       Src.getOperand(3), // voffset
8605       Src.getOperand(4), // soffset
8606       Src.getOperand(5), // offset
8607       Src.getOperand(6),
8608       Src.getOperand(7)
8609     };
8610     // replace with BUFFER_LOAD_BYTE/SHORT
8611     SDVTList ResList = DCI.DAG.getVTList(MVT::i32,
8612                                          Src.getOperand(0).getValueType());
8613     unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ?
8614                    AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT;
8615     SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N),
8616                                                           ResList,
8617                                                           Ops, M->getMemoryVT(),
8618                                                           M->getMemOperand());
8619     return DCI.DAG.getMergeValues({BufferLoadSignExt,
8620                                   BufferLoadSignExt.getValue(1)}, SDLoc(N));
8621   }
8622   return SDValue();
8623 }
8624 
8625 SDValue SITargetLowering::performClassCombine(SDNode *N,
8626                                               DAGCombinerInfo &DCI) const {
8627   SelectionDAG &DAG = DCI.DAG;
8628   SDValue Mask = N->getOperand(1);
8629 
8630   // fp_class x, 0 -> false
8631   if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
8632     if (CMask->isNullValue())
8633       return DAG.getConstant(0, SDLoc(N), MVT::i1);
8634   }
8635 
8636   if (N->getOperand(0).isUndef())
8637     return DAG.getUNDEF(MVT::i1);
8638 
8639   return SDValue();
8640 }
8641 
8642 SDValue SITargetLowering::performRcpCombine(SDNode *N,
8643                                             DAGCombinerInfo &DCI) const {
8644   EVT VT = N->getValueType(0);
8645   SDValue N0 = N->getOperand(0);
8646 
8647   if (N0.isUndef())
8648     return N0;
8649 
8650   if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP ||
8651                          N0.getOpcode() == ISD::SINT_TO_FP)) {
8652     return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0,
8653                            N->getFlags());
8654   }
8655 
8656   return AMDGPUTargetLowering::performRcpCombine(N, DCI);
8657 }
8658 
8659 bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
8660                                        unsigned MaxDepth) const {
8661   unsigned Opcode = Op.getOpcode();
8662   if (Opcode == ISD::FCANONICALIZE)
8663     return true;
8664 
8665   if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
8666     auto F = CFP->getValueAPF();
8667     if (F.isNaN() && F.isSignaling())
8668       return false;
8669     return !F.isDenormal() || denormalsEnabledForType(Op.getValueType());
8670   }
8671 
  // If the source is the result of another standard FP operation, it is
  // already in canonical form.
8674   if (MaxDepth == 0)
8675     return false;
8676 
8677   switch (Opcode) {
8678   // These will flush denorms if required.
8679   case ISD::FADD:
8680   case ISD::FSUB:
8681   case ISD::FMUL:
8682   case ISD::FCEIL:
8683   case ISD::FFLOOR:
8684   case ISD::FMA:
8685   case ISD::FMAD:
8686   case ISD::FSQRT:
8687   case ISD::FDIV:
8688   case ISD::FREM:
8689   case ISD::FP_ROUND:
8690   case ISD::FP_EXTEND:
8691   case AMDGPUISD::FMUL_LEGACY:
8692   case AMDGPUISD::FMAD_FTZ:
8693   case AMDGPUISD::RCP:
8694   case AMDGPUISD::RSQ:
8695   case AMDGPUISD::RSQ_CLAMP:
8696   case AMDGPUISD::RCP_LEGACY:
8697   case AMDGPUISD::RSQ_LEGACY:
8698   case AMDGPUISD::RCP_IFLAG:
8699   case AMDGPUISD::TRIG_PREOP:
8700   case AMDGPUISD::DIV_SCALE:
8701   case AMDGPUISD::DIV_FMAS:
8702   case AMDGPUISD::DIV_FIXUP:
8703   case AMDGPUISD::FRACT:
8704   case AMDGPUISD::LDEXP:
8705   case AMDGPUISD::CVT_PKRTZ_F16_F32:
8706   case AMDGPUISD::CVT_F32_UBYTE0:
8707   case AMDGPUISD::CVT_F32_UBYTE1:
8708   case AMDGPUISD::CVT_F32_UBYTE2:
8709   case AMDGPUISD::CVT_F32_UBYTE3:
8710     return true;
8711 
  // These can/will be lowered or combined to bit operations, so we need to
  // check their inputs recursively to handle them.
8714   case ISD::FNEG:
8715   case ISD::FABS:
8716   case ISD::FCOPYSIGN:
8717     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
8718 
8719   case ISD::FSIN:
8720   case ISD::FCOS:
8721   case ISD::FSINCOS:
8722     return Op.getValueType().getScalarType() != MVT::f16;
8723 
8724   case ISD::FMINNUM:
8725   case ISD::FMAXNUM:
8726   case ISD::FMINNUM_IEEE:
8727   case ISD::FMAXNUM_IEEE:
8728   case AMDGPUISD::CLAMP:
8729   case AMDGPUISD::FMED3:
8730   case AMDGPUISD::FMAX3:
8731   case AMDGPUISD::FMIN3: {
    // FIXME: Shouldn't treat the generic operations differently based on
    // these. However, we aren't really required to flush the result from
    // minnum/maxnum.
8735 
8736     // snans will be quieted, so we only need to worry about denormals.
8737     if (Subtarget->supportsMinMaxDenormModes() ||
8738         denormalsEnabledForType(Op.getValueType()))
8739       return true;
8740 
8741     // Flushing may be required.
    // On pre-GFX9 targets V_MIN_F32 and others do not flush denorms, so for
    // such targets we need to check the inputs recursively.
8744 
8745     // FIXME: Does this apply with clamp? It's implemented with max.
8746     for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) {
8747       if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1))
8748         return false;
8749     }
8750 
8751     return true;
8752   }
8753   case ISD::SELECT: {
8754     return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) &&
8755            isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1);
8756   }
8757   case ISD::BUILD_VECTOR: {
8758     for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
8759       SDValue SrcOp = Op.getOperand(i);
8760       if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1))
8761         return false;
8762     }
8763 
8764     return true;
8765   }
8766   case ISD::EXTRACT_VECTOR_ELT:
8767   case ISD::EXTRACT_SUBVECTOR: {
8768     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
8769   }
8770   case ISD::INSERT_VECTOR_ELT: {
8771     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) &&
8772            isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1);
8773   }
8774   case ISD::UNDEF:
8775     // Could be anything.
8776     return false;
8777 
8778   case ISD::BITCAST: {
    // Hack around the mess we make when legalizing extract_vector_elt.
8780     SDValue Src = Op.getOperand(0);
8781     if (Src.getValueType() == MVT::i16 &&
8782         Src.getOpcode() == ISD::TRUNCATE) {
8783       SDValue TruncSrc = Src.getOperand(0);
8784       if (TruncSrc.getValueType() == MVT::i32 &&
8785           TruncSrc.getOpcode() == ISD::BITCAST &&
8786           TruncSrc.getOperand(0).getValueType() == MVT::v2f16) {
8787         return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1);
8788       }
8789     }
8790 
8791     return false;
8792   }
8793   case ISD::INTRINSIC_WO_CHAIN: {
8794     unsigned IntrinsicID
8795       = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8796     // TODO: Handle more intrinsics
8797     switch (IntrinsicID) {
8798     case Intrinsic::amdgcn_cvt_pkrtz:
8799     case Intrinsic::amdgcn_cubeid:
8800     case Intrinsic::amdgcn_frexp_mant:
8801     case Intrinsic::amdgcn_fdot2:
8802       return true;
8803     default:
8804       break;
8805     }
8806 
8807     LLVM_FALLTHROUGH;
8808   }
8809   default:
8810     return denormalsEnabledForType(Op.getValueType()) &&
8811            DAG.isKnownNeverSNaN(Op);
8812   }
8813 
8814   llvm_unreachable("invalid operation");
8815 }
8816 
8817 // Constant fold canonicalize.
8818 SDValue SITargetLowering::getCanonicalConstantFP(
8819   SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const {
8820   // Flush denormals to 0 if not enabled.
8821   if (C.isDenormal() && !denormalsEnabledForType(VT))
8822     return DAG.getConstantFP(0.0, SL, VT);
8823 
8824   if (C.isNaN()) {
8825     APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
8826     if (C.isSignaling()) {
8827       // Quiet a signaling NaN.
8828       // FIXME: Is this supposed to preserve payload bits?
8829       return DAG.getConstantFP(CanonicalQNaN, SL, VT);
8830     }
8831 
8832     // Make sure it is the canonical NaN bitpattern.
8833     //
8834     // TODO: Can we use -1 as the canonical NaN value since it's an inline
8835     // immediate?
8836     if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
8837       return DAG.getConstantFP(CanonicalQNaN, SL, VT);
8838   }
8839 
8840   // Already canonical.
8841   return DAG.getConstantFP(C, SL, VT);
8842 }
8843 
8844 static bool vectorEltWillFoldAway(SDValue Op) {
8845   return Op.isUndef() || isa<ConstantFPSDNode>(Op);
8846 }
8847 
8848 SDValue SITargetLowering::performFCanonicalizeCombine(
8849   SDNode *N,
8850   DAGCombinerInfo &DCI) const {
8851   SelectionDAG &DAG = DCI.DAG;
8852   SDValue N0 = N->getOperand(0);
8853   EVT VT = N->getValueType(0);
8854 
8855   // fcanonicalize undef -> qnan
8856   if (N0.isUndef()) {
8857     APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT));
8858     return DAG.getConstantFP(QNaN, SDLoc(N), VT);
8859   }
8860 
8861   if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) {
8862     EVT VT = N->getValueType(0);
8863     return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF());
8864   }
8865 
8866   // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x),
8867   //                                                   (fcanonicalize k)
8868   //
8869   // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0
8870 
8871   // TODO: This could be better with wider vectors that will be split to v2f16,
8872   // and to consider uses since there aren't that many packed operations.
8873   if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 &&
8874       isTypeLegal(MVT::v2f16)) {
8875     SDLoc SL(N);
8876     SDValue NewElts[2];
8877     SDValue Lo = N0.getOperand(0);
8878     SDValue Hi = N0.getOperand(1);
8879     EVT EltVT = Lo.getValueType();
8880 
8881     if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) {
8882       for (unsigned I = 0; I != 2; ++I) {
8883         SDValue Op = N0.getOperand(I);
8884         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
8885           NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT,
8886                                               CFP->getValueAPF());
8887         } else if (Op.isUndef()) {
8888           // Handled below based on what the other operand is.
8889           NewElts[I] = Op;
8890         } else {
8891           NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op);
8892         }
8893       }
8894 
      // If one half is undef, and one is constant, prefer a splat vector rather
8896       // than the normal qNaN. If it's a register, prefer 0.0 since that's
8897       // cheaper to use and may be free with a packed operation.
      if (NewElts[0].isUndef()) {
        NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ?
          NewElts[1] : DAG.getConstantFP(0.0f, SL, EltVT);
      }
8903 
8904       if (NewElts[1].isUndef()) {
8905         NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ?
8906           NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT);
8907       }
8908 
8909       return DAG.getBuildVector(VT, SL, NewElts);
8910     }
8911   }
8912 
8913   unsigned SrcOpc = N0.getOpcode();
8914 
  // If it's free to do so, push the canonicalize further up the source, which
  // may find a canonical source.
8917   //
  // TODO: More opcodes. Note this is unsafe for the _ieee minnum/maxnum for
8919   // sNaNs.
8920   if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) {
8921     auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
8922     if (CRHS && N0.hasOneUse()) {
8923       SDLoc SL(N);
8924       SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT,
8925                                    N0.getOperand(0));
8926       SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF());
8927       DCI.AddToWorklist(Canon0.getNode());
8928 
8929       return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1);
8930     }
8931   }
8932 
8933   return isCanonicalized(DAG, N0) ? N0 : SDValue();
8934 }
8935 
8936 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
8937   switch (Opc) {
8938   case ISD::FMAXNUM:
8939   case ISD::FMAXNUM_IEEE:
8940     return AMDGPUISD::FMAX3;
8941   case ISD::SMAX:
8942     return AMDGPUISD::SMAX3;
8943   case ISD::UMAX:
8944     return AMDGPUISD::UMAX3;
8945   case ISD::FMINNUM:
8946   case ISD::FMINNUM_IEEE:
8947     return AMDGPUISD::FMIN3;
8948   case ISD::SMIN:
8949     return AMDGPUISD::SMIN3;
8950   case ISD::UMIN:
8951     return AMDGPUISD::UMIN3;
8952   default:
8953     llvm_unreachable("Not a min/max opcode");
8954   }
8955 }
8956 
8957 SDValue SITargetLowering::performIntMed3ImmCombine(
8958   SelectionDAG &DAG, const SDLoc &SL,
8959   SDValue Op0, SDValue Op1, bool Signed) const {
8960   ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
8961   if (!K1)
8962     return SDValue();
8963 
8964   ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
8965   if (!K0)
8966     return SDValue();
8967 
8968   if (Signed) {
8969     if (K0->getAPIntValue().sge(K1->getAPIntValue()))
8970       return SDValue();
8971   } else {
8972     if (K0->getAPIntValue().uge(K1->getAPIntValue()))
8973       return SDValue();
8974   }
8975 
8976   EVT VT = K0->getValueType(0);
8977   unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
8978   if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
8979     return DAG.getNode(Med3Opc, SL, VT,
8980                        Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
8981   }
8982 
8983   // If there isn't a 16-bit med3 operation, convert to 32-bit.
8984   MVT NVT = MVT::i32;
8985   unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
8986 
8987   SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
8988   SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
8989   SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
8990 
8991   SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
8992   return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
8993 }
8994 
8995 static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
8996   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
8997     return C;
8998 
8999   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
9000     if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
9001       return C;
9002   }
9003 
9004   return nullptr;
9005 }
9006 
9007 SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
9008                                                   const SDLoc &SL,
9009                                                   SDValue Op0,
9010                                                   SDValue Op1) const {
9011   ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
9012   if (!K1)
9013     return SDValue();
9014 
9015   ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
9016   if (!K0)
9017     return SDValue();
9018 
9019   // Ordered >= (although NaN inputs should have folded away by now).
9020   APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
9021   if (Cmp == APFloat::cmpGreaterThan)
9022     return SDValue();
9023 
9024   const MachineFunction &MF = DAG.getMachineFunction();
9025   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
9026 
9027   // TODO: Check IEEE bit enabled?
9028   EVT VT = Op0.getValueType();
9029   if (Info->getMode().DX10Clamp) {
9030     // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
9031     // hardware fmed3 behavior converting to a min.
9032     // FIXME: Should this be allowing -0.0?
9033     if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
9034       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
9035   }
9036 
9037   // med3 for f16 is only available on gfx9+, and not available for v2f16.
9038   if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
9039     // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
9040     // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
9041     // then give the other result, which is different from med3 with a NaN
9042     // input.
9043     SDValue Var = Op0.getOperand(0);
9044     if (!DAG.isKnownNeverSNaN(Var))
9045       return SDValue();
9046 
9047     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
9048 
9049     if ((!K0->hasOneUse() ||
9050          TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) &&
9051         (!K1->hasOneUse() ||
9052          TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) {
9053       return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
9054                          Var, SDValue(K0, 0), SDValue(K1, 0));
9055     }
9056   }
9057 
9058   return SDValue();
9059 }
9060 
9061 SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
9062                                                DAGCombinerInfo &DCI) const {
9063   SelectionDAG &DAG = DCI.DAG;
9064 
9065   EVT VT = N->getValueType(0);
9066   unsigned Opc = N->getOpcode();
9067   SDValue Op0 = N->getOperand(0);
9068   SDValue Op1 = N->getOperand(1);
9069 
  // Only do this if the inner op has one use, since otherwise this just
  // increases register pressure for no benefit.
9072 
9073   if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
9074       !VT.isVector() &&
9075       (VT == MVT::i32 || VT == MVT::f32 ||
9076        ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) {
9077     // max(max(a, b), c) -> max3(a, b, c)
9078     // min(min(a, b), c) -> min3(a, b, c)
9079     if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
9080       SDLoc DL(N);
9081       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
9082                          DL,
9083                          N->getValueType(0),
9084                          Op0.getOperand(0),
9085                          Op0.getOperand(1),
9086                          Op1);
9087     }
9088 
9089     // Try commuted.
9090     // max(a, max(b, c)) -> max3(a, b, c)
9091     // min(a, min(b, c)) -> min3(a, b, c)
9092     if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
9093       SDLoc DL(N);
9094       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
9095                          DL,
9096                          N->getValueType(0),
9097                          Op0,
9098                          Op1.getOperand(0),
9099                          Op1.getOperand(1));
9100     }
9101   }
9102 
9103   // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
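  // For example, (smin (smax x, -3), 17) becomes (smed3 x, -3, 17); the
  // unsigned variant maps to umed3 in the same way.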
9104   if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
9105     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
9106       return Med3;
9107   }
9108 
9109   if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
9110     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
9111       return Med3;
9112   }
9113 
9114   // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
9115   if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
9116        (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) ||
9117        (Opc == AMDGPUISD::FMIN_LEGACY &&
9118         Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
9119       (VT == MVT::f32 || VT == MVT::f64 ||
9120        (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
9121        (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
9122       Op0.hasOneUse()) {
9123     if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
9124       return Res;
9125   }
9126 
9127   return SDValue();
9128 }
9129 
9130 static bool isClampZeroToOne(SDValue A, SDValue B) {
9131   if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
9132     if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
9133       // FIXME: Should this be allowing -0.0?
9134       return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
9135              (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
9136     }
9137   }
9138 
9139   return false;
9140 }
9141 
// FIXME: Should only worry about sNaNs for the version with a chain.
9143 SDValue SITargetLowering::performFMed3Combine(SDNode *N,
9144                                               DAGCombinerInfo &DCI) const {
9145   EVT VT = N->getValueType(0);
9146   // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
9147   // NaNs. With a NaN input, the order of the operands may change the result.
9148 
9149   SelectionDAG &DAG = DCI.DAG;
9150   SDLoc SL(N);
9151 
9152   SDValue Src0 = N->getOperand(0);
9153   SDValue Src1 = N->getOperand(1);
9154   SDValue Src2 = N->getOperand(2);
9155 
9156   if (isClampZeroToOne(Src0, Src1)) {
9157     // const_a, const_b, x -> clamp is safe in all cases including signaling
9158     // nans.
9159     // FIXME: Should this be allowing -0.0?
9160     return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
9161   }
9162 
9163   const MachineFunction &MF = DAG.getMachineFunction();
9164   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
9165 
9166   // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
9167   // handling no dx10-clamp?
9168   if (Info->getMode().DX10Clamp) {
    // If NaNs are clamped to 0, we are free to reorder the inputs.
9170 
9171     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
9172       std::swap(Src0, Src1);
9173 
9174     if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
9175       std::swap(Src1, Src2);
9176 
9177     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
9178       std::swap(Src0, Src1);
9179 
9180     if (isClampZeroToOne(Src1, Src2))
9181       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
9182   }
9183 
9184   return SDValue();
9185 }
9186 
9187 SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
9188                                                  DAGCombinerInfo &DCI) const {
9189   SDValue Src0 = N->getOperand(0);
9190   SDValue Src1 = N->getOperand(1);
9191   if (Src0.isUndef() && Src1.isUndef())
9192     return DCI.DAG.getUNDEF(N->getValueType(0));
9193   return SDValue();
9194 }
9195 
9196 SDValue SITargetLowering::performExtractVectorEltCombine(
9197   SDNode *N, DAGCombinerInfo &DCI) const {
9198   SDValue Vec = N->getOperand(0);
9199   SelectionDAG &DAG = DCI.DAG;
9200 
9201   EVT VecVT = Vec.getValueType();
9202   EVT EltVT = VecVT.getVectorElementType();
9203 
9204   if ((Vec.getOpcode() == ISD::FNEG ||
9205        Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
9206     SDLoc SL(N);
9207     EVT EltVT = N->getValueType(0);
9208     SDValue Idx = N->getOperand(1);
9209     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
9210                               Vec.getOperand(0), Idx);
9211     return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
9212   }
9213 
9214   // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
9215   //    =>
9216   // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
9217   // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
9218   // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
9219   if (Vec.hasOneUse() && DCI.isBeforeLegalize()) {
9220     SDLoc SL(N);
9221     EVT EltVT = N->getValueType(0);
9222     SDValue Idx = N->getOperand(1);
9223     unsigned Opc = Vec.getOpcode();
9224 
9225     switch(Opc) {
9226     default:
9227       break;
9228       // TODO: Support other binary operations.
9229     case ISD::FADD:
9230     case ISD::FSUB:
9231     case ISD::FMUL:
9232     case ISD::ADD:
9233     case ISD::UMIN:
9234     case ISD::UMAX:
9235     case ISD::SMIN:
9236     case ISD::SMAX:
9237     case ISD::FMAXNUM:
9238     case ISD::FMINNUM:
9239     case ISD::FMAXNUM_IEEE:
9240     case ISD::FMINNUM_IEEE: {
9241       SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
9242                                  Vec.getOperand(0), Idx);
9243       SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
9244                                  Vec.getOperand(1), Idx);
9245 
9246       DCI.AddToWorklist(Elt0.getNode());
9247       DCI.AddToWorklist(Elt1.getNode());
9248       return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags());
9249     }
9250     }
9251   }
9252 
9253   unsigned VecSize = VecVT.getSizeInBits();
9254   unsigned EltSize = EltVT.getSizeInBits();
9255 
9256   // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx)
  // This eliminates the non-constant index and the subsequent movrel or
  // scratch access. Sub-dword vectors of two dwords or less have a better
  // implementation. Vectors of more than 8 dwords yield too many v_cndmask_b32
9260   // instructions.
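  // For a v4i32 vector, for example, this builds element 0 as the initial
  // value and then emits three selects that replace it when the index compares
  // equal to 1, 2, or 3.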
9261   if (VecSize <= 256 && (VecSize > 64 || EltSize >= 32) &&
9262       !isa<ConstantSDNode>(N->getOperand(1))) {
9263     SDLoc SL(N);
9264     SDValue Idx = N->getOperand(1);
9265     EVT IdxVT = Idx.getValueType();
9266     SDValue V;
9267     for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
9268       SDValue IC = DAG.getConstant(I, SL, IdxVT);
9269       SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
9270       if (I == 0)
9271         V = Elt;
9272       else
9273         V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ);
9274     }
9275     return V;
9276   }
9277 
9278   if (!DCI.isBeforeLegalize())
9279     return SDValue();
9280 
9281   // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit
9282   // elements. This exposes more load reduction opportunities by replacing
9283   // multiple small extract_vector_elements with a single 32-bit extract.
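  // For example, extracting i8 element 5 of a loaded v8i8 becomes: bitcast the
  // vector to v2i32, extract dword 1, shift it right by 8, and truncate the
  // result back to i8.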
9284   auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
9285   if (isa<MemSDNode>(Vec) &&
9286       EltSize <= 16 &&
9287       EltVT.isByteSized() &&
9288       VecSize > 32 &&
9289       VecSize % 32 == 0 &&
9290       Idx) {
9291     EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
9292 
9293     unsigned BitIndex = Idx->getZExtValue() * EltSize;
9294     unsigned EltIdx = BitIndex / 32;
9295     unsigned LeftoverBitIdx = BitIndex % 32;
9296     SDLoc SL(N);
9297 
9298     SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
9299     DCI.AddToWorklist(Cast.getNode());
9300 
9301     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
9302                               DAG.getConstant(EltIdx, SL, MVT::i32));
9303     DCI.AddToWorklist(Elt.getNode());
9304     SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
9305                               DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
9306     DCI.AddToWorklist(Srl.getNode());
9307 
9308     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl);
9309     DCI.AddToWorklist(Trunc.getNode());
9310     return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc);
9311   }
9312 
9313   return SDValue();
9314 }
9315 
9316 SDValue
9317 SITargetLowering::performInsertVectorEltCombine(SDNode *N,
9318                                                 DAGCombinerInfo &DCI) const {
9319   SDValue Vec = N->getOperand(0);
9320   SDValue Idx = N->getOperand(2);
9321   EVT VecVT = Vec.getValueType();
9322   EVT EltVT = VecVT.getVectorElementType();
9323   unsigned VecSize = VecVT.getSizeInBits();
9324   unsigned EltSize = EltVT.getSizeInBits();
9325 
9326   // INSERT_VECTOR_ELT (<n x e>, var-idx)
9327   // => BUILD_VECTOR n x select (e, const-idx)
  // This eliminates the non-constant index and the subsequent movrel or
  // scratch access. Sub-dword vectors of two dwords or less have a better
  // implementation. Vectors of more than 8 dwords would yield too many
  // v_cndmask_b32 instructions.
9332   if (isa<ConstantSDNode>(Idx) ||
9333       VecSize > 256 || (VecSize <= 64 && EltSize < 32))
9334     return SDValue();
9335 
9336   SelectionDAG &DAG = DCI.DAG;
9337   SDLoc SL(N);
9338   SDValue Ins = N->getOperand(1);
9339   EVT IdxVT = Idx.getValueType();
9340 
9341   SmallVector<SDValue, 16> Ops;
9342   for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
9343     SDValue IC = DAG.getConstant(I, SL, IdxVT);
9344     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
9345     SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ);
9346     Ops.push_back(V);
9347   }
9348 
9349   return DAG.getBuildVector(VecVT, SL, Ops);
9350 }
9351 
9352 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
9353                                           const SDNode *N0,
9354                                           const SDNode *N1) const {
9355   EVT VT = N0->getValueType(0);
9356 
9357   // Only do this if we are not trying to support denormals. v_mad_f32 does not
9358   // support denormals ever.
9359   if (((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
9360        (VT == MVT::f16 && !Subtarget->hasFP16Denormals() &&
9361         getSubtarget()->hasMadF16())) &&
9362        isOperationLegal(ISD::FMAD, VT))
9363     return ISD::FMAD;
9364 
9365   const TargetOptions &Options = DAG.getTarget().Options;
9366   if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
9367        (N0->getFlags().hasAllowContract() &&
9368         N1->getFlags().hasAllowContract())) &&
9369       isFMAFasterThanFMulAndFAdd(VT)) {
9370     return ISD::FMA;
9371   }
9372 
9373   return 0;
9374 }
9375 
9376 // For a reassociatable opcode perform:
9377 // op x, (op y, z) -> op (op x, z), y, if x and z are uniform
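// For example, (add s0, (add v, s1)) with s0 and s1 uniform and v divergent is
// rewritten to (add (add s0, s1), v), so the uniform add can be selected to a
// scalar instruction.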
9378 SDValue SITargetLowering::reassociateScalarOps(SDNode *N,
9379                                                SelectionDAG &DAG) const {
9380   EVT VT = N->getValueType(0);
9381   if (VT != MVT::i32 && VT != MVT::i64)
9382     return SDValue();
9383 
9384   unsigned Opc = N->getOpcode();
9385   SDValue Op0 = N->getOperand(0);
9386   SDValue Op1 = N->getOperand(1);
9387 
9388   if (!(Op0->isDivergent() ^ Op1->isDivergent()))
9389     return SDValue();
9390 
9391   if (Op0->isDivergent())
9392     std::swap(Op0, Op1);
9393 
9394   if (Op1.getOpcode() != Opc || !Op1.hasOneUse())
9395     return SDValue();
9396 
9397   SDValue Op2 = Op1.getOperand(1);
9398   Op1 = Op1.getOperand(0);
9399   if (!(Op1->isDivergent() ^ Op2->isDivergent()))
9400     return SDValue();
9401 
9402   if (Op1->isDivergent())
9403     std::swap(Op1, Op2);
9404 
9405   // If either operand is constant this will conflict with
9406   // DAGCombiner::ReassociateOps().
9407   if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) ||
9408       DAG.isConstantIntBuildVectorOrConstantInt(Op1))
9409     return SDValue();
9410 
9411   SDLoc SL(N);
9412   SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1);
9413   return DAG.getNode(Opc, SL, VT, Add1, Op2);
9414 }
9415 
9416 static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
9417                            EVT VT,
9418                            SDValue N0, SDValue N1, SDValue N2,
9419                            bool Signed) {
9420   unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
9421   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
9422   SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
9423   return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
9424 }
9425 
9426 SDValue SITargetLowering::performAddCombine(SDNode *N,
9427                                             DAGCombinerInfo &DCI) const {
9428   SelectionDAG &DAG = DCI.DAG;
9429   EVT VT = N->getValueType(0);
9430   SDLoc SL(N);
9431   SDValue LHS = N->getOperand(0);
9432   SDValue RHS = N->getOperand(1);
9433 
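  // Fold (add (mul x, y), z) into mad_u64_u32 / mad_i64_i32 when both multiply
  // operands are known to fit in 32 bits, e.g. a 64-bit accumulation of the
  // product of two values that were zero-extended from i32.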
9434   if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
9435       && Subtarget->hasMad64_32() &&
9436       !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
9437       VT.getScalarSizeInBits() <= 64) {
9438     if (LHS.getOpcode() != ISD::MUL)
9439       std::swap(LHS, RHS);
9440 
9441     SDValue MulLHS = LHS.getOperand(0);
9442     SDValue MulRHS = LHS.getOperand(1);
9443     SDValue AddRHS = RHS;
9444 
9445     // TODO: Maybe restrict if SGPR inputs.
9446     if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
9447         numBitsUnsigned(MulRHS, DAG) <= 32) {
9448       MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
9449       MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
9450       AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
9451       return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
9452     }
9453 
9454     if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
9455       MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
9456       MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
9457       AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
9458       return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
9459     }
9460 
9461     return SDValue();
9462   }
9463 
9464   if (SDValue V = reassociateScalarOps(N, DAG)) {
9465     return V;
9466   }
9467 
9468   if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
9469     return SDValue();
9470 
9471   // add x, zext (setcc) => addcarry x, 0, setcc
9472   // add x, sext (setcc) => subcarry x, 0, setcc
9473   unsigned Opc = LHS.getOpcode();
9474   if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
9475       Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
9476     std::swap(RHS, LHS);
9477 
9478   Opc = RHS.getOpcode();
9479   switch (Opc) {
9480   default: break;
9481   case ISD::ZERO_EXTEND:
9482   case ISD::SIGN_EXTEND:
9483   case ISD::ANY_EXTEND: {
9484     auto Cond = RHS.getOperand(0);
9485     if (!isBoolSGPR(Cond))
9486       break;
9487     SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
9488     SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
9489     Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
9490     return DAG.getNode(Opc, SL, VTList, Args);
9491   }
9492   case ISD::ADDCARRY: {
9493     // add x, (addcarry y, 0, cc) => addcarry x, y, cc
9494     auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
9495     if (!C || C->getZExtValue() != 0) break;
9496     SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
9497     return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
9498   }
9499   }
9500   return SDValue();
9501 }
9502 
9503 SDValue SITargetLowering::performSubCombine(SDNode *N,
9504                                             DAGCombinerInfo &DCI) const {
9505   SelectionDAG &DAG = DCI.DAG;
9506   EVT VT = N->getValueType(0);
9507 
9508   if (VT != MVT::i32)
9509     return SDValue();
9510 
9511   SDLoc SL(N);
9512   SDValue LHS = N->getOperand(0);
9513   SDValue RHS = N->getOperand(1);
9514 
9515   if (LHS.getOpcode() == ISD::SUBCARRY) {
9516     // sub (subcarry x, 0, cc), y => subcarry x, y, cc
9517     auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
9518     if (!C || !C->isNullValue())
9519       return SDValue();
9520     SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
9521     return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
9522   }
9523   return SDValue();
9524 }
9525 
9526 SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
9527   DAGCombinerInfo &DCI) const {
9528 
9529   if (N->getValueType(0) != MVT::i32)
9530     return SDValue();
9531 
9532   auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
9533   if (!C || C->getZExtValue() != 0)
9534     return SDValue();
9535 
9536   SelectionDAG &DAG = DCI.DAG;
9537   SDValue LHS = N->getOperand(0);
9538 
9539   // addcarry (add x, y), 0, cc => addcarry x, y, cc
9540   // subcarry (sub x, y), 0, cc => subcarry x, y, cc
9541   unsigned LHSOpc = LHS.getOpcode();
9542   unsigned Opc = N->getOpcode();
9543   if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
9544       (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
9545     SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
9546     return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
9547   }
9548   return SDValue();
9549 }
9550 
9551 SDValue SITargetLowering::performFAddCombine(SDNode *N,
9552                                              DAGCombinerInfo &DCI) const {
9553   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
9554     return SDValue();
9555 
9556   SelectionDAG &DAG = DCI.DAG;
9557   EVT VT = N->getValueType(0);
9558 
9559   SDLoc SL(N);
9560   SDValue LHS = N->getOperand(0);
9561   SDValue RHS = N->getOperand(1);
9562 
9563   // These should really be instruction patterns, but writing patterns with
  // source modifiers is a pain.
9565 
9566   // fadd (fadd (a, a), b) -> mad 2.0, a, b
9567   if (LHS.getOpcode() == ISD::FADD) {
9568     SDValue A = LHS.getOperand(0);
9569     if (A == LHS.getOperand(1)) {
9570       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
9571       if (FusedOp != 0) {
9572         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9573         return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
9574       }
9575     }
9576   }
9577 
9578   // fadd (b, fadd (a, a)) -> mad 2.0, a, b
9579   if (RHS.getOpcode() == ISD::FADD) {
9580     SDValue A = RHS.getOperand(0);
9581     if (A == RHS.getOperand(1)) {
9582       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
9583       if (FusedOp != 0) {
9584         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9585         return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
9586       }
9587     }
9588   }
9589 
9590   return SDValue();
9591 }
9592 
9593 SDValue SITargetLowering::performFSubCombine(SDNode *N,
9594                                              DAGCombinerInfo &DCI) const {
9595   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
9596     return SDValue();
9597 
9598   SelectionDAG &DAG = DCI.DAG;
9599   SDLoc SL(N);
9600   EVT VT = N->getValueType(0);
9601   assert(!VT.isVector());
9602 
9603   // Try to get the fneg to fold into the source modifier. This undoes generic
9604   // DAG combines and folds them into the mad.
9605   //
9606   // Only do this if we are not trying to support denormals. v_mad_f32 does
9607   // not support denormals ever.
9608   SDValue LHS = N->getOperand(0);
9609   SDValue RHS = N->getOperand(1);
9610   if (LHS.getOpcode() == ISD::FADD) {
9611     // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
9612     SDValue A = LHS.getOperand(0);
9613     if (A == LHS.getOperand(1)) {
9614       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
      if (FusedOp != 0) {
9616         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9617         SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
9618 
9619         return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
9620       }
9621     }
9622   }
9623 
9624   if (RHS.getOpcode() == ISD::FADD) {
9625     // (fsub c, (fadd a, a)) -> mad -2.0, a, c
9626 
9627     SDValue A = RHS.getOperand(0);
9628     if (A == RHS.getOperand(1)) {
9629       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
      if (FusedOp != 0) {
9631         const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
9632         return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
9633       }
9634     }
9635   }
9636 
9637   return SDValue();
9638 }
9639 
9640 SDValue SITargetLowering::performFMACombine(SDNode *N,
9641                                             DAGCombinerInfo &DCI) const {
9642   SelectionDAG &DAG = DCI.DAG;
9643   EVT VT = N->getValueType(0);
9644   SDLoc SL(N);
9645 
9646   if (!Subtarget->hasDot2Insts() || VT != MVT::f32)
9647     return SDValue();
9648 
  // FMA((F32)S0.x, (F32)S1.x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
9650   //   FDOT2((V2F16)S0, (V2F16)S1, (F32)z))
9651   SDValue Op1 = N->getOperand(0);
9652   SDValue Op2 = N->getOperand(1);
9653   SDValue FMA = N->getOperand(2);
9654 
9655   if (FMA.getOpcode() != ISD::FMA ||
9656       Op1.getOpcode() != ISD::FP_EXTEND ||
9657       Op2.getOpcode() != ISD::FP_EXTEND)
9658     return SDValue();
9659 
  // fdot2_f32_f16 always flushes fp32 denormal operands and output to zero,
  // regardless of the denorm mode setting. Therefore, unsafe-fp-math or
  // fp-contract is sufficient to allow generating fdot2.
9663   const TargetOptions &Options = DAG.getTarget().Options;
9664   if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
9665       (N->getFlags().hasAllowContract() &&
9666        FMA->getFlags().hasAllowContract())) {
9667     Op1 = Op1.getOperand(0);
9668     Op2 = Op2.getOperand(0);
9669     if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9670         Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9671       return SDValue();
9672 
9673     SDValue Vec1 = Op1.getOperand(0);
9674     SDValue Idx1 = Op1.getOperand(1);
9675     SDValue Vec2 = Op2.getOperand(0);
9676 
9677     SDValue FMAOp1 = FMA.getOperand(0);
9678     SDValue FMAOp2 = FMA.getOperand(1);
9679     SDValue FMAAcc = FMA.getOperand(2);
9680 
9681     if (FMAOp1.getOpcode() != ISD::FP_EXTEND ||
9682         FMAOp2.getOpcode() != ISD::FP_EXTEND)
9683       return SDValue();
9684 
9685     FMAOp1 = FMAOp1.getOperand(0);
9686     FMAOp2 = FMAOp2.getOperand(0);
9687     if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9688         FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9689       return SDValue();
9690 
9691     SDValue Vec3 = FMAOp1.getOperand(0);
9692     SDValue Vec4 = FMAOp2.getOperand(0);
9693     SDValue Idx2 = FMAOp1.getOperand(1);
9694 
9695     if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) ||
9696         // Idx1 and Idx2 cannot be the same.
9697         Idx1 == Idx2)
9698       return SDValue();
9699 
9700     if (Vec1 == Vec2 || Vec3 == Vec4)
9701       return SDValue();
9702 
9703     if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16)
9704       return SDValue();
9705 
9706     if ((Vec1 == Vec3 && Vec2 == Vec4) ||
9707         (Vec1 == Vec4 && Vec2 == Vec3)) {
9708       return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc,
9709                          DAG.getTargetConstant(0, SL, MVT::i1));
9710     }
9711   }
9712   return SDValue();
9713 }
9714 
9715 SDValue SITargetLowering::performSetCCCombine(SDNode *N,
9716                                               DAGCombinerInfo &DCI) const {
9717   SelectionDAG &DAG = DCI.DAG;
9718   SDLoc SL(N);
9719 
9720   SDValue LHS = N->getOperand(0);
9721   SDValue RHS = N->getOperand(1);
9722   EVT VT = LHS.getValueType();
9723   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
9724 
9725   auto CRHS = dyn_cast<ConstantSDNode>(RHS);
9726   if (!CRHS) {
9727     CRHS = dyn_cast<ConstantSDNode>(LHS);
9728     if (CRHS) {
9729       std::swap(LHS, RHS);
9730       CC = getSetCCSwappedOperands(CC);
9731     }
9732   }
9733 
9734   if (CRHS) {
9735     if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
9736         isBoolSGPR(LHS.getOperand(0))) {
9737       // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
9738       // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
9739       // setcc (sext from i1 cc),  0, eq|sge|ule) => not cc => xor cc, -1
9740       // setcc (sext from i1 cc),  0, ne|ugt|slt) => cc
9741       if ((CRHS->isAllOnesValue() &&
9742            (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
9743           (CRHS->isNullValue() &&
9744            (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
9745         return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
9746                            DAG.getConstant(-1, SL, MVT::i1));
9747       if ((CRHS->isAllOnesValue() &&
9748            (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
9749           (CRHS->isNullValue() &&
9750            (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
9751         return LHS.getOperand(0);
9752     }
9753 
9754     uint64_t CRHSVal = CRHS->getZExtValue();
9755     if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
9756         LHS.getOpcode() == ISD::SELECT &&
9757         isa<ConstantSDNode>(LHS.getOperand(1)) &&
9758         isa<ConstantSDNode>(LHS.getOperand(2)) &&
9759         LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) &&
9760         isBoolSGPR(LHS.getOperand(0))) {
9761       // Given CT != FT:
9762       // setcc (select cc, CT, CF), CF, eq => xor cc, -1
9763       // setcc (select cc, CT, CF), CF, ne => cc
9764       // setcc (select cc, CT, CF), CT, ne => xor cc, -1
9765       // setcc (select cc, CT, CF), CT, eq => cc
9766       uint64_t CT = LHS.getConstantOperandVal(1);
9767       uint64_t CF = LHS.getConstantOperandVal(2);
9768 
9769       if ((CF == CRHSVal && CC == ISD::SETEQ) ||
9770           (CT == CRHSVal && CC == ISD::SETNE))
9771         return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
9772                            DAG.getConstant(-1, SL, MVT::i1));
9773       if ((CF == CRHSVal && CC == ISD::SETNE) ||
9774           (CT == CRHSVal && CC == ISD::SETEQ))
9775         return LHS.getOperand(0);
9776     }
9777   }
9778 
9779   if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
9780                                            VT != MVT::f16))
9781     return SDValue();
9782 
9783   // Match isinf/isfinite pattern
9784   // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
9785   // (fcmp one (fabs x), inf) -> (fp_class x,
  // (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero))
9787   if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) {
9788     const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
9789     if (!CRHS)
9790       return SDValue();
9791 
9792     const APFloat &APF = CRHS->getValueAPF();
9793     if (APF.isInfinity() && !APF.isNegative()) {
9794       const unsigned IsInfMask = SIInstrFlags::P_INFINITY |
9795                                  SIInstrFlags::N_INFINITY;
9796       const unsigned IsFiniteMask = SIInstrFlags::N_ZERO |
9797                                     SIInstrFlags::P_ZERO |
9798                                     SIInstrFlags::N_NORMAL |
9799                                     SIInstrFlags::P_NORMAL |
9800                                     SIInstrFlags::N_SUBNORMAL |
9801                                     SIInstrFlags::P_SUBNORMAL;
9802       unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask;
9803       return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
9804                          DAG.getConstant(Mask, SL, MVT::i32));
9805     }
9806   }
9807 
9808   return SDValue();
9809 }
9810 
9811 SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
9812                                                      DAGCombinerInfo &DCI) const {
9813   SelectionDAG &DAG = DCI.DAG;
9814   SDLoc SL(N);
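  // The opcode determines which byte of the source is converted:
  // CVT_F32_UBYTE0 reads byte 0, CVT_F32_UBYTE1 byte 1, and so on.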
9815   unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
9816 
9817   SDValue Src = N->getOperand(0);
9818   SDValue Srl = N->getOperand(0);
9819   if (Srl.getOpcode() == ISD::ZERO_EXTEND)
9820     Srl = Srl.getOperand(0);
9821 
9822   // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
9823   if (Srl.getOpcode() == ISD::SRL) {
9824     // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
9825     // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
9826     // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
9827 
9828     if (const ConstantSDNode *C =
9829         dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
9830       Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
9831                                EVT(MVT::i32));
9832 
9833       unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
9834       if (SrcOffset < 32 && SrcOffset % 8 == 0) {
9835         return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
9836                            MVT::f32, Srl);
9837       }
9838     }
9839   }
9840 
9841   APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
9842 
9843   KnownBits Known;
9844   TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
9845                                         !DCI.isBeforeLegalizeOps());
9846   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9847   if (TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
9848     DCI.CommitTargetLoweringOpt(TLO);
9849   }
9850 
9851   return SDValue();
9852 }
9853 
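// Fold a clamp of a floating-point constant. Constants below 0.0 (and NaN when
// DX10 clamp is enabled) fold to 0.0, constants above 1.0 fold to 1.0, and
// anything else is returned unchanged.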
9854 SDValue SITargetLowering::performClampCombine(SDNode *N,
9855                                               DAGCombinerInfo &DCI) const {
9856   ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
9857   if (!CSrc)
9858     return SDValue();
9859 
9860   const MachineFunction &MF = DCI.DAG.getMachineFunction();
9861   const APFloat &F = CSrc->getValueAPF();
9862   APFloat Zero = APFloat::getZero(F.getSemantics());
9863   APFloat::cmpResult Cmp0 = F.compare(Zero);
9864   if (Cmp0 == APFloat::cmpLessThan ||
9865       (Cmp0 == APFloat::cmpUnordered &&
9866        MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) {
9867     return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
9868   }
9869 
9870   APFloat One(F.getSemantics(), "1.0");
9871   APFloat::cmpResult Cmp1 = F.compare(One);
9872   if (Cmp1 == APFloat::cmpGreaterThan)
9873     return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));
9874 
9875   return SDValue(CSrc, 0);
9876 }
9877 
9878 
9879 SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
9880                                             DAGCombinerInfo &DCI) const {
9881   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
9882     return SDValue();
9883   switch (N->getOpcode()) {
9884   default:
9885     return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
9886   case ISD::ADD:
9887     return performAddCombine(N, DCI);
9888   case ISD::SUB:
9889     return performSubCombine(N, DCI);
9890   case ISD::ADDCARRY:
9891   case ISD::SUBCARRY:
9892     return performAddCarrySubCarryCombine(N, DCI);
9893   case ISD::FADD:
9894     return performFAddCombine(N, DCI);
9895   case ISD::FSUB:
9896     return performFSubCombine(N, DCI);
9897   case ISD::SETCC:
9898     return performSetCCCombine(N, DCI);
9899   case ISD::FMAXNUM:
9900   case ISD::FMINNUM:
9901   case ISD::FMAXNUM_IEEE:
9902   case ISD::FMINNUM_IEEE:
9903   case ISD::SMAX:
9904   case ISD::SMIN:
9905   case ISD::UMAX:
9906   case ISD::UMIN:
9907   case AMDGPUISD::FMIN_LEGACY:
9908   case AMDGPUISD::FMAX_LEGACY:
9909     return performMinMaxCombine(N, DCI);
9910   case ISD::FMA:
9911     return performFMACombine(N, DCI);
9912   case ISD::LOAD: {
    if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
      return Widened;
9915     LLVM_FALLTHROUGH;
9916   }
9917   case ISD::STORE:
9918   case ISD::ATOMIC_LOAD:
9919   case ISD::ATOMIC_STORE:
9920   case ISD::ATOMIC_CMP_SWAP:
9921   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
9922   case ISD::ATOMIC_SWAP:
9923   case ISD::ATOMIC_LOAD_ADD:
9924   case ISD::ATOMIC_LOAD_SUB:
9925   case ISD::ATOMIC_LOAD_AND:
9926   case ISD::ATOMIC_LOAD_OR:
9927   case ISD::ATOMIC_LOAD_XOR:
9928   case ISD::ATOMIC_LOAD_NAND:
9929   case ISD::ATOMIC_LOAD_MIN:
9930   case ISD::ATOMIC_LOAD_MAX:
9931   case ISD::ATOMIC_LOAD_UMIN:
9932   case ISD::ATOMIC_LOAD_UMAX:
9933   case ISD::ATOMIC_LOAD_FADD:
9934   case AMDGPUISD::ATOMIC_INC:
9935   case AMDGPUISD::ATOMIC_DEC:
9936   case AMDGPUISD::ATOMIC_LOAD_FMIN:
9937   case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
9938     if (DCI.isBeforeLegalize())
9939       break;
9940     return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
9941   case ISD::AND:
9942     return performAndCombine(N, DCI);
9943   case ISD::OR:
9944     return performOrCombine(N, DCI);
9945   case ISD::XOR:
9946     return performXorCombine(N, DCI);
9947   case ISD::ZERO_EXTEND:
9948     return performZeroExtendCombine(N, DCI);
9949   case ISD::SIGN_EXTEND_INREG:
9950     return performSignExtendInRegCombine(N , DCI);
9951   case AMDGPUISD::FP_CLASS:
9952     return performClassCombine(N, DCI);
9953   case ISD::FCANONICALIZE:
9954     return performFCanonicalizeCombine(N, DCI);
9955   case AMDGPUISD::RCP:
9956     return performRcpCombine(N, DCI);
9957   case AMDGPUISD::FRACT:
9958   case AMDGPUISD::RSQ:
9959   case AMDGPUISD::RCP_LEGACY:
9960   case AMDGPUISD::RSQ_LEGACY:
9961   case AMDGPUISD::RCP_IFLAG:
9962   case AMDGPUISD::RSQ_CLAMP:
9963   case AMDGPUISD::LDEXP: {
9964     SDValue Src = N->getOperand(0);
9965     if (Src.isUndef())
9966       return Src;
9967     break;
9968   }
9969   case ISD::SINT_TO_FP:
9970   case ISD::UINT_TO_FP:
9971     return performUCharToFloatCombine(N, DCI);
9972   case AMDGPUISD::CVT_F32_UBYTE0:
9973   case AMDGPUISD::CVT_F32_UBYTE1:
9974   case AMDGPUISD::CVT_F32_UBYTE2:
9975   case AMDGPUISD::CVT_F32_UBYTE3:
9976     return performCvtF32UByteNCombine(N, DCI);
9977   case AMDGPUISD::FMED3:
9978     return performFMed3Combine(N, DCI);
9979   case AMDGPUISD::CVT_PKRTZ_F16_F32:
9980     return performCvtPkRTZCombine(N, DCI);
9981   case AMDGPUISD::CLAMP:
9982     return performClampCombine(N, DCI);
9983   case ISD::SCALAR_TO_VECTOR: {
9984     SelectionDAG &DAG = DCI.DAG;
9985     EVT VT = N->getValueType(0);
9986 
9987     // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
9988     if (VT == MVT::v2i16 || VT == MVT::v2f16) {
9989       SDLoc SL(N);
9990       SDValue Src = N->getOperand(0);
9991       EVT EltVT = Src.getValueType();
9992       if (EltVT == MVT::f16)
9993         Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
9994 
9995       SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
9996       return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
9997     }
9998 
9999     break;
10000   }
10001   case ISD::EXTRACT_VECTOR_ELT:
10002     return performExtractVectorEltCombine(N, DCI);
10003   case ISD::INSERT_VECTOR_ELT:
10004     return performInsertVectorEltCombine(N, DCI);
10005   }
10006   return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
10007 }
10008 
10009 /// Helper function for adjustWritemask
10010 static unsigned SubIdx2Lane(unsigned Idx) {
10011   switch (Idx) {
10012   default: return 0;
10013   case AMDGPU::sub0: return 0;
10014   case AMDGPU::sub1: return 1;
10015   case AMDGPU::sub2: return 2;
10016   case AMDGPU::sub3: return 3;
10017   case AMDGPU::sub4: return 4; // Possible with TFE/LWE
10018   }
10019 }
10020 
10021 /// Adjust the writemask of MIMG instructions
10022 SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
10023                                           SelectionDAG &DAG) const {
10024   unsigned Opcode = Node->getMachineOpcode();
10025 
10026   // Subtract 1 because the vdata output is not a MachineSDNode operand.
10027   int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1;
10028   if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx))
10029     return Node; // not implemented for D16
10030 
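  // Collect up to one user per result lane: four data lanes plus an extra
  // lane for the TFE/LWE status result.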
10031   SDNode *Users[5] = { nullptr };
10032   unsigned Lane = 0;
10033   unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1;
10034   unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
10035   unsigned NewDmask = 0;
10036   unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1;
10037   unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1;
10038   bool UsesTFC = (Node->getConstantOperandVal(TFEIdx) ||
10039                   Node->getConstantOperandVal(LWEIdx)) ? 1 : 0;
10040   unsigned TFCLane = 0;
10041   bool HasChain = Node->getNumValues() > 1;
10042 
10043   if (OldDmask == 0) {
    // These are folded out, but on the off chance it happens, don't assert.
10045     return Node;
10046   }
10047 
10048   unsigned OldBitsSet = countPopulation(OldDmask);
10049   // Work out which is the TFE/LWE lane if that is enabled.
10050   if (UsesTFC) {
10051     TFCLane = OldBitsSet;
10052   }
10053 
10054   // Try to figure out the used register components
10055   for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
10056        I != E; ++I) {
10057 
10058     // Don't look at users of the chain.
10059     if (I.getUse().getResNo() != 0)
10060       continue;
10061 
10062     // Abort if we can't understand the usage
10063     if (!I->isMachineOpcode() ||
10064         I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
10065       return Node;
10066 
10067     // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
10068     // Note that subregs are packed, i.e. Lane==0 is the first bit set
10069     // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
10070     // set, etc.
10071     Lane = SubIdx2Lane(I->getConstantOperandVal(1));
10072 
10073     // Check if the use is for the TFE/LWE generated result at VGPRn+1.
10074     if (UsesTFC && Lane == TFCLane) {
10075       Users[Lane] = *I;
10076     } else {
10077       // Set which texture component corresponds to the lane.
10078       unsigned Comp;
10079       for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) {
10080         Comp = countTrailingZeros(Dmask);
10081         Dmask &= ~(1 << Comp);
10082       }
10083 
10084       // Abort if we have more than one user per component.
10085       if (Users[Lane])
10086         return Node;
10087 
10088       Users[Lane] = *I;
10089       NewDmask |= 1 << Comp;
10090     }
10091   }
10092 
10093   // Don't allow 0 dmask, as hardware assumes one channel enabled.
10094   bool NoChannels = !NewDmask;
10095   if (NoChannels) {
10096     if (!UsesTFC) {
10097       // No uses of the result and not using TFC. Then do nothing.
10098       return Node;
10099     }
10100     // If the original dmask has one channel - then nothing to do
10101     if (OldBitsSet == 1)
10102       return Node;
10103     // Use an arbitrary dmask - required for the instruction to work
10104     NewDmask = 1;
10105   }
10106   // Abort if there's no change
10107   if (NewDmask == OldDmask)
10108     return Node;
10109 
10110   unsigned BitsSet = countPopulation(NewDmask);
10111 
  // Check for TFE or LWE - increase the number of channels by one to account
  // for the extra return value.
  // This will need adjustment for D16 if it is also handled by
  // adjustWritemask (this function), but at present D16 is excluded.
10116   unsigned NewChannels = BitsSet + UsesTFC;
10117 
10118   int NewOpcode =
10119       AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels);
10120   assert(NewOpcode != -1 &&
10121          NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
10122          "failed to find equivalent MIMG op");
10123 
10124   // Adjust the writemask in the node
10125   SmallVector<SDValue, 12> Ops;
10126   Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
10127   Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
10128   Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
10129 
10130   MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
10131 
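  // A 3-channel result is widened to a 4-element vector and a 5-channel
  // result (4 data channels plus TFE/LWE) to an 8-element vector.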
10132   MVT ResultVT = NewChannels == 1 ?
10133     SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 :
10134                            NewChannels == 5 ? 8 : NewChannels);
10135   SDVTList NewVTList = HasChain ?
10136     DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);
10137 
10138 
10139   MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
10140                                               NewVTList, Ops);
10141 
10142   if (HasChain) {
10143     // Update chain.
10144     DAG.setNodeMemRefs(NewNode, Node->memoperands());
10145     DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
10146   }
10147 
10148   if (NewChannels == 1) {
10149     assert(Node->hasNUsesOfValue(1, 0));
10150     SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
10151                                       SDLoc(Node), Users[Lane]->getValueType(0),
10152                                       SDValue(NewNode, 0));
10153     DAG.ReplaceAllUsesWith(Users[Lane], Copy);
10154     return nullptr;
10155   }
10156 
10157   // Update the users of the node with the new indices
10158   for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) {
10159     SDNode *User = Users[i];
10160     if (!User) {
10161       // Handle the special case of NoChannels. We set NewDmask to 1 above, but
10162       // Users[0] is still nullptr because channel 0 doesn't really have a use.
10163       if (i || !NoChannels)
10164         continue;
10165     } else {
10166       SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
10167       DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
10168     }
10169 
10170     switch (Idx) {
10171     default: break;
10172     case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
10173     case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
10174     case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
10175     case AMDGPU::sub3: Idx = AMDGPU::sub4; break;
10176     }
10177   }
10178 
10179   DAG.RemoveDeadNode(Node);
10180   return nullptr;
10181 }
10182 
10183 static bool isFrameIndexOp(SDValue Op) {
10184   if (Op.getOpcode() == ISD::AssertZext)
10185     Op = Op.getOperand(0);
10186 
10187   return isa<FrameIndexSDNode>(Op);
10188 }
10189 
10190 /// Legalize target independent instructions (e.g. INSERT_SUBREG)
10191 /// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers.
10193 SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
10194                                                         SelectionDAG &DAG) const {
10195   if (Node->getOpcode() == ISD::CopyToReg) {
10196     RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
10197     SDValue SrcVal = Node->getOperand(2);
10198 
10199     // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
10200     // to try understanding copies to physical registers.
10201     if (SrcVal.getValueType() == MVT::i1 &&
10202         Register::isPhysicalRegister(DestReg->getReg())) {
10203       SDLoc SL(Node);
10204       MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
10205       SDValue VReg = DAG.getRegister(
10206         MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
10207 
10208       SDNode *Glued = Node->getGluedNode();
10209       SDValue ToVReg
10210         = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
10211                          SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
10212       SDValue ToResultReg
10213         = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
10214                            VReg, ToVReg.getValue(1));
10215       DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
10216       DAG.RemoveDeadNode(Node);
10217       return ToResultReg.getNode();
10218     }
10219   }
10220 
10221   SmallVector<SDValue, 8> Ops;
10222   for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
10223     if (!isFrameIndexOp(Node->getOperand(i))) {
10224       Ops.push_back(Node->getOperand(i));
10225       continue;
10226     }
10227 
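    // Materialize the frame index into an SGPR with S_MOV_B32 so the node
    // sees a register operand.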
10228     SDLoc DL(Node);
10229     Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
10230                                      Node->getOperand(i).getValueType(),
10231                                      Node->getOperand(i)), 0));
10232   }
10233 
10234   return DAG.UpdateNodeOperands(Node, Ops);
10235 }
10236 
10237 /// Fold the instructions after selecting them.
10238 /// Returns null if users were already updated.
10239 SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
10240                                           SelectionDAG &DAG) const {
10241   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10242   unsigned Opcode = Node->getMachineOpcode();
10243 
10244   if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
10245       !TII->isGather4(Opcode)) {
10246     return adjustWritemask(Node, DAG);
10247   }
10248 
10249   if (Opcode == AMDGPU::INSERT_SUBREG ||
10250       Opcode == AMDGPU::REG_SEQUENCE) {
10251     legalizeTargetIndependentNode(Node, DAG);
10252     return Node;
10253   }
10254 
10255   switch (Opcode) {
10256   case AMDGPU::V_DIV_SCALE_F32:
10257   case AMDGPU::V_DIV_SCALE_F64: {
10258     // Satisfy the operand register constraint when one of the inputs is
10259     // undefined. Ordinarily each undef value will have its own implicit_def of
10260     // a vreg, so force these to use a single register.
10261     SDValue Src0 = Node->getOperand(0);
10262     SDValue Src1 = Node->getOperand(1);
10263     SDValue Src2 = Node->getOperand(2);
10264 
10265     if ((Src0.isMachineOpcode() &&
10266          Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
10267         (Src0 == Src1 || Src0 == Src2))
10268       break;
10269 
10270     MVT VT = Src0.getValueType().getSimpleVT();
10271     const TargetRegisterClass *RC =
10272         getRegClassFor(VT, Src0.getNode()->isDivergent());
10273 
10274     MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
10275     SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
10276 
10277     SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
10278                                       UndefReg, Src0, SDValue());
10279 
10280     // src0 must be the same register as src1 or src2, even if the value is
10281     // undefined, so make sure we don't violate this constraint.
10282     if (Src0.isMachineOpcode() &&
10283         Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
10284       if (Src1.isMachineOpcode() &&
10285           Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
10286         Src0 = Src1;
10287       else if (Src2.isMachineOpcode() &&
10288                Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
10289         Src0 = Src2;
10290       else {
10291         assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
10292         Src0 = UndefReg;
10293         Src1 = UndefReg;
10294       }
10295     } else
10296       break;
10297 
10298     SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
10299     for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
10300       Ops.push_back(Node->getOperand(I));
10301 
10302     Ops.push_back(ImpDef.getValue(1));
10303     return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
10304   }
10305   case AMDGPU::V_PERMLANE16_B32:
10306   case AMDGPU::V_PERMLANEX16_B32: {
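    // If either the fi or bound_ctrl operand is set and the vdst_in operand is
    // not already an implicit_def, rebuild the node with a fresh IMPLICIT_DEF
    // as vdst_in.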
10307     ConstantSDNode *FI = cast<ConstantSDNode>(Node->getOperand(0));
10308     ConstantSDNode *BC = cast<ConstantSDNode>(Node->getOperand(2));
10309     if (!FI->getZExtValue() && !BC->getZExtValue())
10310       break;
10311     SDValue VDstIn = Node->getOperand(6);
10312     if (VDstIn.isMachineOpcode()
10313         && VDstIn.getMachineOpcode() == AMDGPU::IMPLICIT_DEF)
10314       break;
10315     MachineSDNode *ImpDef = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
10316                                                SDLoc(Node), MVT::i32);
10317     SmallVector<SDValue, 8> Ops = { SDValue(FI, 0), Node->getOperand(1),
10318                                     SDValue(BC, 0), Node->getOperand(3),
10319                                     Node->getOperand(4), Node->getOperand(5),
10320                                     SDValue(ImpDef, 0), Node->getOperand(7) };
10321     return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
10322   }
10323   default:
10324     break;
10325   }
10326 
10327   return Node;
10328 }
10329 
/// Legalize VOP3 operands and replace unused atomics with their no-return
/// variants after instruction selection.
10332 void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
10333                                                      SDNode *Node) const {
10334   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10335 
10336   MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
10337 
10338   if (TII->isVOP3(MI.getOpcode())) {
10339     // Make sure constant bus requirements are respected.
10340     TII->legalizeOperandsVOP3(MRI, MI);
10341 
10342     // Prefer VGPRs over AGPRs in mAI instructions where possible.
    // This saves a chain-copy of registers and better balances register
    // use between vgpr and agpr as agpr tuples tend to be big.
10345     if (const MCOperandInfo *OpInfo = MI.getDesc().OpInfo) {
10346       unsigned Opc = MI.getOpcode();
10347       const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
10348       for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
10349                       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) {
10350         if (I == -1)
10351           break;
10352         MachineOperand &Op = MI.getOperand(I);
10353         if ((OpInfo[I].RegClass != llvm::AMDGPU::AV_64RegClassID &&
10354              OpInfo[I].RegClass != llvm::AMDGPU::AV_32RegClassID) ||
10355             !Register::isVirtualRegister(Op.getReg()) ||
10356             !TRI->isAGPR(MRI, Op.getReg()))
10357           continue;
10358         auto *Src = MRI.getUniqueVRegDef(Op.getReg());
10359         if (!Src || !Src->isCopy() ||
10360             !TRI->isSGPRReg(MRI, Src->getOperand(1).getReg()))
10361           continue;
10362         auto *RC = TRI->getRegClassForReg(MRI, Op.getReg());
10363         auto *NewRC = TRI->getEquivalentVGPRClass(RC);
10364         // All uses of agpr64 and agpr32 can also accept vgpr except for
10365         // v_accvgpr_read, but we do not produce agpr reads during selection,
10366         // so no use checks are needed.
10367         MRI.setRegClass(Op.getReg(), NewRC);
10368       }
10369     }
10370 
10371     return;
10372   }
10373 
10374   // Replace unused atomics with the no return version.
10375   int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
10376   if (NoRetAtomicOp != -1) {
10377     if (!Node->hasAnyUseOfValue(0)) {
10378       MI.setDesc(TII->get(NoRetAtomicOp));
10379       MI.RemoveOperand(0);
10380       return;
10381     }
10382 
10383     // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
10384     // instruction, because the return type of these instructions is a vec2 of
10385     // the memory type, so it can be tied to the input operand.
10386     // This means these instructions always have a use, so we need to add a
10387     // special case to check if the atomic has only one extract_subreg use,
10388     // which itself has no uses.
10389     if ((Node->hasNUsesOfValue(1, 0) &&
10390          Node->use_begin()->isMachineOpcode() &&
10391          Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
10392          !Node->use_begin()->hasAnyUseOfValue(0))) {
10393       Register Def = MI.getOperand(0).getReg();
10394 
10395       // Change this into a noret atomic.
10396       MI.setDesc(TII->get(NoRetAtomicOp));
10397       MI.RemoveOperand(0);
10398 
10399       // If we only remove the def operand from the atomic instruction, the
10400       // extract_subreg will be left with a use of a vreg without a def.
10401       // So we need to insert an implicit_def to avoid machine verifier
10402       // errors.
10403       BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
10404               TII->get(AMDGPU::IMPLICIT_DEF), Def);
10405     }
10406     return;
10407   }
10408 }
10409 
10410 static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
10411                               uint64_t Val) {
10412   SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
10413   return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
10414 }
10415 
10416 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
10417                                                 const SDLoc &DL,
10418                                                 SDValue Ptr) const {
10419   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10420 
10421   // Build the half of the subregister with the constants before building the
10422   // full 128-bit register. If we are building multiple resource descriptors,
10423   // this will allow CSEing of the 2-component register.
10424   const SDValue Ops0[] = {
10425     DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
10426     buildSMovImm32(DAG, DL, 0),
10427     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
10428     buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
10429     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
10430   };
10431 
10432   SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
10433                                                 MVT::v2i32, Ops0), 0);
10434 
10435   // Combine the constants and the pointer.
10436   const SDValue Ops1[] = {
10437     DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
10438     Ptr,
10439     DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
10440     SubRegHi,
10441     DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
10442   };
10443 
10444   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
10445 }
10446 
10447 /// Return a resource descriptor with the 'Add TID' bit enabled
10448 ///        The TID (Thread ID) is multiplied by the stride value (bits [61:48]
10449 ///        of the resource descriptor) to create an offset, which is added to
10450 ///        the resource pointer.
10451 MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
10452                                            SDValue Ptr, uint32_t RsrcDword1,
10453                                            uint64_t RsrcDword2And3) const {
10454   SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
10455   SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
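  // RsrcDword1 holds extra bits for the second dword of the descriptor (e.g.
  // the stride); OR them into the high half of the pointer.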
10456   if (RsrcDword1) {
10457     PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
10458                                      DAG.getConstant(RsrcDword1, DL, MVT::i32)),
10459                     0);
10460   }
10461 
10462   SDValue DataLo = buildSMovImm32(DAG, DL,
10463                                   RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
10464   SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
10465 
10466   const SDValue Ops[] = {
10467     DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
10468     PtrLo,
10469     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
10470     PtrHi,
10471     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
10472     DataLo,
10473     DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
10474     DataHi,
10475     DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
10476   };
10477 
10478   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
10479 }
10480 
10481 //===----------------------------------------------------------------------===//
10482 //                         SI Inline Assembly Support
10483 //===----------------------------------------------------------------------===//
10484 
10485 std::pair<unsigned, const TargetRegisterClass *>
10486 SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
10487                                                StringRef Constraint,
10488                                                MVT VT) const {
10489   const TargetRegisterClass *RC = nullptr;
10490   if (Constraint.size() == 1) {
10491     switch (Constraint[0]) {
10492     default:
10493       return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10494     case 's':
10495     case 'r':
10496       switch (VT.getSizeInBits()) {
10497       default:
10498         return std::make_pair(0U, nullptr);
10499       case 32:
10500       case 16:
10501         RC = &AMDGPU::SReg_32_XM0RegClass;
10502         break;
10503       case 64:
10504         RC = &AMDGPU::SGPR_64RegClass;
10505         break;
10506       case 96:
10507         RC = &AMDGPU::SReg_96RegClass;
10508         break;
10509       case 128:
10510         RC = &AMDGPU::SReg_128RegClass;
10511         break;
10512       case 160:
10513         RC = &AMDGPU::SReg_160RegClass;
10514         break;
10515       case 256:
10516         RC = &AMDGPU::SReg_256RegClass;
10517         break;
10518       case 512:
10519         RC = &AMDGPU::SReg_512RegClass;
10520         break;
10521       }
10522       break;
10523     case 'v':
10524       switch (VT.getSizeInBits()) {
10525       default:
10526         return std::make_pair(0U, nullptr);
10527       case 32:
10528       case 16:
10529         RC = &AMDGPU::VGPR_32RegClass;
10530         break;
10531       case 64:
10532         RC = &AMDGPU::VReg_64RegClass;
10533         break;
10534       case 96:
10535         RC = &AMDGPU::VReg_96RegClass;
10536         break;
10537       case 128:
10538         RC = &AMDGPU::VReg_128RegClass;
10539         break;
10540       case 160:
10541         RC = &AMDGPU::VReg_160RegClass;
10542         break;
10543       case 256:
10544         RC = &AMDGPU::VReg_256RegClass;
10545         break;
10546       case 512:
10547         RC = &AMDGPU::VReg_512RegClass;
10548         break;
10549       }
10550       break;
10551     case 'a':
10552       if (!Subtarget->hasMAIInsts())
10553         break;
10554       switch (VT.getSizeInBits()) {
10555       default:
10556         return std::make_pair(0U, nullptr);
10557       case 32:
10558       case 16:
10559         RC = &AMDGPU::AGPR_32RegClass;
10560         break;
10561       case 64:
10562         RC = &AMDGPU::AReg_64RegClass;
10563         break;
10564       case 128:
10565         RC = &AMDGPU::AReg_128RegClass;
10566         break;
10567       case 512:
10568         RC = &AMDGPU::AReg_512RegClass;
10569         break;
10570       case 1024:
10571         RC = &AMDGPU::AReg_1024RegClass;
10572         // v32 types are not legal but we support them here.
10573         return std::make_pair(0U, RC);
10574       }
10575       break;
10576     }
10577     // We actually support i128, i16 and f16 as inline parameters
10578     // even if they are not reported as legal
10579     if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
10580                VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
10581       return std::make_pair(0U, RC);
10582   }
10583 
10584   if (Constraint.size() > 1) {
10585     if (Constraint[1] == 'v') {
10586       RC = &AMDGPU::VGPR_32RegClass;
10587     } else if (Constraint[1] == 's') {
10588       RC = &AMDGPU::SGPR_32RegClass;
10589     } else if (Constraint[1] == 'a') {
10590       RC = &AMDGPU::AGPR_32RegClass;
10591     }
10592 
10593     if (RC) {
10594       uint32_t Idx;
10595       bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
10596       if (!Failed && Idx < RC->getNumRegs())
10597         return std::make_pair(RC->getRegister(Idx), RC);
10598     }
10599   }
10600   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10601 }
10602 
10603 SITargetLowering::ConstraintType
10604 SITargetLowering::getConstraintType(StringRef Constraint) const {
10605   if (Constraint.size() == 1) {
10606     switch (Constraint[0]) {
10607     default: break;
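    // 's' (SGPR), 'v' (VGPR) and 'a' (AGPR) all constrain to a register
    // class.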
10608     case 's':
10609     case 'v':
10610     case 'a':
10611       return C_RegisterClass;
10612     }
10613   }
10614   return TargetLowering::getConstraintType(Constraint);
10615 }
10616 
10617 // Figure out which registers should be reserved for stack access. Only after
10618 // the function is legalized do we know all of the non-spill stack objects or if
10619 // calls are present.
10620 void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
10621   MachineRegisterInfo &MRI = MF.getRegInfo();
10622   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
10623   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
10624   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
10625 
10626   if (Info->isEntryFunction()) {
10627     // Callable functions have fixed registers used for stack access.
10628     reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
10629   }
10630 
10631   assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
10632                              Info->getStackPtrOffsetReg()));
10633   if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG)
10634     MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
10635 
10636   // We need to worry about replacing the default register with itself in case
10637   // of MIR testcases missing the MFI.
10638   if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG)
10639     MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
10640 
10641   if (Info->getFrameOffsetReg() != AMDGPU::FP_REG)
10642     MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
10643 
10644   if (Info->getScratchWaveOffsetReg() != AMDGPU::SCRATCH_WAVE_OFFSET_REG) {
10645     MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
10646                        Info->getScratchWaveOffsetReg());
10647   }
10648 
10649   Info->limitOccupancy(MF);
10650 
10651   if (ST.isWave32() && !MF.empty()) {
    // Add a VCC_HI def because many instructions are marked as implicitly
    // using VCC, while we may only define VCC_LO. If nothing defines VCC_HI
    // we may end up with a use of undef.
10655 
10656     const SIInstrInfo *TII = ST.getInstrInfo();
10657     DebugLoc DL;
10658 
10659     MachineBasicBlock &MBB = MF.front();
10660     MachineBasicBlock::iterator I = MBB.getFirstNonDebugInstr();
10661     BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), AMDGPU::VCC_HI);
10662 
10663     for (auto &MBB : MF) {
10664       for (auto &MI : MBB) {
10665         TII->fixImplicitOperands(MI);
10666       }
10667     }
10668   }
10669 
10670   TargetLoweringBase::finalizeLowering(MF);
10671 }
10672 
10673 void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
10674                                                      KnownBits &Known,
10675                                                      const APInt &DemandedElts,
10676                                                      const SelectionDAG &DAG,
10677                                                      unsigned Depth) const {
10678   TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
10679                                                 DAG, Depth);
10680 
10681   // Set the high bits to zero based on the maximum allowed scratch size per
10682   // wave. We can't use vaddr in MUBUF instructions if we don't know the address
10683   // calculation won't overflow, so assume the sign bit is never set.
10684   Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
10685 }
10686 
10687 llvm::Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
10688   const llvm::Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
10689   const llvm::Align CacheLineAlign = llvm::Align(64);
10690 
  // Pre-GFX10 targets did not benefit from loop alignment.
10692   if (!ML || DisableLoopAlignment ||
10693       (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
10694       getSubtarget()->hasInstFwdPrefetchBug())
10695     return PrefAlign;
10696 
  // On GFX10 the I$ consists of 4 x 64 byte cache lines.
  // By default the prefetcher keeps one cache line behind and reads two ahead.
  // We can modify it with S_INST_PREFETCH so that larger loops keep two lines
  // behind and one ahead.
  // Therefore we benefit from aligning loop headers if the loop fits in
  // 192 bytes.
  // If the loop fits in 64 bytes it never spans more than two cache lines and
  // does not need alignment.
  // Otherwise, if the loop fits in 128 bytes we do not need to modify the
  // prefetch; if it fits in 192 bytes we need two lines behind.
10706 
10707   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10708   const MachineBasicBlock *Header = ML->getHeader();
10709   if (Header->getAlignment() != PrefAlign)
10710     return Header->getAlignment(); // Already processed.
10711 
10712   unsigned LoopSize = 0;
10713   for (const MachineBasicBlock *MBB : ML->blocks()) {
    // If an inner loop block is aligned, assume on average half of the
    // alignment size is added as nops.
10716     if (MBB != Header)
10717       LoopSize += MBB->getAlignment().value() / 2;
10718 
10719     for (const MachineInstr &MI : *MBB) {
10720       LoopSize += TII->getInstSizeInBytes(MI);
10721       if (LoopSize > 192)
10722         return PrefAlign;
10723     }
10724   }
10725 
10726   if (LoopSize <= 64)
10727     return PrefAlign;
10728 
10729   if (LoopSize <= 128)
10730     return CacheLineAlign;
10731 
10732   // If any of parent loops is surrounded by prefetch instructions do not
10733   // insert new for inner loop, which would reset parent's settings.
10734   for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) {
10735     if (MachineBasicBlock *Exit = P->getExitBlock()) {
10736       auto I = Exit->getFirstNonDebugInstr();
10737       if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
10738         return CacheLineAlign;
10739     }
10740   }
10741 
10742   MachineBasicBlock *Pre = ML->getLoopPreheader();
10743   MachineBasicBlock *Exit = ML->getExitBlock();
10744 
10745   if (Pre && Exit) {
10746     BuildMI(*Pre, Pre->getFirstTerminator(), DebugLoc(),
10747             TII->get(AMDGPU::S_INST_PREFETCH))
10748       .addImm(1); // prefetch 2 lines behind PC
10749 
10750     BuildMI(*Exit, Exit->getFirstNonDebugInstr(), DebugLoc(),
10751             TII->get(AMDGPU::S_INST_PREFETCH))
10752       .addImm(2); // prefetch 1 line behind PC
10753   }
10754 
10755   return CacheLineAlign;
10756 }
10757 
10758 LLVM_ATTRIBUTE_UNUSED
10759 static bool isCopyFromRegOfInlineAsm(const SDNode *N) {
10760   assert(N->getOpcode() == ISD::CopyFromReg);
10761   do {
10762     // Follow the chain until we find an INLINEASM node.
10763     N = N->getOperand(0).getNode();
10764     if (N->getOpcode() == ISD::INLINEASM ||
10765         N->getOpcode() == ISD::INLINEASM_BR)
10766       return true;
10767   } while (N->getOpcode() == ISD::CopyFromReg);
10768   return false;
10769 }
10770 
bool SITargetLowering::isSDNodeSourceOfDivergence(
    const SDNode *N, FunctionLoweringInfo *FLI,
    LegacyDivergenceAnalysis *KDA) const {
10774   switch (N->getOpcode()) {
10775     case ISD::CopyFromReg:
10776     {
10777       const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1));
10778       const MachineFunction * MF = FLI->MF;
10779       const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
10780       const MachineRegisterInfo &MRI = MF->getRegInfo();
10781       const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
10782       unsigned Reg = R->getReg();
10783       if (Register::isPhysicalRegister(Reg))
10784         return !TRI.isSGPRReg(MRI, Reg);
10785 
10786       if (MRI.isLiveIn(Reg)) {
10787         // workitem.id.x workitem.id.y workitem.id.z
10788         // Any VGPR formal argument is also considered divergent
10789         if (!TRI.isSGPRReg(MRI, Reg))
10790           return true;
10791         // Formal arguments of non-entry functions
10792         // are conservatively considered divergent
10793         else if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
10794           return true;
10795         return false;
10796       }
10797       const Value *V = FLI->getValueFromVirtualReg(Reg);
10798       if (V)
10799         return KDA->isDivergent(V);
10800       assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
10801       return !TRI.isSGPRReg(MRI, Reg);
10802     }
10803     break;
10804     case ISD::LOAD: {
10805       const LoadSDNode *L = cast<LoadSDNode>(N);
10806       unsigned AS = L->getAddressSpace();
10807       // A flat load may access private memory.
10808       return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
    }
    case ISD::CALLSEQ_END:
      return true;
    case ISD::INTRINSIC_WO_CHAIN:
      return AMDGPU::isIntrinsicSourceOfDivergence(
          cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
    case ISD::INTRINSIC_W_CHAIN:
      return AMDGPU::isIntrinsicSourceOfDivergence(
          cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
10822     // In some cases intrinsics that are a source of divergence have been
    // lowered to AMDGPUISD, so we need to check those as well.
10824     case AMDGPUISD::INTERP_MOV:
10825     case AMDGPUISD::INTERP_P1:
10826     case AMDGPUISD::INTERP_P2:
10827       return true;
10828   }
10829   return false;
10830 }
10831 
10832 bool SITargetLowering::denormalsEnabledForType(EVT VT) const {
10833   switch (VT.getScalarType().getSimpleVT().SimpleTy) {
10834   case MVT::f32:
10835     return Subtarget->hasFP32Denormals();
10836   case MVT::f64:
10837     return Subtarget->hasFP64Denormals();
10838   case MVT::f16:
10839     return Subtarget->hasFP16Denormals();
10840   default:
10841     return false;
10842   }
10843 }
10844 
10845 bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
10846                                                     const SelectionDAG &DAG,
10847                                                     bool SNaN,
10848                                                     unsigned Depth) const {
10849   if (Op.getOpcode() == AMDGPUISD::CLAMP) {
10850     const MachineFunction &MF = DAG.getMachineFunction();
10851     const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
10852 
10853     if (Info->getMode().DX10Clamp)
10854       return true; // Clamped to 0.
10855     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
10856   }
10857 
10858   return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG,
10859                                                             SNaN, Depth);
10860 }
10861 
10862 TargetLowering::AtomicExpansionKind
10863 SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
10864   switch (RMW->getOperation()) {
10865   case AtomicRMWInst::FAdd: {
10866     Type *Ty = RMW->getType();
10867 
10868     // We don't have a way to support 16-bit atomics now, so just leave them
10869     // as-is.
10870     if (Ty->isHalfTy())
10871       return AtomicExpansionKind::None;
10872 
10873     if (!Ty->isFloatTy())
10874       return AtomicExpansionKind::CmpXChg;
10875 
10876     // TODO: Do have these for flat. Older targets also had them for buffers.
10877     unsigned AS = RMW->getPointerAddressSpace();
10878     return (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) ?
10879       AtomicExpansionKind::None : AtomicExpansionKind::CmpXChg;
10880   }
10881   default:
10882     break;
10883   }
10884 
10885   return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
10886 }
10887