1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Custom DAG lowering for SI
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #if defined(_MSC_VER) || defined(__MINGW32__)
15 // Provide M_PI.
16 #define _USE_MATH_DEFINES
17 #endif
18 
19 #include "SIISelLowering.h"
20 #include "AMDGPU.h"
21 #include "AMDGPUSubtarget.h"
22 #include "AMDGPUTargetMachine.h"
23 #include "SIDefines.h"
24 #include "SIInstrInfo.h"
25 #include "SIMachineFunctionInfo.h"
26 #include "SIRegisterInfo.h"
27 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
28 #include "Utils/AMDGPUBaseInfo.h"
29 #include "llvm/ADT/APFloat.h"
30 #include "llvm/ADT/APInt.h"
31 #include "llvm/ADT/ArrayRef.h"
32 #include "llvm/ADT/BitVector.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringRef.h"
36 #include "llvm/ADT/StringSwitch.h"
37 #include "llvm/ADT/Twine.h"
38 #include "llvm/CodeGen/Analysis.h"
39 #include "llvm/CodeGen/CallingConvLower.h"
40 #include "llvm/CodeGen/DAGCombine.h"
41 #include "llvm/CodeGen/ISDOpcodes.h"
42 #include "llvm/CodeGen/MachineBasicBlock.h"
43 #include "llvm/CodeGen/MachineFrameInfo.h"
44 #include "llvm/CodeGen/MachineFunction.h"
45 #include "llvm/CodeGen/MachineInstr.h"
46 #include "llvm/CodeGen/MachineInstrBuilder.h"
47 #include "llvm/CodeGen/MachineMemOperand.h"
48 #include "llvm/CodeGen/MachineModuleInfo.h"
49 #include "llvm/CodeGen/MachineOperand.h"
50 #include "llvm/CodeGen/MachineRegisterInfo.h"
51 #include "llvm/CodeGen/SelectionDAG.h"
52 #include "llvm/CodeGen/SelectionDAGNodes.h"
53 #include "llvm/CodeGen/TargetCallingConv.h"
54 #include "llvm/CodeGen/TargetRegisterInfo.h"
55 #include "llvm/CodeGen/ValueTypes.h"
56 #include "llvm/IR/Constants.h"
57 #include "llvm/IR/DataLayout.h"
58 #include "llvm/IR/DebugLoc.h"
59 #include "llvm/IR/DerivedTypes.h"
60 #include "llvm/IR/DiagnosticInfo.h"
61 #include "llvm/IR/Function.h"
62 #include "llvm/IR/GlobalValue.h"
63 #include "llvm/IR/InstrTypes.h"
64 #include "llvm/IR/Instruction.h"
65 #include "llvm/IR/Instructions.h"
66 #include "llvm/IR/IntrinsicInst.h"
67 #include "llvm/IR/Type.h"
68 #include "llvm/Support/Casting.h"
69 #include "llvm/Support/CodeGen.h"
70 #include "llvm/Support/CommandLine.h"
71 #include "llvm/Support/Compiler.h"
72 #include "llvm/Support/ErrorHandling.h"
73 #include "llvm/Support/KnownBits.h"
74 #include "llvm/Support/MachineValueType.h"
75 #include "llvm/Support/MathExtras.h"
76 #include "llvm/Target/TargetOptions.h"
77 #include <cassert>
78 #include <cmath>
79 #include <cstdint>
80 #include <iterator>
81 #include <tuple>
82 #include <utility>
83 #include <vector>
84 
85 using namespace llvm;
86 
87 #define DEBUG_TYPE "si-lower"
88 
89 STATISTIC(NumTailCalls, "Number of tail calls");
90 
91 static cl::opt<bool> EnableVGPRIndexMode(
92   "amdgpu-vgpr-index-mode",
93   cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
94   cl::init(false));
95 
96 static cl::opt<bool> DisableLoopAlignment(
97   "amdgpu-disable-loop-alignment",
98   cl::desc("Do not align and prefetch loops"),
99   cl::init(false));
100 
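// Find the lowest-numbered SGPR that the calling-convention state has not yet
// allocated; fails if every SGPR has already been assigned.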
101 static unsigned findFirstFreeSGPR(CCState &CCInfo) {
102   unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
103   for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
104     if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
105       return AMDGPU::SGPR0 + Reg;
106     }
107   }
108   llvm_unreachable("Cannot allocate sgpr");
109 }
110 
111 SITargetLowering::SITargetLowering(const TargetMachine &TM,
112                                    const GCNSubtarget &STI)
113     : AMDGPUTargetLowering(TM, STI),
114       Subtarget(&STI) {
115   addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
116   addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
117 
118   addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
119   addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);
120 
121   addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
122   addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
123   addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
124 
125   addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
126   addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass);
127 
128   addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
129   addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);
130 
131   addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
132   addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
133 
134   addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
135   addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);
136 
137   addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
138   addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
139 
140   addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
141   addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
142 
143   if (Subtarget->has16BitInsts()) {
144     addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
145     addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
146 
    // Unless there are also VOP3P operations, no operations are really legal.
148     addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
149     addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
150     addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
151     addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
152   }
153 
154   if (Subtarget->hasMAIInsts()) {
155     addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass);
156     addRegisterClass(MVT::v32f32, &AMDGPU::VReg_1024RegClass);
157   }
158 
159   computeRegisterProperties(Subtarget->getRegisterInfo());
160 
  // We need to custom lower vector loads and stores from local memory
162   setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
163   setOperationAction(ISD::LOAD, MVT::v3i32, Custom);
164   setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
165   setOperationAction(ISD::LOAD, MVT::v5i32, Custom);
166   setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
167   setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
168   setOperationAction(ISD::LOAD, MVT::i1, Custom);
169   setOperationAction(ISD::LOAD, MVT::v32i32, Custom);
170 
171   setOperationAction(ISD::STORE, MVT::v2i32, Custom);
172   setOperationAction(ISD::STORE, MVT::v3i32, Custom);
173   setOperationAction(ISD::STORE, MVT::v4i32, Custom);
174   setOperationAction(ISD::STORE, MVT::v5i32, Custom);
175   setOperationAction(ISD::STORE, MVT::v8i32, Custom);
176   setOperationAction(ISD::STORE, MVT::v16i32, Custom);
177   setOperationAction(ISD::STORE, MVT::i1, Custom);
178   setOperationAction(ISD::STORE, MVT::v32i32, Custom);
179 
180   setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
181   setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
182   setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
183   setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
184   setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
185   setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
186   setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
187   setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
188   setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
189   setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);
190 
191   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
192   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
193 
194   setOperationAction(ISD::SELECT, MVT::i1, Promote);
195   setOperationAction(ISD::SELECT, MVT::i64, Custom);
196   setOperationAction(ISD::SELECT, MVT::f64, Promote);
197   AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);
198 
199   setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
200   setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
201   setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
202   setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
203   setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
204 
205   setOperationAction(ISD::SETCC, MVT::i1, Promote);
206   setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
207   setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
208   AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
209 
210   setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
211   setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
212 
213   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
214   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
215   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
216   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
217   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
218   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
219   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);
220 
221   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
222   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
223   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
224   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
225   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom);
226   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
227   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);
228 
229   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
230   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
231   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom);
232   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
233   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
234   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
235 
236   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
237   setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
238   setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
239   setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);
240   setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
241   setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
242 
243   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
244   setOperationAction(ISD::BR_CC, MVT::i1, Expand);
245   setOperationAction(ISD::BR_CC, MVT::i32, Expand);
246   setOperationAction(ISD::BR_CC, MVT::i64, Expand);
247   setOperationAction(ISD::BR_CC, MVT::f32, Expand);
248   setOperationAction(ISD::BR_CC, MVT::f64, Expand);
249 
250   setOperationAction(ISD::UADDO, MVT::i32, Legal);
251   setOperationAction(ISD::USUBO, MVT::i32, Legal);
252 
253   setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
254   setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);
255 
256   setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
257   setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
258   setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
259 
260 #if 0
261   setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
262   setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
263 #endif
264 
265   // We only support LOAD/STORE and vector manipulation ops for vectors
266   // with > 4 elements.
267   for (MVT VT : { MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
268                   MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16,
269                   MVT::v32i32, MVT::v32f32 }) {
270     for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
271       switch (Op) {
272       case ISD::LOAD:
273       case ISD::STORE:
274       case ISD::BUILD_VECTOR:
275       case ISD::BITCAST:
276       case ISD::EXTRACT_VECTOR_ELT:
277       case ISD::INSERT_VECTOR_ELT:
278       case ISD::INSERT_SUBVECTOR:
279       case ISD::EXTRACT_SUBVECTOR:
280       case ISD::SCALAR_TO_VECTOR:
281         break;
282       case ISD::CONCAT_VECTORS:
283         setOperationAction(Op, VT, Custom);
284         break;
285       default:
286         setOperationAction(Op, VT, Expand);
287         break;
288       }
289     }
290   }
291 
292   setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);
293 
294   // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
295   // is expanded to avoid having two separate loops in case the index is a VGPR.
296 
297   // Most operations are naturally 32-bit vector operations. We only support
298   // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
299   for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
300     setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
301     AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);
302 
303     setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
304     AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);
305 
306     setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
307     AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);
308 
309     setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
310     AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
311   }
312 
313   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
314   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
315   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
316   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);
317 
318   setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
319   setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
320 
321   // Avoid stack access for these.
322   // TODO: Generalize to more vector types.
323   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
324   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
325   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
326   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);
327 
328   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
329   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
330   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
331   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
332   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);
333 
334   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
335   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
336   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);
337 
338   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
339   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
340   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
341   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);
342 
343   // Deal with vec3 vector operations when widened to vec4.
344   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Custom);
345   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Custom);
346   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Custom);
347   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Custom);
348 
349   // Deal with vec5 vector operations when widened to vec8.
350   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Custom);
351   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Custom);
352   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Custom);
353   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Custom);
354 
  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling and
  // output demarshalling.
357   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
358   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
359 
  // We can't return success/failure, only the old value;
  // let LLVM add the comparison.
362   setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
363   setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);
364 
365   if (Subtarget->hasFlatAddressSpace()) {
366     setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
367     setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
368   }
369 
370   setOperationAction(ISD::BSWAP, MVT::i32, Legal);
371   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
372 
  // This is s_memtime on SI and s_memrealtime on VI.
374   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
375   setOperationAction(ISD::TRAP, MVT::Other, Custom);
376   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);
377 
378   if (Subtarget->has16BitInsts()) {
379     setOperationAction(ISD::FLOG, MVT::f16, Custom);
380     setOperationAction(ISD::FEXP, MVT::f16, Custom);
381     setOperationAction(ISD::FLOG10, MVT::f16, Custom);
382   }
383 
384   // v_mad_f32 does not support denormals according to some sources.
385   if (!Subtarget->hasFP32Denormals())
386     setOperationAction(ISD::FMAD, MVT::f32, Legal);
387 
388   if (!Subtarget->hasBFI()) {
389     // fcopysign can be done in a single instruction with BFI.
390     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
391     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
392   }
393 
394   if (!Subtarget->hasBCNT(32))
395     setOperationAction(ISD::CTPOP, MVT::i32, Expand);
396 
397   if (!Subtarget->hasBCNT(64))
398     setOperationAction(ISD::CTPOP, MVT::i64, Expand);
399 
400   if (Subtarget->hasFFBH())
401     setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
402 
403   if (Subtarget->hasFFBL())
404     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
405 
406   // We only really have 32-bit BFE instructions (and 16-bit on VI).
407   //
408   // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
409   // effort to match them now. We want this to be false for i64 cases when the
410   // extraction isn't restricted to the upper or lower half. Ideally we would
411   // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
412   // span the midpoint are probably relatively rare, so don't worry about them
413   // for now.
414   if (Subtarget->hasBFE())
415     setHasExtractBitsInsn(true);
416 
417   setOperationAction(ISD::FMINNUM, MVT::f32, Custom);
418   setOperationAction(ISD::FMAXNUM, MVT::f32, Custom);
419   setOperationAction(ISD::FMINNUM, MVT::f64, Custom);
420   setOperationAction(ISD::FMAXNUM, MVT::f64, Custom);
421 
422 
423   // These are really only legal for ieee_mode functions. We should be avoiding
424   // them for functions that don't have ieee_mode enabled, so just say they are
425   // legal.
426   setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
427   setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
428   setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
429   setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
430 
431 
432   if (Subtarget->haveRoundOpsF64()) {
433     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
434     setOperationAction(ISD::FCEIL, MVT::f64, Legal);
435     setOperationAction(ISD::FRINT, MVT::f64, Legal);
436   } else {
437     setOperationAction(ISD::FCEIL, MVT::f64, Custom);
438     setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
439     setOperationAction(ISD::FRINT, MVT::f64, Custom);
440     setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
441   }
442 
443   setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
444 
445   setOperationAction(ISD::FSIN, MVT::f32, Custom);
446   setOperationAction(ISD::FCOS, MVT::f32, Custom);
447   setOperationAction(ISD::FDIV, MVT::f32, Custom);
448   setOperationAction(ISD::FDIV, MVT::f64, Custom);
449 
450   if (Subtarget->has16BitInsts()) {
451     setOperationAction(ISD::Constant, MVT::i16, Legal);
452 
453     setOperationAction(ISD::SMIN, MVT::i16, Legal);
454     setOperationAction(ISD::SMAX, MVT::i16, Legal);
455 
456     setOperationAction(ISD::UMIN, MVT::i16, Legal);
457     setOperationAction(ISD::UMAX, MVT::i16, Legal);
458 
459     setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
460     AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);
461 
462     setOperationAction(ISD::ROTR, MVT::i16, Promote);
463     setOperationAction(ISD::ROTL, MVT::i16, Promote);
464 
465     setOperationAction(ISD::SDIV, MVT::i16, Promote);
466     setOperationAction(ISD::UDIV, MVT::i16, Promote);
467     setOperationAction(ISD::SREM, MVT::i16, Promote);
468     setOperationAction(ISD::UREM, MVT::i16, Promote);
469 
470     setOperationAction(ISD::BSWAP, MVT::i16, Promote);
471     setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);
472 
473     setOperationAction(ISD::CTTZ, MVT::i16, Promote);
474     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
475     setOperationAction(ISD::CTLZ, MVT::i16, Promote);
476     setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
477     setOperationAction(ISD::CTPOP, MVT::i16, Promote);
478 
479     setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
480 
481     setOperationAction(ISD::BR_CC, MVT::i16, Expand);
482 
483     setOperationAction(ISD::LOAD, MVT::i16, Custom);
484 
485     setTruncStoreAction(MVT::i64, MVT::i16, Expand);
486 
487     setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
488     AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
489     setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
490     AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);
491 
492     setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
493     setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
494     setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
495     setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
496 
497     // F16 - Constant Actions.
498     setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
499 
500     // F16 - Load/Store Actions.
501     setOperationAction(ISD::LOAD, MVT::f16, Promote);
502     AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
503     setOperationAction(ISD::STORE, MVT::f16, Promote);
504     AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);
505 
506     // F16 - VOP1 Actions.
507     setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
508     setOperationAction(ISD::FCOS, MVT::f16, Promote);
509     setOperationAction(ISD::FSIN, MVT::f16, Promote);
510     setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
511     setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
512     setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
513     setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
514     setOperationAction(ISD::FROUND, MVT::f16, Custom);
515 
516     // F16 - VOP2 Actions.
517     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
518     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
519 
520     setOperationAction(ISD::FDIV, MVT::f16, Custom);
521 
522     // F16 - VOP3 Actions.
523     setOperationAction(ISD::FMA, MVT::f16, Legal);
524     if (!Subtarget->hasFP16Denormals() && STI.hasMadF16())
525       setOperationAction(ISD::FMAD, MVT::f16, Legal);
526 
527     for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
528       for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
529         switch (Op) {
530         case ISD::LOAD:
531         case ISD::STORE:
532         case ISD::BUILD_VECTOR:
533         case ISD::BITCAST:
534         case ISD::EXTRACT_VECTOR_ELT:
535         case ISD::INSERT_VECTOR_ELT:
536         case ISD::INSERT_SUBVECTOR:
537         case ISD::EXTRACT_SUBVECTOR:
538         case ISD::SCALAR_TO_VECTOR:
539           break;
540         case ISD::CONCAT_VECTORS:
541           setOperationAction(Op, VT, Custom);
542           break;
543         default:
544           setOperationAction(Op, VT, Expand);
545           break;
546         }
547       }
548     }
549 
550     // XXX - Do these do anything? Vector constants turn into build_vector.
551     setOperationAction(ISD::Constant, MVT::v2i16, Legal);
552     setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);
553 
554     setOperationAction(ISD::UNDEF, MVT::v2i16, Legal);
555     setOperationAction(ISD::UNDEF, MVT::v2f16, Legal);
556 
557     setOperationAction(ISD::STORE, MVT::v2i16, Promote);
558     AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
559     setOperationAction(ISD::STORE, MVT::v2f16, Promote);
560     AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);
561 
562     setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
563     AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
564     setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
565     AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);
566 
567     setOperationAction(ISD::AND, MVT::v2i16, Promote);
568     AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
569     setOperationAction(ISD::OR, MVT::v2i16, Promote);
570     AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
571     setOperationAction(ISD::XOR, MVT::v2i16, Promote);
572     AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
573 
574     setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
575     AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
576     setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
577     AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);
578 
579     setOperationAction(ISD::STORE, MVT::v4i16, Promote);
580     AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
581     setOperationAction(ISD::STORE, MVT::v4f16, Promote);
582     AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);
583 
584     setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
585     setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
586     setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
587     setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
588 
589     setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand);
590     setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
591     setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);
592 
593     if (!Subtarget->hasVOP3PInsts()) {
594       setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
595       setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
596     }
597 
598     setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
599     // This isn't really legal, but this avoids the legalizer unrolling it (and
600     // allows matching fneg (fabs x) patterns)
601     setOperationAction(ISD::FABS, MVT::v2f16, Legal);
602 
603     setOperationAction(ISD::FMAXNUM, MVT::f16, Custom);
604     setOperationAction(ISD::FMINNUM, MVT::f16, Custom);
605     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal);
606     setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal);
607 
608     setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom);
609     setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom);
610 
611     setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand);
612     setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand);
613   }
614 
615   if (Subtarget->hasVOP3PInsts()) {
616     setOperationAction(ISD::ADD, MVT::v2i16, Legal);
617     setOperationAction(ISD::SUB, MVT::v2i16, Legal);
618     setOperationAction(ISD::MUL, MVT::v2i16, Legal);
619     setOperationAction(ISD::SHL, MVT::v2i16, Legal);
620     setOperationAction(ISD::SRL, MVT::v2i16, Legal);
621     setOperationAction(ISD::SRA, MVT::v2i16, Legal);
622     setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
623     setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
624     setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
625     setOperationAction(ISD::UMAX, MVT::v2i16, Legal);
626 
627     setOperationAction(ISD::FADD, MVT::v2f16, Legal);
628     setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
629     setOperationAction(ISD::FMA, MVT::v2f16, Legal);
630 
631     setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal);
632     setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal);
633 
634     setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);
635 
636     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
637     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
638 
639     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f16, Custom);
640     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
641 
642     setOperationAction(ISD::SHL, MVT::v4i16, Custom);
643     setOperationAction(ISD::SRA, MVT::v4i16, Custom);
644     setOperationAction(ISD::SRL, MVT::v4i16, Custom);
645     setOperationAction(ISD::ADD, MVT::v4i16, Custom);
646     setOperationAction(ISD::SUB, MVT::v4i16, Custom);
647     setOperationAction(ISD::MUL, MVT::v4i16, Custom);
648 
649     setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
650     setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
651     setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
652     setOperationAction(ISD::UMAX, MVT::v4i16, Custom);
653 
654     setOperationAction(ISD::FADD, MVT::v4f16, Custom);
655     setOperationAction(ISD::FMUL, MVT::v4f16, Custom);
656     setOperationAction(ISD::FMA, MVT::v4f16, Custom);
657 
658     setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom);
659     setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom);
660 
661     setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
662     setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);
663     setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom);
664 
665     setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
666     setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
667     setOperationAction(ISD::SELECT, MVT::v4f16, Custom);
668   }
669 
670   setOperationAction(ISD::FNEG, MVT::v4f16, Custom);
671   setOperationAction(ISD::FABS, MVT::v4f16, Custom);
672 
673   if (Subtarget->has16BitInsts()) {
674     setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
675     AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
676     setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
677     AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
678   } else {
679     // Legalization hack.
680     setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
681     setOperationAction(ISD::SELECT, MVT::v2f16, Custom);
682 
683     setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
684     setOperationAction(ISD::FABS, MVT::v2f16, Custom);
685   }
686 
687   for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
688     setOperationAction(ISD::SELECT, VT, Custom);
689   }
690 
691   setTargetDAGCombine(ISD::ADD);
692   setTargetDAGCombine(ISD::ADDCARRY);
693   setTargetDAGCombine(ISD::SUB);
694   setTargetDAGCombine(ISD::SUBCARRY);
695   setTargetDAGCombine(ISD::FADD);
696   setTargetDAGCombine(ISD::FSUB);
697   setTargetDAGCombine(ISD::FMINNUM);
698   setTargetDAGCombine(ISD::FMAXNUM);
699   setTargetDAGCombine(ISD::FMINNUM_IEEE);
700   setTargetDAGCombine(ISD::FMAXNUM_IEEE);
701   setTargetDAGCombine(ISD::FMA);
702   setTargetDAGCombine(ISD::SMIN);
703   setTargetDAGCombine(ISD::SMAX);
704   setTargetDAGCombine(ISD::UMIN);
705   setTargetDAGCombine(ISD::UMAX);
706   setTargetDAGCombine(ISD::SETCC);
707   setTargetDAGCombine(ISD::AND);
708   setTargetDAGCombine(ISD::OR);
709   setTargetDAGCombine(ISD::XOR);
710   setTargetDAGCombine(ISD::SINT_TO_FP);
711   setTargetDAGCombine(ISD::UINT_TO_FP);
712   setTargetDAGCombine(ISD::FCANONICALIZE);
713   setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
714   setTargetDAGCombine(ISD::ZERO_EXTEND);
715   setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
716   setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
717   setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
718 
719   // All memory operations. Some folding on the pointer operand is done to help
720   // matching the constant offsets in the addressing modes.
721   setTargetDAGCombine(ISD::LOAD);
722   setTargetDAGCombine(ISD::STORE);
723   setTargetDAGCombine(ISD::ATOMIC_LOAD);
724   setTargetDAGCombine(ISD::ATOMIC_STORE);
725   setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
726   setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
727   setTargetDAGCombine(ISD::ATOMIC_SWAP);
728   setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
729   setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
730   setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
731   setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
732   setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
733   setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
734   setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
735   setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
736   setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
737   setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
738   setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD);
739 
740   setSchedulingPreference(Sched::RegPressure);
741 }
742 
743 const GCNSubtarget *SITargetLowering::getSubtarget() const {
744   return Subtarget;
745 }
746 
747 //===----------------------------------------------------------------------===//
748 // TargetLowering queries
749 //===----------------------------------------------------------------------===//
750 
751 // v_mad_mix* support a conversion from f16 to f32.
752 //
// There is only one special case, when denormals are enabled, which we don't
// currently handle, where this is OK to use.
755 bool SITargetLowering::isFPExtFoldable(unsigned Opcode,
756                                            EVT DestVT, EVT SrcVT) const {
757   return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
758           (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
759          DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() &&
760          SrcVT.getScalarType() == MVT::f16;
761 }
762 
763 bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
764   // SI has some legal vector types, but no legal vector operations. Say no
765   // shuffles are legal in order to prefer scalarizing some vector operations.
766   return false;
767 }
768 
769 MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
770                                                     CallingConv::ID CC,
771                                                     EVT VT) const {
772   if (CC == CallingConv::AMDGPU_KERNEL)
773     return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
774 
775   if (VT.isVector()) {
776     EVT ScalarVT = VT.getScalarType();
777     unsigned Size = ScalarVT.getSizeInBits();
778     if (Size == 32)
779       return ScalarVT.getSimpleVT();
780 
781     if (Size > 32)
782       return MVT::i32;
783 
784     if (Size == 16 && Subtarget->has16BitInsts())
785       return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
786   } else if (VT.getSizeInBits() > 32)
787     return MVT::i32;
788 
789   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
790 }
791 
792 unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
793                                                          CallingConv::ID CC,
794                                                          EVT VT) const {
795   if (CC == CallingConv::AMDGPU_KERNEL)
796     return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
797 
798   if (VT.isVector()) {
799     unsigned NumElts = VT.getVectorNumElements();
800     EVT ScalarVT = VT.getScalarType();
801     unsigned Size = ScalarVT.getSizeInBits();
802 
803     if (Size == 32)
804       return NumElts;
805 
806     if (Size > 32)
807       return NumElts * ((Size + 31) / 32);
808 
809     if (Size == 16 && Subtarget->has16BitInsts())
810       return (NumElts + 1) / 2;
811   } else if (VT.getSizeInBits() > 32)
812     return (VT.getSizeInBits() + 31) / 32;
813 
814   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
815 }
816 
817 unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
818   LLVMContext &Context, CallingConv::ID CC,
819   EVT VT, EVT &IntermediateVT,
820   unsigned &NumIntermediates, MVT &RegisterVT) const {
821   if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
822     unsigned NumElts = VT.getVectorNumElements();
823     EVT ScalarVT = VT.getScalarType();
824     unsigned Size = ScalarVT.getSizeInBits();
825     if (Size == 32) {
826       RegisterVT = ScalarVT.getSimpleVT();
827       IntermediateVT = RegisterVT;
828       NumIntermediates = NumElts;
829       return NumIntermediates;
830     }
831 
832     if (Size > 32) {
833       RegisterVT = MVT::i32;
834       IntermediateVT = RegisterVT;
835       NumIntermediates = NumElts * ((Size + 31) / 32);
836       return NumIntermediates;
837     }
838 
839     // FIXME: We should fix the ABI to be the same on targets without 16-bit
    // support, but unless we can properly handle 3-vectors, it will still be
841     // inconsistent.
842     if (Size == 16 && Subtarget->has16BitInsts()) {
843       RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
844       IntermediateVT = RegisterVT;
845       NumIntermediates = (NumElts + 1) / 2;
846       return NumIntermediates;
847     }
848   }
849 
850   return TargetLowering::getVectorTypeBreakdownForCallingConv(
851     Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
852 }
853 
854 static MVT memVTFromAggregate(Type *Ty) {
855   // Only limited forms of aggregate type currently expected.
856   assert(Ty->isStructTy() && "Expected struct type");
857 
858 
859   Type *ElementType = nullptr;
860   unsigned NumElts;
861   if (Ty->getContainedType(0)->isVectorTy()) {
862     VectorType *VecComponent = cast<VectorType>(Ty->getContainedType(0));
863     ElementType = VecComponent->getElementType();
864     NumElts = VecComponent->getNumElements();
865   } else {
866     ElementType = Ty->getContainedType(0);
867     NumElts = 1;
868   }
869 
  assert((Ty->getContainedType(1) &&
          Ty->getContainedType(1)->isIntegerTy(32)) &&
         "Expected int32 type");
871 
872   // Calculate the size of the memVT type from the aggregate
873   unsigned Pow2Elts = 0;
874   unsigned ElementSize;
875   switch (ElementType->getTypeID()) {
876     default:
877       llvm_unreachable("Unknown type!");
878     case Type::IntegerTyID:
879       ElementSize = cast<IntegerType>(ElementType)->getBitWidth();
880       break;
881     case Type::HalfTyID:
882       ElementSize = 16;
883       break;
884     case Type::FloatTyID:
885       ElementSize = 32;
886       break;
887   }
888   unsigned AdditionalElts = ElementSize == 16 ? 2 : 1;
889   Pow2Elts = 1 << Log2_32_Ceil(NumElts + AdditionalElts);
890 
891   return MVT::getVectorVT(MVT::getVT(ElementType, false),
892                           Pow2Elts);
893 }
894 
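// Describe the memory behavior of target intrinsics so that SelectionDAG can
// attach the appropriate MachineMemOperand (type, pointer value, alignment,
// and load/store/volatile flags) to the intrinsic node.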
895 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
896                                           const CallInst &CI,
897                                           MachineFunction &MF,
898                                           unsigned IntrID) const {
899   if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
900           AMDGPU::lookupRsrcIntrinsic(IntrID)) {
901     AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
902                                                   (Intrinsic::ID)IntrID);
903     if (Attr.hasFnAttribute(Attribute::ReadNone))
904       return false;
905 
906     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
907 
908     if (RsrcIntr->IsImage) {
909       Info.ptrVal = MFI->getImagePSV(
910         *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
911         CI.getArgOperand(RsrcIntr->RsrcArg));
912       Info.align = 0;
913     } else {
914       Info.ptrVal = MFI->getBufferPSV(
915         *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
916         CI.getArgOperand(RsrcIntr->RsrcArg));
917     }
918 
919     Info.flags = MachineMemOperand::MODereferenceable;
920     if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
921       Info.opc = ISD::INTRINSIC_W_CHAIN;
922       Info.memVT = MVT::getVT(CI.getType(), true);
923       if (Info.memVT == MVT::Other) {
924         // Some intrinsics return an aggregate type - special case to work out
925         // the correct memVT
926         Info.memVT = memVTFromAggregate(CI.getType());
927       }
928       Info.flags |= MachineMemOperand::MOLoad;
929     } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
930       Info.opc = ISD::INTRINSIC_VOID;
931       Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
932       Info.flags |= MachineMemOperand::MOStore;
933     } else {
934       // Atomic
935       Info.opc = ISD::INTRINSIC_W_CHAIN;
936       Info.memVT = MVT::getVT(CI.getType());
937       Info.flags = MachineMemOperand::MOLoad |
938                    MachineMemOperand::MOStore |
939                    MachineMemOperand::MODereferenceable;
940 
941       // XXX - Should this be volatile without known ordering?
942       Info.flags |= MachineMemOperand::MOVolatile;
943     }
944     return true;
945   }
946 
947   switch (IntrID) {
948   case Intrinsic::amdgcn_atomic_inc:
949   case Intrinsic::amdgcn_atomic_dec:
950   case Intrinsic::amdgcn_ds_ordered_add:
951   case Intrinsic::amdgcn_ds_ordered_swap:
952   case Intrinsic::amdgcn_ds_fadd:
953   case Intrinsic::amdgcn_ds_fmin:
954   case Intrinsic::amdgcn_ds_fmax: {
955     Info.opc = ISD::INTRINSIC_W_CHAIN;
956     Info.memVT = MVT::getVT(CI.getType());
957     Info.ptrVal = CI.getOperand(0);
958     Info.align = 0;
959     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
960 
961     const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4));
962     if (!Vol->isZero())
963       Info.flags |= MachineMemOperand::MOVolatile;
964 
965     return true;
966   }
967   case Intrinsic::amdgcn_buffer_atomic_fadd: {
968     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
969 
970     Info.opc = ISD::INTRINSIC_VOID;
971     Info.memVT = MVT::getVT(CI.getOperand(0)->getType());
972     Info.ptrVal = MFI->getBufferPSV(
973       *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
974       CI.getArgOperand(1));
975     Info.align = 0;
976     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
977 
978     const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
979     if (!Vol || !Vol->isZero())
980       Info.flags |= MachineMemOperand::MOVolatile;
981 
982     return true;
983   }
984   case Intrinsic::amdgcn_global_atomic_fadd: {
985     Info.opc = ISD::INTRINSIC_VOID;
986     Info.memVT = MVT::getVT(CI.getOperand(0)->getType()
987                             ->getPointerElementType());
988     Info.ptrVal = CI.getOperand(0);
989     Info.align = 0;
990     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
991 
992     return true;
993   }
994   case Intrinsic::amdgcn_ds_append:
995   case Intrinsic::amdgcn_ds_consume: {
996     Info.opc = ISD::INTRINSIC_W_CHAIN;
997     Info.memVT = MVT::getVT(CI.getType());
998     Info.ptrVal = CI.getOperand(0);
999     Info.align = 0;
1000     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1001 
1002     const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1));
1003     if (!Vol->isZero())
1004       Info.flags |= MachineMemOperand::MOVolatile;
1005 
1006     return true;
1007   }
1008   case Intrinsic::amdgcn_ds_gws_init:
1009   case Intrinsic::amdgcn_ds_gws_barrier:
1010   case Intrinsic::amdgcn_ds_gws_sema_v:
1011   case Intrinsic::amdgcn_ds_gws_sema_br:
1012   case Intrinsic::amdgcn_ds_gws_sema_p:
1013   case Intrinsic::amdgcn_ds_gws_sema_release_all: {
1014     Info.opc = ISD::INTRINSIC_VOID;
1015 
1016     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1017     Info.ptrVal =
1018         MFI->getGWSPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());
1019 
1020     // This is an abstract access, but we need to specify a type and size.
1021     Info.memVT = MVT::i32;
1022     Info.size = 4;
1023     Info.align = 4;
1024 
1025     Info.flags = MachineMemOperand::MOStore;
1026     if (IntrID == Intrinsic::amdgcn_ds_gws_barrier)
1027       Info.flags = MachineMemOperand::MOLoad;
1028     return true;
1029   }
1030   default:
1031     return false;
1032   }
1033 }
1034 
1035 bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
1036                                             SmallVectorImpl<Value*> &Ops,
1037                                             Type *&AccessTy) const {
1038   switch (II->getIntrinsicID()) {
1039   case Intrinsic::amdgcn_atomic_inc:
1040   case Intrinsic::amdgcn_atomic_dec:
1041   case Intrinsic::amdgcn_ds_ordered_add:
1042   case Intrinsic::amdgcn_ds_ordered_swap:
1043   case Intrinsic::amdgcn_ds_fadd:
1044   case Intrinsic::amdgcn_ds_fmin:
1045   case Intrinsic::amdgcn_ds_fmax: {
1046     Value *Ptr = II->getArgOperand(0);
1047     AccessTy = II->getType();
1048     Ops.push_back(Ptr);
1049     return true;
1050   }
1051   default:
1052     return false;
1053   }
1054 }
1055 
1056 bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
1057   if (!Subtarget->hasFlatInstOffsets()) {
1058     // Flat instructions do not have offsets, and only have the register
1059     // address.
1060     return AM.BaseOffs == 0 && AM.Scale == 0;
1061   }
1062 
  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and the offset is treated as a 12-bit unsigned
  // offset.

  // GFX10 shrank the signed offset to 12 bits. When using regular flat
  // instructions, the sign bit is likewise ignored and the offset is treated
  // as an 11-bit unsigned offset.
1069 
1070   if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
1071     return isUInt<11>(AM.BaseOffs) && AM.Scale == 0;
1072 
1073   // Just r + i
1074   return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
1075 }
1076 
1077 bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
1078   if (Subtarget->hasFlatGlobalInsts())
1079     return isInt<13>(AM.BaseOffs) && AM.Scale == 0;
1080 
1081   if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
      // Assume that we will use FLAT for all global memory accesses
1083       // on VI.
1084       // FIXME: This assumption is currently wrong.  On VI we still use
1085       // MUBUF instructions for the r + i addressing mode.  As currently
1086       // implemented, the MUBUF instructions only work on buffer < 4GB.
1087       // It may be possible to support > 4GB buffers with MUBUF instructions,
1088       // by setting the stride value in the resource descriptor which would
1089       // increase the size limit to (stride * 4GB).  However, this is risky,
1090       // because it has never been validated.
1091     return isLegalFlatAddressingMode(AM);
1092   }
1093 
1094   return isLegalMUBUFAddressingMode(AM);
1095 }
1096 
1097 bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
1098   // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
1099   // additionally can do r + r + i with addr64. 32-bit has more addressing
1100   // mode options. Depending on the resource constant, it can also do
1101   // (i64 r0) + (i32 r1) * (i14 i).
1102   //
1103   // Private arrays end up using a scratch buffer most of the time, so also
1104   // assume those use MUBUF instructions. Scratch loads / stores are currently
1105   // implemented as mubuf instructions with offen bit set, so slightly
1106   // different than the normal addr64.
1107   if (!isUInt<12>(AM.BaseOffs))
1108     return false;
1109 
1110   // FIXME: Since we can split immediate into soffset and immediate offset,
1111   // would it make sense to allow any immediate?
1112 
1113   switch (AM.Scale) {
1114   case 0: // r + i or just i, depending on HasBaseReg.
1115     return true;
1116   case 1:
1117     return true; // We have r + r or r + i.
1118   case 2:
1119     if (AM.HasBaseReg) {
1120       // Reject 2 * r + r.
1121       return false;
1122     }
1123 
1124     // Allow 2 * r as r + r
1125     // Or  2 * r + i is allowed as r + r + i.
1126     return true;
1127   default: // Don't allow n * r
1128     return false;
1129   }
1130 }
1131 
1132 bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
1133                                              const AddrMode &AM, Type *Ty,
1134                                              unsigned AS, Instruction *I) const {
1135   // No global is ever allowed as a base.
1136   if (AM.BaseGV)
1137     return false;
1138 
1139   if (AS == AMDGPUAS::GLOBAL_ADDRESS)
1140     return isLegalGlobalAddressingMode(AM);
1141 
1142   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
1143       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
1144       AS == AMDGPUAS::BUFFER_FAT_POINTER) {
1145     // If the offset isn't a multiple of 4, it probably isn't going to be
1146     // correctly aligned.
1147     // FIXME: Can we get the real alignment here?
1148     if (AM.BaseOffs % 4 != 0)
1149       return isLegalMUBUFAddressingMode(AM);
1150 
1151     // There are no SMRD extloads, so if we have to do a small type access we
1152     // will use a MUBUF load.
1153     // FIXME?: We also need to do this if unaligned, but we don't know the
1154     // alignment here.
1155     if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
1156       return isLegalGlobalAddressingMode(AM);
1157 
1158     if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
1159       // SMRD instructions have an 8-bit, dword offset on SI.
1160       if (!isUInt<8>(AM.BaseOffs / 4))
1161         return false;
1162     } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
1163       // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8 bits, it can use a smaller encoding.
1165       if (!isUInt<32>(AM.BaseOffs / 4))
1166         return false;
1167     } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
1168       // On VI, these use the SMEM format and the offset is 20-bit in bytes.
1169       if (!isUInt<20>(AM.BaseOffs))
1170         return false;
1171     } else
1172       llvm_unreachable("unhandled generation");
1173 
1174     if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1175       return true;
1176 
1177     if (AM.Scale == 1 && AM.HasBaseReg)
1178       return true;
1179 
1180     return false;
1181 
1182   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
1183     return isLegalMUBUFAddressingMode(AM);
1184   } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
1185              AS == AMDGPUAS::REGION_ADDRESS) {
1186     // Basic, single offset DS instructions allow a 16-bit unsigned immediate
1187     // field.
1188     // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
1189     // an 8-bit dword offset but we don't know the alignment here.
1190     if (!isUInt<16>(AM.BaseOffs))
1191       return false;
1192 
1193     if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1194       return true;
1195 
1196     if (AM.Scale == 1 && AM.HasBaseReg)
1197       return true;
1198 
1199     return false;
1200   } else if (AS == AMDGPUAS::FLAT_ADDRESS ||
1201              AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) {
1202     // For an unknown address space, this usually means that this is for some
1203     // reason being used for pure arithmetic, and not based on some addressing
1204     // computation. We don't have instructions that compute pointers with any
1205     // addressing modes, so treat them as having no offset like flat
1206     // instructions.
1207     return isLegalFlatAddressingMode(AM);
1208   } else {
1209     llvm_unreachable("unhandled address space");
1210   }
1211 }
1212 
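// Limit merged accesses to what a single memory instruction can cover in each
// address space: dwordx4 for flat/global, the configured private element size
// for scratch, and a 64-bit access for LDS/GDS.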
1213 bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
1214                                         const SelectionDAG &DAG) const {
1215   if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
1216     return (MemVT.getSizeInBits() <= 4 * 32);
1217   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
1218     unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
1219     return (MemVT.getSizeInBits() <= MaxPrivateBits);
1220   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
1221     return (MemVT.getSizeInBits() <= 2 * 32);
1222   }
1223   return true;
1224 }
1225 
1226 bool SITargetLowering::allowsMisalignedMemoryAccesses(
1227     EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
1228     bool *IsFast) const {
1229   if (IsFast)
1230     *IsFast = false;
1231 
1232   // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
1233   // which isn't a simple VT.
1234   // Until MVT is extended to handle this, simply check for the size and
1235   // rely on the condition below: allow accesses if the size is a multiple of 4.
1236   if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
1237                            VT.getStoreSize() > 16)) {
1238     return false;
1239   }
1240 
1241   if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1242       AddrSpace == AMDGPUAS::REGION_ADDRESS) {
1243     // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
1244     // aligned, 8 byte access in a single operation using ds_read2/write2_b32
1245     // with adjacent offsets.
1246     bool AlignedBy4 = (Align % 4 == 0);
1247     if (IsFast)
1248       *IsFast = AlignedBy4;
1249 
1250     return AlignedBy4;
1251   }
1252 
1253   // FIXME: We have to be conservative here and assume that flat operations
1254   // will access scratch.  If we had access to the IR function, then we
1255   // could determine if any private memory was used in the function.
1256   if (!Subtarget->hasUnalignedScratchAccess() &&
1257       (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
1258        AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
1259     bool AlignedBy4 = Align >= 4;
1260     if (IsFast)
1261       *IsFast = AlignedBy4;
1262 
1263     return AlignedBy4;
1264   }
1265 
1266   if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have a uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
1269     if (IsFast) {
1270       *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
1271                  AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
1272         (Align % 4 == 0) : true;
1273     }
1274 
1275     return true;
1276   }
1277 
  // Values smaller than a dword must be aligned.
1279   if (VT.bitsLT(MVT::i32))
1280     return false;
1281 
1282   // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
1283   // byte-address are ignored, thus forcing Dword alignment.
1284   // This applies to private, global, and constant memory.
1285   if (IsFast)
1286     *IsFast = true;
1287 
1288   return VT.bitsGT(MVT::i32) && Align % 4 == 0;
1289 }
1290 
1291 EVT SITargetLowering::getOptimalMemOpType(
1292     uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
1293     bool ZeroMemset, bool MemcpyStrSrc,
1294     const AttributeList &FuncAttributes) const {
1295   // FIXME: Should account for address space here.
1296 
1297   // The default fallback uses the private pointer size as a guess for a type to
1298   // use. Make sure we switch these to 64-bit accesses.
1299 
1300   if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
1301     return MVT::v4i32;
1302 
1303   if (Size >= 8 && DstAlign >= 4)
1304     return MVT::v2i32;
1305 
1306   // Use the default.
1307   return MVT::Other;
1308 }
1309 
1310 static bool isFlatGlobalAddrSpace(unsigned AS) {
1311   return AS == AMDGPUAS::GLOBAL_ADDRESS ||
1312          AS == AMDGPUAS::FLAT_ADDRESS ||
1313          AS == AMDGPUAS::CONSTANT_ADDRESS ||
1314          AS > AMDGPUAS::MAX_AMDGPU_ADDRESS;
1315 }
1316 
1317 bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1318                                            unsigned DestAS) const {
1319   return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
1320 }
1321 
1322 bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
1323   const MemSDNode *MemNode = cast<MemSDNode>(N);
1324   const Value *Ptr = MemNode->getMemOperand()->getValue();
1325   const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
1326   return I && I->getMetadata("amdgpu.noclobber");
1327 }
1328 
1329 bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS,
1330                                            unsigned DestAS) const {
1331   // Flat -> private/local is a simple truncate.
  // Flat -> global is a no-op.
1333   if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
1334     return true;
1335 
1336   return isNoopAddrSpaceCast(SrcAS, DestAS);
1337 }
1338 
1339 bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
1340   const MemSDNode *MemNode = cast<MemSDNode>(N);
1341 
1342   return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
1343 }
1344 
1345 TargetLoweringBase::LegalizeTypeAction
1346 SITargetLowering::getPreferredVectorAction(MVT VT) const {
1347   if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
1348     return TypeSplitVector;
1349 
1350   return TargetLoweringBase::getPreferredVectorAction(VT);
1351 }
1352 
1353 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
1354                                                          Type *Ty) const {
1355   // FIXME: Could be smarter if called for vector constants.
1356   return true;
1357 }
1358 
1359 bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
1360   if (Subtarget->has16BitInsts() && VT == MVT::i16) {
1361     switch (Op) {
1362     case ISD::LOAD:
1363     case ISD::STORE:
1364 
1365     // These operations are done with 32-bit instructions anyway.
1366     case ISD::AND:
1367     case ISD::OR:
1368     case ISD::XOR:
1369     case ISD::SELECT:
1370       // TODO: Extensions?
1371       return true;
1372     default:
1373       return false;
1374     }
1375   }
1376 
1377   // SimplifySetCC uses this function to determine whether or not it should
1378   // create setcc with i1 operands.  We don't have instructions for i1 setcc.
1379   if (VT == MVT::i1 && Op == ISD::SETCC)
1380     return false;
1381 
1382   return TargetLowering::isTypeDesirableForOp(Op, VT);
1383 }
1384 
1385 SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1386                                                    const SDLoc &SL,
1387                                                    SDValue Chain,
1388                                                    uint64_t Offset) const {
1389   const DataLayout &DL = DAG.getDataLayout();
1390   MachineFunction &MF = DAG.getMachineFunction();
1391   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1392 
1393   const ArgDescriptor *InputPtrReg;
1394   const TargetRegisterClass *RC;
1395 
1396   std::tie(InputPtrReg, RC)
1397     = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
1398 
1399   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
1400   MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
1401   SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
1402     MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1403 
1404   return DAG.getObjectPtrOffset(SL, BasePtr, Offset);
1405 }
1406 
1407 SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1408                                             const SDLoc &SL) const {
1409   uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
1410                                                FIRST_IMPLICIT);
1411   return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1412 }
1413 
1414 SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1415                                          const SDLoc &SL, SDValue Val,
1416                                          bool Signed,
1417                                          const ISD::InputArg *Arg) const {
1418   // First, if it is a widened vector, narrow it.
1419   if (VT.isVector() &&
1420       VT.getVectorNumElements() != MemVT.getVectorNumElements()) {
1421     EVT NarrowedVT =
1422         EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(),
1423                          VT.getVectorNumElements());
1424     Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val,
1425                       DAG.getConstant(0, SL, MVT::i32));
1426   }
1427 
1428   // Then convert the vector elements or scalar value.
1429   if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1430       VT.bitsLT(MemVT)) {
1431     unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1432     Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1433   }
1434 
1435   if (MemVT.isFloatingPoint())
1436     Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
1437   else if (Signed)
1438     Val = DAG.getSExtOrTrunc(Val, SL, VT);
1439   else
1440     Val = DAG.getZExtOrTrunc(Val, SL, VT);
1441 
1442   return Val;
1443 }
1444 
1445 SDValue SITargetLowering::lowerKernargMemParameter(
1446   SelectionDAG &DAG, EVT VT, EVT MemVT,
1447   const SDLoc &SL, SDValue Chain,
1448   uint64_t Offset, unsigned Align, bool Signed,
1449   const ISD::InputArg *Arg) const {
1450   Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
1451   PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
1452   MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
1453 
1454   // Try to avoid using an extload by loading earlier than the argument address,
1455   // and extracting the relevant bits. The load should hopefully be merged with
1456   // the previous argument.
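  // For example, an i16 kernarg at byte offset 2 gives AlignDownOffset = 0 and
  // OffsetDiff = 2: the dword at offset 0 is loaded, shifted right by 16 bits,
  // and truncated to recover the argument.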
1457   if (MemVT.getStoreSize() < 4 && Align < 4) {
1458     // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
1459     int64_t AlignDownOffset = alignDown(Offset, 4);
1460     int64_t OffsetDiff = Offset - AlignDownOffset;
1461 
1462     EVT IntVT = MemVT.changeTypeToInteger();
1463 
1464     // TODO: If we passed in the base kernel offset we could have a better
1465     // alignment than 4, but we don't really need it.
1466     SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
1467     SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4,
1468                                MachineMemOperand::MODereferenceable |
1469                                MachineMemOperand::MOInvariant);
1470 
1471     SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
1472     SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
1473 
1474     SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
1475     ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
1476     ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);
1477 
1479     return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
1480   }
1481 
1482   SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1483   SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
1484                              MachineMemOperand::MODereferenceable |
1485                              MachineMemOperand::MOInvariant);
1486 
1487   SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
1488   return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
1489 }
1490 
1491 SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1492                                               const SDLoc &SL, SDValue Chain,
1493                                               const ISD::InputArg &Arg) const {
1494   MachineFunction &MF = DAG.getMachineFunction();
1495   MachineFrameInfo &MFI = MF.getFrameInfo();
1496 
1497   if (Arg.Flags.isByVal()) {
1498     unsigned Size = Arg.Flags.getByValSize();
1499     int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1500     return DAG.getFrameIndex(FrameIdx, MVT::i32);
1501   }
1502 
1503   unsigned ArgOffset = VA.getLocMemOffset();
1504   unsigned ArgSize = VA.getValVT().getStoreSize();
1505 
1506   int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1507 
1508   // Create load nodes to retrieve arguments from the stack.
1509   SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1510   SDValue ArgValue;
1511 
  // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
1513   ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
1514   MVT MemVT = VA.getValVT();
1515 
1516   switch (VA.getLocInfo()) {
1517   default:
1518     break;
1519   case CCValAssign::BCvt:
1520     MemVT = VA.getLocVT();
1521     break;
1522   case CCValAssign::SExt:
1523     ExtType = ISD::SEXTLOAD;
1524     break;
1525   case CCValAssign::ZExt:
1526     ExtType = ISD::ZEXTLOAD;
1527     break;
1528   case CCValAssign::AExt:
1529     ExtType = ISD::EXTLOAD;
1530     break;
1531   }
1532 
1533   ArgValue = DAG.getExtLoad(
1534     ExtType, SL, VA.getLocVT(), Chain, FIN,
1535     MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
1536     MemVT);
1537   return ArgValue;
1538 }
1539 
1540 SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1541   const SIMachineFunctionInfo &MFI,
1542   EVT VT,
1543   AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
1544   const ArgDescriptor *Reg;
1545   const TargetRegisterClass *RC;
1546 
1547   std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1548   return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1549 }
1550 
1551 static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
1552                                    CallingConv::ID CallConv,
1553                                    ArrayRef<ISD::InputArg> Ins,
1554                                    BitVector &Skipped,
1555                                    FunctionType *FType,
1556                                    SIMachineFunctionInfo *Info) {
1557   for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
1558     const ISD::InputArg *Arg = &Ins[I];
1559 
1560     assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
1561            "vector type argument should have been split");
1562 
1563     // First check if it's a PS input addr.
1564     if (CallConv == CallingConv::AMDGPU_PS &&
1565         !Arg->Flags.isInReg() && PSInputNum <= 15) {
1566       bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);
1567 
1568       // Inconveniently only the first part of the split is marked as isSplit,
1569       // so skip to the end. We only want to increment PSInputNum once for the
1570       // entire split argument.
1571       if (Arg->Flags.isSplit()) {
1572         while (!Arg->Flags.isSplitEnd()) {
1573           assert((!Arg->VT.isVector() ||
1574                   Arg->VT.getScalarSizeInBits() == 16) &&
1575                  "unexpected vector split in ps argument type");
1576           if (!SkipArg)
1577             Splits.push_back(*Arg);
1578           Arg = &Ins[++I];
1579         }
1580       }
1581 
1582       if (SkipArg) {
1583         // We can safely skip PS inputs.
1584         Skipped.set(Arg->getOrigArgIndex());
1585         ++PSInputNum;
1586         continue;
1587       }
1588 
1589       Info->markPSInputAllocated(PSInputNum);
1590       if (Arg->Used)
1591         Info->markPSInputEnabled(PSInputNum);
1592 
1593       ++PSInputNum;
1594     }
1595 
1596     Splits.push_back(*Arg);
1597   }
1598 }
1599 
1600 // Allocate special inputs passed in VGPRs.
1601 void SITargetLowering::allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1602                                                       MachineFunction &MF,
1603                                                       const SIRegisterInfo &TRI,
1604                                                       SIMachineFunctionInfo &Info) const {
1605   const LLT S32 = LLT::scalar(32);
1606   MachineRegisterInfo &MRI = MF.getRegInfo();
1607 
1608   if (Info.hasWorkItemIDX()) {
1609     Register Reg = AMDGPU::VGPR0;
1610     MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1611 
1612     CCInfo.AllocateReg(Reg);
1613     Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
1614   }
1615 
1616   if (Info.hasWorkItemIDY()) {
1617     Register Reg = AMDGPU::VGPR1;
1618     MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1619 
1620     CCInfo.AllocateReg(Reg);
1621     Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
1622   }
1623 
1624   if (Info.hasWorkItemIDZ()) {
1625     Register Reg = AMDGPU::VGPR2;
1626     MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1627 
1628     CCInfo.AllocateReg(Reg);
1629     Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
1630   }
1631 }
1632 
// Try to allocate a VGPR at the end of the argument list, or if no argument
// VGPRs are left, allocate a stack slot.
// If \p Mask is given, it indicates the bitfield position in the register.
// If \p Arg is given, reuse it with the new \p Mask instead of allocating a
// new one.
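// For example, allocateSpecialInputVGPRs below passes Mask = 0x3ff << 10 for
// the work-item ID Y so that the ID occupies bits [19:10] of the shared VGPR.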
1637 static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u,
1638                                          ArgDescriptor Arg = ArgDescriptor()) {
1639   if (Arg.isSet())
1640     return ArgDescriptor::createArg(Arg, Mask);
1641 
1642   ArrayRef<MCPhysReg> ArgVGPRs
1643     = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1644   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1645   if (RegIdx == ArgVGPRs.size()) {
1646     // Spill to stack required.
1647     int64_t Offset = CCInfo.AllocateStack(4, 4);
1648 
1649     return ArgDescriptor::createStack(Offset, Mask);
1650   }
1651 
1652   unsigned Reg = ArgVGPRs[RegIdx];
1653   Reg = CCInfo.AllocateReg(Reg);
1654   assert(Reg != AMDGPU::NoRegister);
1655 
1656   MachineFunction &MF = CCInfo.getMachineFunction();
1657   MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1658   return ArgDescriptor::createRegister(Reg, Mask);
1659 }
1660 
1661 static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
1662                                              const TargetRegisterClass *RC,
1663                                              unsigned NumArgRegs) {
1664   ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
1665   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1666   if (RegIdx == ArgSGPRs.size())
1667     report_fatal_error("ran out of SGPRs for arguments");
1668 
1669   unsigned Reg = ArgSGPRs[RegIdx];
1670   Reg = CCInfo.AllocateReg(Reg);
1671   assert(Reg != AMDGPU::NoRegister);
1672 
1673   MachineFunction &MF = CCInfo.getMachineFunction();
1674   MF.addLiveIn(Reg, RC);
1675   return ArgDescriptor::createRegister(Reg);
1676 }
1677 
1678 static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
1679   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1680 }
1681 
1682 static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
1683   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1684 }
1685 
1686 void SITargetLowering::allocateSpecialInputVGPRs(CCState &CCInfo,
1687                                                  MachineFunction &MF,
1688                                                  const SIRegisterInfo &TRI,
1689                                                  SIMachineFunctionInfo &Info) const {
1690   const unsigned Mask = 0x3ff;
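  // The three work-item IDs share one VGPR: X in bits [9:0], Y in bits
  // [19:10], and Z in bits [29:20]; each ID is at most 10 bits wide.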
1691   ArgDescriptor Arg;
1692 
1693   if (Info.hasWorkItemIDX()) {
1694     Arg = allocateVGPR32Input(CCInfo, Mask);
1695     Info.setWorkItemIDX(Arg);
1696   }
1697 
1698   if (Info.hasWorkItemIDY()) {
1699     Arg = allocateVGPR32Input(CCInfo, Mask << 10, Arg);
1700     Info.setWorkItemIDY(Arg);
1701   }
1702 
1703   if (Info.hasWorkItemIDZ())
1704     Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo, Mask << 20, Arg));
1705 }
1706 
1707 void SITargetLowering::allocateSpecialInputSGPRs(
1708   CCState &CCInfo,
1709   MachineFunction &MF,
1710   const SIRegisterInfo &TRI,
1711   SIMachineFunctionInfo &Info) const {
1712   auto &ArgInfo = Info.getArgInfo();
1713 
1714   // TODO: Unify handling with private memory pointers.
1715 
1716   if (Info.hasDispatchPtr())
1717     ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1718 
1719   if (Info.hasQueuePtr())
1720     ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1721 
1722   if (Info.hasKernargSegmentPtr())
1723     ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);
1724 
1725   if (Info.hasDispatchID())
1726     ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1727 
1728   // flat_scratch_init is not applicable for non-kernel functions.
1729 
1730   if (Info.hasWorkGroupIDX())
1731     ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1732 
1733   if (Info.hasWorkGroupIDY())
1734     ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1735 
1736   if (Info.hasWorkGroupIDZ())
1737     ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
1738 
1739   if (Info.hasImplicitArgPtr())
1740     ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
1741 }
1742 
1743 // Allocate special inputs passed in user SGPRs.
1744 void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo,
1745                                             MachineFunction &MF,
1746                                             const SIRegisterInfo &TRI,
1747                                             SIMachineFunctionInfo &Info) const {
1748   if (Info.hasImplicitBufferPtr()) {
1749     unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1750     MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1751     CCInfo.AllocateReg(ImplicitBufferPtrReg);
1752   }
1753 
1754   // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1755   if (Info.hasPrivateSegmentBuffer()) {
1756     unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1757     MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1758     CCInfo.AllocateReg(PrivateSegmentBufferReg);
1759   }
1760 
1761   if (Info.hasDispatchPtr()) {
1762     unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1763     MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1764     CCInfo.AllocateReg(DispatchPtrReg);
1765   }
1766 
1767   if (Info.hasQueuePtr()) {
1768     unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1769     MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1770     CCInfo.AllocateReg(QueuePtrReg);
1771   }
1772 
1773   if (Info.hasKernargSegmentPtr()) {
1774     MachineRegisterInfo &MRI = MF.getRegInfo();
1775     Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
1776     CCInfo.AllocateReg(InputPtrReg);
1777 
1778     Register VReg = MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1779     MRI.setType(VReg, LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
1780   }
1781 
1782   if (Info.hasDispatchID()) {
1783     unsigned DispatchIDReg = Info.addDispatchID(TRI);
1784     MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1785     CCInfo.AllocateReg(DispatchIDReg);
1786   }
1787 
1788   if (Info.hasFlatScratchInit()) {
1789     unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1790     MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1791     CCInfo.AllocateReg(FlatScratchInitReg);
1792   }
1793 
1794   // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1795   // these from the dispatch pointer.
1796 }
1797 
1798 // Allocate special input registers that are initialized per-wave.
1799 void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo,
1800                                            MachineFunction &MF,
1801                                            SIMachineFunctionInfo &Info,
1802                                            CallingConv::ID CallConv,
1803                                            bool IsShader) const {
1804   if (Info.hasWorkGroupIDX()) {
1805     unsigned Reg = Info.addWorkGroupIDX();
1806     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1807     CCInfo.AllocateReg(Reg);
1808   }
1809 
1810   if (Info.hasWorkGroupIDY()) {
1811     unsigned Reg = Info.addWorkGroupIDY();
1812     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1813     CCInfo.AllocateReg(Reg);
1814   }
1815 
1816   if (Info.hasWorkGroupIDZ()) {
1817     unsigned Reg = Info.addWorkGroupIDZ();
1818     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1819     CCInfo.AllocateReg(Reg);
1820   }
1821 
1822   if (Info.hasWorkGroupInfo()) {
1823     unsigned Reg = Info.addWorkGroupInfo();
1824     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1825     CCInfo.AllocateReg(Reg);
1826   }
1827 
1828   if (Info.hasPrivateSegmentWaveByteOffset()) {
1829     // Scratch wave offset passed in system SGPR.
1830     unsigned PrivateSegmentWaveByteOffsetReg;
1831 
1832     if (IsShader) {
1833       PrivateSegmentWaveByteOffsetReg =
1834         Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
1835 
1836       // This is true if the scratch wave byte offset doesn't have a fixed
1837       // location.
1838       if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1839         PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1840         Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1841       }
1842     } else
1843       PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1844 
1845     MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1846     CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1847   }
1848 }
1849 
1850 static void reservePrivateMemoryRegs(const TargetMachine &TM,
1851                                      MachineFunction &MF,
1852                                      const SIRegisterInfo &TRI,
1853                                      SIMachineFunctionInfo &Info) {
  // Now that we've figured out where the scratch register inputs are, see if
  // we should reserve the arguments and use them directly.
1856   MachineFrameInfo &MFI = MF.getFrameInfo();
1857   bool HasStackObjects = MFI.hasStackObjects();
1858   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1859 
1860   // Record that we know we have non-spill stack objects so we don't need to
1861   // check all stack objects later.
1862   if (HasStackObjects)
1863     Info.setHasNonSpillStackObjects(true);
1864 
1865   // Everything live out of a block is spilled with fast regalloc, so it's
1866   // almost certain that spilling will be required.
1867   if (TM.getOptLevel() == CodeGenOpt::None)
1868     HasStackObjects = true;
1869 
1870   // For now assume stack access is needed in any callee functions, so we need
1871   // the scratch registers to pass in.
1872   bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1873 
1874   if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) {
1875     // If we have stack objects, we unquestionably need the private buffer
1876     // resource. For the Code Object V2 ABI, this will be the first 4 user
1877     // SGPR inputs. We can reserve those and use them directly.
1878 
1879     unsigned PrivateSegmentBufferReg =
1880         Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
1881     Info.setScratchRSrcReg(PrivateSegmentBufferReg);
1882   } else {
1883     unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
    // We tentatively reserve the last registers (skipping the very last ones,
    // which may contain VCC, FLAT_SCR, and XNACK). After register allocation,
    // we'll replace these with the registers immediately after those which
    // were really allocated. In the prologue, copies will be inserted from the
    // argument to these reserved registers.
1889 
1890     // Without HSA, relocations are used for the scratch pointer and the
1891     // buffer resource setup is always inserted in the prologue. Scratch wave
1892     // offset is still in an input SGPR.
1893     Info.setScratchRSrcReg(ReservedBufferReg);
1894   }
1895 
1896   // hasFP should be accurate for kernels even before the frame is finalized.
1897   if (ST.getFrameLowering()->hasFP(MF)) {
1898     MachineRegisterInfo &MRI = MF.getRegInfo();
1899 
1900     // Try to use s32 as the SP, but move it if it would interfere with input
1901     // arguments. This won't work with calls though.
1902     //
1903     // FIXME: Move SP to avoid any possible inputs, or find a way to spill input
1904     // registers.
1905     if (!MRI.isLiveIn(AMDGPU::SGPR32)) {
1906       Info.setStackPtrOffsetReg(AMDGPU::SGPR32);
1907     } else {
1908       assert(AMDGPU::isShader(MF.getFunction().getCallingConv()));
1909 
1910       if (MFI.hasCalls())
1911         report_fatal_error("call in graphics shader with too many input SGPRs");
1912 
1913       for (unsigned Reg : AMDGPU::SGPR_32RegClass) {
1914         if (!MRI.isLiveIn(Reg)) {
1915           Info.setStackPtrOffsetReg(Reg);
1916           break;
1917         }
1918       }
1919 
1920       if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG)
1921         report_fatal_error("failed to find register for SP");
1922     }
1923 
1924     if (MFI.hasCalls()) {
1925       Info.setScratchWaveOffsetReg(AMDGPU::SGPR33);
1926       Info.setFrameOffsetReg(AMDGPU::SGPR33);
1927     } else {
1928       unsigned ReservedOffsetReg =
1929         TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1930       Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1931       Info.setFrameOffsetReg(ReservedOffsetReg);
1932     }
1933   } else if (RequiresStackAccess) {
1934     assert(!MFI.hasCalls());
1935     // We know there are accesses and they will be done relative to SP, so just
1936     // pin it to the input.
1937     //
1938     // FIXME: Should not do this if inline asm is reading/writing these
1939     // registers.
1940     unsigned PreloadedSP = Info.getPreloadedReg(
1941         AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
1942 
1943     Info.setStackPtrOffsetReg(PreloadedSP);
1944     Info.setScratchWaveOffsetReg(PreloadedSP);
1945     Info.setFrameOffsetReg(PreloadedSP);
1946   } else {
1947     assert(!MFI.hasCalls());
1948 
    // There may not be stack access at all. There may still be spills, or
    // access of a constant pointer (in which case an extra copy will be
    // emitted in the prolog).
1952     unsigned ReservedOffsetReg
1953       = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1954     Info.setStackPtrOffsetReg(ReservedOffsetReg);
1955     Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1956     Info.setFrameOffsetReg(ReservedOffsetReg);
1957   }
1958 }
1959 
1960 bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
1961   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1962   return !Info->isEntryFunction();
1963 }
1964 
void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
}
1968 
1969 void SITargetLowering::insertCopiesSplitCSR(
1970   MachineBasicBlock *Entry,
1971   const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1972   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1973 
1974   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1975   if (!IStart)
1976     return;
1977 
1978   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1979   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1980   MachineBasicBlock::iterator MBBI = Entry->begin();
1981   for (const MCPhysReg *I = IStart; *I; ++I) {
1982     const TargetRegisterClass *RC = nullptr;
1983     if (AMDGPU::SReg_64RegClass.contains(*I))
1984       RC = &AMDGPU::SGPR_64RegClass;
1985     else if (AMDGPU::SReg_32RegClass.contains(*I))
1986       RC = &AMDGPU::SGPR_32RegClass;
1987     else
1988       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
1989 
1990     unsigned NewVR = MRI->createVirtualRegister(RC);
1991     // Create copy from CSR to a virtual register.
1992     Entry->addLiveIn(*I);
1993     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
1994       .addReg(*I);
1995 
1996     // Insert the copy-back instructions right before the terminator.
1997     for (auto *Exit : Exits)
1998       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
1999               TII->get(TargetOpcode::COPY), *I)
2000         .addReg(NewVR);
2001   }
2002 }
2003 
2004 SDValue SITargetLowering::LowerFormalArguments(
2005     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
2006     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2007     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2008   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2009 
2010   MachineFunction &MF = DAG.getMachineFunction();
2011   const Function &Fn = MF.getFunction();
2012   FunctionType *FType = MF.getFunction().getFunctionType();
2013   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2014 
2015   if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
2016     DiagnosticInfoUnsupported NoGraphicsHSA(
2017         Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
2018     DAG.getContext()->diagnose(NoGraphicsHSA);
2019     return DAG.getEntryNode();
2020   }
2021 
2022   SmallVector<ISD::InputArg, 16> Splits;
2023   SmallVector<CCValAssign, 16> ArgLocs;
2024   BitVector Skipped(Ins.size());
2025   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
2026                  *DAG.getContext());
2027 
2028   bool IsShader = AMDGPU::isShader(CallConv);
2029   bool IsKernel = AMDGPU::isKernel(CallConv);
2030   bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
2031 
2032   if (IsShader) {
2033     processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
2034 
2035     // At least one interpolation mode must be enabled or else the GPU will
2036     // hang.
2037     //
    // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
    // set PSInputAddr, the user wants to enable some bits after compilation
    // based on run-time states. Since we can't know what the final PSInputEna
    // will look like, we shouldn't do anything here and the user should take
    // responsibility for the correct programming.
2043     //
2044     // Otherwise, the following restrictions apply:
2045     // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
2046     // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
2047     //   enabled too.
2048     if (CallConv == CallingConv::AMDGPU_PS) {
2049       if ((Info->getPSInputAddr() & 0x7F) == 0 ||
2050            ((Info->getPSInputAddr() & 0xF) == 0 &&
2051             Info->isPSInputAllocated(11))) {
2052         CCInfo.AllocateReg(AMDGPU::VGPR0);
2053         CCInfo.AllocateReg(AMDGPU::VGPR1);
2054         Info->markPSInputAllocated(0);
2055         Info->markPSInputEnabled(0);
2056       }
2057       if (Subtarget->isAmdPalOS()) {
2058         // For isAmdPalOS, the user does not enable some bits after compilation
2059         // based on run-time states; the register values being generated here are
2060         // the final ones set in hardware. Therefore we need to apply the
2061         // workaround to PSInputAddr and PSInputEnable together.  (The case where
2062         // a bit is set in PSInputAddr but not PSInputEnable is where the
2063         // frontend set up an input arg for a particular interpolation mode, but
2064         // nothing uses that input arg. Really we should have an earlier pass
2065         // that removes such an arg.)
2066         unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
2067         if ((PsInputBits & 0x7F) == 0 ||
2068             ((PsInputBits & 0xF) == 0 &&
2069              (PsInputBits >> 11 & 1)))
2070           Info->markPSInputEnabled(
2071               countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
2072       }
2073     }
2074 
2075     assert(!Info->hasDispatchPtr() &&
2076            !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
2077            !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
2078            !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
2079            !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
2080            !Info->hasWorkItemIDZ());
2081   } else if (IsKernel) {
2082     assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
2083   } else {
2084     Splits.append(Ins.begin(), Ins.end());
2085   }
2086 
2087   if (IsEntryFunc) {
2088     allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
2089     allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
2090   }
2091 
2092   if (IsKernel) {
2093     analyzeFormalArgumentsCompute(CCInfo, Ins);
2094   } else {
2095     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
2096     CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
2097   }
2098 
2099   SmallVector<SDValue, 16> Chains;
2100 
2101   // FIXME: This is the minimum kernel argument alignment. We should improve
2102   // this to the maximum alignment of the arguments.
2103   //
2104   // FIXME: Alignment of explicit arguments totally broken with non-0 explicit
2105   // kern arg offset.
2106   const unsigned KernelArgBaseAlign = 16;
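  // For example, an argument at byte offset 4 from the 16-byte-aligned kernarg
  // base gets MinAlign(16, 4) == 4 below, so only dword alignment is assumed.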
2107 
  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
2109     const ISD::InputArg &Arg = Ins[i];
2110     if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
2111       InVals.push_back(DAG.getUNDEF(Arg.VT));
2112       continue;
2113     }
2114 
2115     CCValAssign &VA = ArgLocs[ArgIdx++];
2116     MVT VT = VA.getLocVT();
2117 
2118     if (IsEntryFunc && VA.isMemLoc()) {
2119       VT = Ins[i].VT;
2120       EVT MemVT = VA.getLocVT();
2121 
2122       const uint64_t Offset = VA.getLocMemOffset();
2123       unsigned Align = MinAlign(KernelArgBaseAlign, Offset);
2124 
2125       SDValue Arg = lowerKernargMemParameter(
2126         DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]);
2127       Chains.push_back(Arg.getValue(1));
2128 
2129       auto *ParamTy =
2130         dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
2131       if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
2132           ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
2133                       ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) {
        // On SI, local pointers are just offsets into LDS, so they always fit
        // in 16 bits. On CI and newer they could potentially be real pointers,
        // so we can't guarantee their size.
2137         Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
2138                           DAG.getValueType(MVT::i16));
2139       }
2140 
2141       InVals.push_back(Arg);
2142       continue;
2143     } else if (!IsEntryFunc && VA.isMemLoc()) {
2144       SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
2145       InVals.push_back(Val);
2146       if (!Arg.Flags.isByVal())
2147         Chains.push_back(Val.getValue(1));
2148       continue;
2149     }
2150 
2151     assert(VA.isRegLoc() && "Parameter must be in a register!");
2152 
2153     unsigned Reg = VA.getLocReg();
2154     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2155     EVT ValVT = VA.getValVT();
2156 
2157     Reg = MF.addLiveIn(Reg, RC);
2158     SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
2159 
2160     if (Arg.Flags.isSRet()) {
2161       // The return object should be reasonably addressable.
2162 
      // FIXME: This helps when the return is a real sret. If it is an
      // automatically inserted sret (i.e. CanLowerReturn returns false), an
2165       // extra copy is inserted in SelectionDAGBuilder which obscures this.
2166       unsigned NumBits
2167         = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex();
2168       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2169         DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
2170     }
2171 
2172     // If this is an 8 or 16-bit value, it is really passed promoted
2173     // to 32 bits. Insert an assert[sz]ext to capture this, then
2174     // truncate to the right size.
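    // For example, an i8 value sign-extended by the caller arrives in a 32-bit
    // register; we emit AssertSext with value type i8 and then truncate the
    // result back to i8.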
2175     switch (VA.getLocInfo()) {
2176     case CCValAssign::Full:
2177       break;
2178     case CCValAssign::BCvt:
2179       Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
2180       break;
2181     case CCValAssign::SExt:
2182       Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
2183                         DAG.getValueType(ValVT));
2184       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2185       break;
2186     case CCValAssign::ZExt:
2187       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2188                         DAG.getValueType(ValVT));
2189       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2190       break;
2191     case CCValAssign::AExt:
2192       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2193       break;
2194     default:
2195       llvm_unreachable("Unknown loc info!");
2196     }
2197 
2198     InVals.push_back(Val);
2199   }
2200 
2201   if (!IsEntryFunc) {
2202     // Special inputs come after user arguments.
2203     allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
2204   }
2205 
2206   // Start adding system SGPRs.
2207   if (IsEntryFunc) {
2208     allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
2209   } else {
2210     CCInfo.AllocateReg(Info->getScratchRSrcReg());
2211     CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
2212     CCInfo.AllocateReg(Info->getFrameOffsetReg());
2213     allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
2214   }
2215 
2216   auto &ArgUsageInfo =
2217     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2218   ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
2219 
2220   unsigned StackArgSize = CCInfo.getNextStackOffset();
2221   Info->setBytesInStackArgArea(StackArgSize);
2222 
2223   return Chains.empty() ? Chain :
2224     DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
2225 }
2226 
// TODO: If return values can't fit in registers, we should return as many as
// possible in registers before passing the rest on the stack.
2229 bool SITargetLowering::CanLowerReturn(
2230   CallingConv::ID CallConv,
2231   MachineFunction &MF, bool IsVarArg,
2232   const SmallVectorImpl<ISD::OutputArg> &Outs,
2233   LLVMContext &Context) const {
2234   // Replacing returns with sret/stack usage doesn't make sense for shaders.
2235   // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
2236   // for shaders. Vector types should be explicitly handled by CC.
2237   if (AMDGPU::isEntryFunctionCC(CallConv))
2238     return true;
2239 
2240   SmallVector<CCValAssign, 16> RVLocs;
2241   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2242   return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
2243 }
2244 
2245 SDValue
2246 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2247                               bool isVarArg,
2248                               const SmallVectorImpl<ISD::OutputArg> &Outs,
2249                               const SmallVectorImpl<SDValue> &OutVals,
2250                               const SDLoc &DL, SelectionDAG &DAG) const {
2251   MachineFunction &MF = DAG.getMachineFunction();
2252   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2253 
2254   if (AMDGPU::isKernel(CallConv)) {
2255     return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
2256                                              OutVals, DL, DAG);
2257   }
2258 
2259   bool IsShader = AMDGPU::isShader(CallConv);
2260 
2261   Info->setIfReturnsVoid(Outs.empty());
2262   bool IsWaveEnd = Info->returnsVoid() && IsShader;
2263 
  // CCValAssign - represents the assignment of the return value to a location.
2265   SmallVector<CCValAssign, 48> RVLocs;
2266   SmallVector<ISD::OutputArg, 48> Splits;
2267 
2268   // CCState - Info about the registers and stack slots.
2269   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2270                  *DAG.getContext());
2271 
2272   // Analyze outgoing return values.
2273   CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2274 
2275   SDValue Flag;
2276   SmallVector<SDValue, 48> RetOps;
2277   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2278 
2279   // Add return address for callable functions.
2280   if (!Info->isEntryFunction()) {
2281     const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2282     SDValue ReturnAddrReg = CreateLiveInRegister(
2283       DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2284 
2285     SDValue ReturnAddrVirtualReg = DAG.getRegister(
2286         MF.getRegInfo().createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass),
2287         MVT::i64);
2288     Chain =
2289         DAG.getCopyToReg(Chain, DL, ReturnAddrVirtualReg, ReturnAddrReg, Flag);
2290     Flag = Chain.getValue(1);
2291     RetOps.push_back(ReturnAddrVirtualReg);
2292   }
2293 
2294   // Copy the result values into the output registers.
2295   for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
2296        ++I, ++RealRVLocIdx) {
2297     CCValAssign &VA = RVLocs[I];
2298     assert(VA.isRegLoc() && "Can only return in registers!");
2299     // TODO: Partially return in registers if return values don't fit.
2300     SDValue Arg = OutVals[RealRVLocIdx];
2301 
2302     // Copied from other backends.
2303     switch (VA.getLocInfo()) {
2304     case CCValAssign::Full:
2305       break;
2306     case CCValAssign::BCvt:
2307       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2308       break;
2309     case CCValAssign::SExt:
2310       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2311       break;
2312     case CCValAssign::ZExt:
2313       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2314       break;
2315     case CCValAssign::AExt:
2316       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2317       break;
2318     default:
2319       llvm_unreachable("Unknown loc info!");
2320     }
2321 
2322     Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2323     Flag = Chain.getValue(1);
2324     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2325   }
2326 
2327   // FIXME: Does sret work properly?
2328   if (!Info->isEntryFunction()) {
2329     const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2330     const MCPhysReg *I =
2331       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2332     if (I) {
2333       for (; *I; ++I) {
2334         if (AMDGPU::SReg_64RegClass.contains(*I))
2335           RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2336         else if (AMDGPU::SReg_32RegClass.contains(*I))
2337           RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2338         else
2339           llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2340       }
2341     }
2342   }
2343 
2344   // Update chain and glue.
2345   RetOps[0] = Chain;
2346   if (Flag.getNode())
2347     RetOps.push_back(Flag);
2348 
2349   unsigned Opc = AMDGPUISD::ENDPGM;
2350   if (!IsWaveEnd)
2351     Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
2352   return DAG.getNode(Opc, DL, MVT::Other, RetOps);
2353 }
2354 
2355 SDValue SITargetLowering::LowerCallResult(
2356     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2357     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2358     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2359     SDValue ThisVal) const {
2360   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2361 
2362   // Assign locations to each value returned by this call.
2363   SmallVector<CCValAssign, 16> RVLocs;
2364   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2365                  *DAG.getContext());
2366   CCInfo.AnalyzeCallResult(Ins, RetCC);
2367 
2368   // Copy all of the result registers out of their specified physreg.
2369   for (unsigned i = 0; i != RVLocs.size(); ++i) {
2370     CCValAssign VA = RVLocs[i];
2371     SDValue Val;
2372 
2373     if (VA.isRegLoc()) {
2374       Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2375       Chain = Val.getValue(1);
2376       InFlag = Val.getValue(2);
2377     } else if (VA.isMemLoc()) {
2378       report_fatal_error("TODO: return values in memory");
2379     } else
2380       llvm_unreachable("unknown argument location type");
2381 
2382     switch (VA.getLocInfo()) {
2383     case CCValAssign::Full:
2384       break;
2385     case CCValAssign::BCvt:
2386       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2387       break;
2388     case CCValAssign::ZExt:
2389       Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2390                         DAG.getValueType(VA.getValVT()));
2391       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2392       break;
2393     case CCValAssign::SExt:
2394       Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2395                         DAG.getValueType(VA.getValVT()));
2396       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2397       break;
2398     case CCValAssign::AExt:
2399       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2400       break;
2401     default:
2402       llvm_unreachable("Unknown loc info!");
2403     }
2404 
2405     InVals.push_back(Val);
2406   }
2407 
2408   return Chain;
2409 }
2410 
// Add code to pass special inputs required depending on used features,
// separate from the explicit user arguments present in the IR.
2413 void SITargetLowering::passSpecialInputs(
2414     CallLoweringInfo &CLI,
2415     CCState &CCInfo,
2416     const SIMachineFunctionInfo &Info,
2417     SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2418     SmallVectorImpl<SDValue> &MemOpChains,
2419     SDValue Chain) const {
2420   // If we don't have a call site, this was a call inserted by
2421   // legalization. These can never use special inputs.
2422   if (!CLI.CS)
2423     return;
2424 
2425   const Function *CalleeFunc = CLI.CS.getCalledFunction();
2426   assert(CalleeFunc);
2427 
2428   SelectionDAG &DAG = CLI.DAG;
2429   const SDLoc &DL = CLI.DL;
2430 
2431   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2432 
2433   auto &ArgUsageInfo =
2434     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2435   const AMDGPUFunctionArgInfo &CalleeArgInfo
2436     = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2437 
2438   const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2439 
2440   // TODO: Unify with private memory register handling. This is complicated by
2441   // the fact that at least in kernels, the input argument is not necessarily
2442   // in the same location as the input.
2443   AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
2444     AMDGPUFunctionArgInfo::DISPATCH_PTR,
2445     AMDGPUFunctionArgInfo::QUEUE_PTR,
2446     AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
2447     AMDGPUFunctionArgInfo::DISPATCH_ID,
2448     AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
2449     AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
2450     AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
2451     AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
2452   };
2453 
2454   for (auto InputID : InputRegs) {
2455     const ArgDescriptor *OutgoingArg;
2456     const TargetRegisterClass *ArgRC;
2457 
2458     std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2459     if (!OutgoingArg)
2460       continue;
2461 
2462     const ArgDescriptor *IncomingArg;
2463     const TargetRegisterClass *IncomingArgRC;
2464     std::tie(IncomingArg, IncomingArgRC)
2465       = CallerArgInfo.getPreloadedValue(InputID);
2466     assert(IncomingArgRC == ArgRC);
2467 
2468     // All special arguments are ints for now.
2469     EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
2470     SDValue InputReg;
2471 
2472     if (IncomingArg) {
2473       InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2474     } else {
2475       // The implicit arg ptr is special because it doesn't have a corresponding
2476       // input for kernels, and is computed from the kernarg segment pointer.
2477       assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2478       InputReg = getImplicitArgPtr(DAG, DL);
2479     }
2480 
2481     if (OutgoingArg->isRegister()) {
2482       RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2483     } else {
2484       unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4);
2485       SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2486                                               SpecialArgOffset);
2487       MemOpChains.push_back(ArgStore);
2488     }
2489   }
2490 
  // Pack workitem IDs into a single register, or pass them as-is if they are
  // already packed.
2493   const ArgDescriptor *OutgoingArg;
2494   const TargetRegisterClass *ArgRC;
2495 
2496   std::tie(OutgoingArg, ArgRC) =
2497     CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
2498   if (!OutgoingArg)
2499     std::tie(OutgoingArg, ArgRC) =
2500       CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
2501   if (!OutgoingArg)
2502     std::tie(OutgoingArg, ArgRC) =
2503       CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
2504   if (!OutgoingArg)
2505     return;
2506 
2507   const ArgDescriptor *IncomingArgX
2508     = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X).first;
2509   const ArgDescriptor *IncomingArgY
2510     = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y).first;
2511   const ArgDescriptor *IncomingArgZ
2512     = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z).first;
2513 
2514   SDValue InputReg;
2515   SDLoc SL;
2516 
  // If the incoming IDs are not packed, we need to pack them.
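  // The packed layout matches what the callee expects: X in bits [9:0], Y in
  // bits [19:10] (shifted left by 10 below), and Z in bits [29:20] (shifted
  // left by 20), OR'd together into a single i32.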
2518   if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo.WorkItemIDX)
2519     InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX);
2520 
2521   if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo.WorkItemIDY) {
2522     SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY);
2523     Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y,
2524                     DAG.getShiftAmountConstant(10, MVT::i32, SL));
2525     InputReg = InputReg.getNode() ?
2526                  DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Y) : Y;
2527   }
2528 
2529   if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo.WorkItemIDZ) {
2530     SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ);
2531     Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z,
2532                     DAG.getShiftAmountConstant(20, MVT::i32, SL));
2533     InputReg = InputReg.getNode() ?
2534                  DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Z) : Z;
2535   }
2536 
2537   if (!InputReg.getNode()) {
    // Workitem IDs are already packed; any present incoming argument will
    // carry all of the required fields.
2540     ArgDescriptor IncomingArg = ArgDescriptor::createArg(
2541       IncomingArgX ? *IncomingArgX :
2542       IncomingArgY ? *IncomingArgY :
2543                      *IncomingArgZ, ~0u);
2544     InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg);
2545   }
2546 
2547   if (OutgoingArg->isRegister()) {
2548     RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2549   } else {
2550     unsigned SpecialArgOffset = CCInfo.AllocateStack(4, 4);
2551     SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2552                                             SpecialArgOffset);
2553     MemOpChains.push_back(ArgStore);
2554   }
2555 }
2556 
2557 static bool canGuaranteeTCO(CallingConv::ID CC) {
2558   return CC == CallingConv::Fast;
2559 }
2560 
2561 /// Return true if we might ever do TCO for calls with this calling convention.
2562 static bool mayTailCallThisCC(CallingConv::ID CC) {
2563   switch (CC) {
2564   case CallingConv::C:
2565     return true;
2566   default:
2567     return canGuaranteeTCO(CC);
2568   }
2569 }
2570 
2571 bool SITargetLowering::isEligibleForTailCallOptimization(
2572     SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2573     const SmallVectorImpl<ISD::OutputArg> &Outs,
2574     const SmallVectorImpl<SDValue> &OutVals,
2575     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2576   if (!mayTailCallThisCC(CalleeCC))
2577     return false;
2578 
2579   MachineFunction &MF = DAG.getMachineFunction();
2580   const Function &CallerF = MF.getFunction();
2581   CallingConv::ID CallerCC = CallerF.getCallingConv();
2582   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2583   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2584 
  // Kernels aren't callable, and don't have a live-in return address, so it
  // doesn't make sense to do a tail call with entry functions.
2587   if (!CallerPreserved)
2588     return false;
2589 
2590   bool CCMatch = CallerCC == CalleeCC;
2591 
2592   if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
2593     if (canGuaranteeTCO(CalleeCC) && CCMatch)
2594       return true;
2595     return false;
2596   }
2597 
2598   // TODO: Can we handle var args?
2599   if (IsVarArg)
2600     return false;
2601 
2602   for (const Argument &Arg : CallerF.args()) {
2603     if (Arg.hasByValAttr())
2604       return false;
2605   }
2606 
2607   LLVMContext &Ctx = *DAG.getContext();
2608 
2609   // Check that the call results are passed in the same way.
2610   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2611                                   CCAssignFnForCall(CalleeCC, IsVarArg),
2612                                   CCAssignFnForCall(CallerCC, IsVarArg)))
2613     return false;
2614 
2615   // The callee has to preserve all registers the caller needs to preserve.
2616   if (!CCMatch) {
2617     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2618     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2619       return false;
2620   }
2621 
2622   // Nothing more to check if the callee is taking no arguments.
2623   if (Outs.empty())
2624     return true;
2625 
2626   SmallVector<CCValAssign, 16> ArgLocs;
2627   CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2628 
2629   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2630 
2631   const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  // If the stack arguments for this call do not fit into our own save area,
  // then the call cannot be made a tail call.
2634   // TODO: Is this really necessary?
2635   if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2636     return false;
2637 
2638   const MachineRegisterInfo &MRI = MF.getRegInfo();
2639   return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2640 }
2641 
2642 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2643   if (!CI->isTailCall())
2644     return false;
2645 
2646   const Function *ParentFn = CI->getParent()->getParent();
2647   if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2648     return false;
2649 
2650   auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
2651   return (Attr.getValueAsString() != "true");
2652 }
2653 
2654 // The wave scratch offset register is used as the global base pointer.
2655 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2656                                     SmallVectorImpl<SDValue> &InVals) const {
2657   SelectionDAG &DAG = CLI.DAG;
2658   const SDLoc &DL = CLI.DL;
2659   SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2660   SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2661   SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2662   SDValue Chain = CLI.Chain;
2663   SDValue Callee = CLI.Callee;
2664   bool &IsTailCall = CLI.IsTailCall;
2665   CallingConv::ID CallConv = CLI.CallConv;
2666   bool IsVarArg = CLI.IsVarArg;
2667   bool IsSibCall = false;
2668   bool IsThisReturn = false;
2669   MachineFunction &MF = DAG.getMachineFunction();
2670 
2671   if (IsVarArg) {
2672     return lowerUnhandledCall(CLI, InVals,
2673                               "unsupported call to variadic function ");
2674   }
2675 
2676   if (!CLI.CS.getInstruction())
2677     report_fatal_error("unsupported libcall legalization");
2678 
2679   if (!CLI.CS.getCalledFunction()) {
2680     return lowerUnhandledCall(CLI, InVals,
2681                               "unsupported indirect call to function ");
2682   }
2683 
2684   if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2685     return lowerUnhandledCall(CLI, InVals,
2686                               "unsupported required tail call to function ");
2687   }
2688 
2689   if (AMDGPU::isShader(MF.getFunction().getCallingConv())) {
2690     // Note the issue is with the CC of the calling function, not of the call
2691     // itself.
2692     return lowerUnhandledCall(CLI, InVals,
2693                           "unsupported call from graphics shader of function ");
2694   }
2695 
2696   if (IsTailCall) {
2697     IsTailCall = isEligibleForTailCallOptimization(
2698       Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2699     if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2700       report_fatal_error("failed to perform tail call elimination on a call "
2701                          "site marked musttail");
2702     }
2703 
2704     bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2705 
2706     // A sibling call is one where we're under the usual C ABI and not planning
2707     // to change that but can still do a tail call:
2708     if (!TailCallOpt && IsTailCall)
2709       IsSibCall = true;
2710 
2711     if (IsTailCall)
2712       ++NumTailCalls;
2713   }
2714 
2715   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2716 
2717   // Analyze operands of the call, assigning locations to each operand.
2718   SmallVector<CCValAssign, 16> ArgLocs;
2719   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2720   CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
2721 
2722   CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2723 
2724   // Get a count of how many bytes are to be pushed on the stack.
2725   unsigned NumBytes = CCInfo.getNextStackOffset();
2726 
2727   if (IsSibCall) {
2728     // Since we're not changing the ABI to make this a tail call, the memory
2729     // operands are already available in the caller's incoming argument space.
2730     NumBytes = 0;
2731   }
2732 
2733   // FPDiff is the byte offset of the call's argument area from the callee's.
2734   // Stores to callee stack arguments will be placed in FixedStackSlots offset
2735   // by this amount for a tail call. In a sibling call it must be 0 because the
2736   // caller will deallocate the entire stack and the callee still expects its
2737   // arguments to begin at SP+0. Completely unused for non-tail calls.
2738   int32_t FPDiff = 0;
2739   MachineFrameInfo &MFI = MF.getFrameInfo();
2740   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2741 
2742   // Adjust the stack pointer for the new arguments...
2743   // These operations are automatically eliminated by the prolog/epilog pass
2744   if (!IsSibCall) {
2745     Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2746 
2747     SmallVector<SDValue, 4> CopyFromChains;
2748 
2749     // In the HSA case, this should be an identity copy.
2750     SDValue ScratchRSrcReg
2751       = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2752     RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
2753     CopyFromChains.push_back(ScratchRSrcReg.getValue(1));
2754     Chain = DAG.getTokenFactor(DL, CopyFromChains);
2755   }
2756 
2757   SmallVector<SDValue, 8> MemOpChains;
2758   MVT PtrVT = MVT::i32;
2759 
2760   // Walk the register/memloc assignments, inserting copies/loads.
2761   for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2762        ++i, ++realArgIdx) {
2763     CCValAssign &VA = ArgLocs[i];
2764     SDValue Arg = OutVals[realArgIdx];
2765 
2766     // Promote the value if needed.
2767     switch (VA.getLocInfo()) {
2768     case CCValAssign::Full:
2769       break;
2770     case CCValAssign::BCvt:
2771       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2772       break;
2773     case CCValAssign::ZExt:
2774       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2775       break;
2776     case CCValAssign::SExt:
2777       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2778       break;
2779     case CCValAssign::AExt:
2780       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2781       break;
2782     case CCValAssign::FPExt:
2783       Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2784       break;
2785     default:
2786       llvm_unreachable("Unknown loc info!");
2787     }
2788 
2789     if (VA.isRegLoc()) {
2790       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2791     } else {
2792       assert(VA.isMemLoc());
2793 
2794       SDValue DstAddr;
2795       MachinePointerInfo DstInfo;
2796 
2797       unsigned LocMemOffset = VA.getLocMemOffset();
2798       int32_t Offset = LocMemOffset;
2799 
2800       SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT);
2801       unsigned Align = 0;
2802 
2803       if (IsTailCall) {
2804         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2805         unsigned OpSize = Flags.isByVal() ?
2806           Flags.getByValSize() : VA.getValVT().getStoreSize();
2807 
2808         // FIXME: We can have better than the minimum byval required alignment.
2809         Align = Flags.isByVal() ? Flags.getByValAlign() :
2810           MinAlign(Subtarget->getStackAlignment(), Offset);
2811 
2812         Offset = Offset + FPDiff;
2813         int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2814 
2815         DstAddr = DAG.getFrameIndex(FI, PtrVT);
2816         DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2817 
2818         // Make sure any stack arguments overlapping with where we're storing
2819         // are loaded before this eventual operation. Otherwise they'll be
2820         // clobbered.
2821 
        // FIXME: Why is this really necessary? This seems to just result in a
        // lot of code to copy the stack arguments and write them back to the
        // same locations, which are supposed to be immutable?
2825         Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2826       } else {
2827         DstAddr = PtrOff;
2828         DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
2829         Align = MinAlign(Subtarget->getStackAlignment(), LocMemOffset);
2830       }
2831 
2832       if (Outs[i].Flags.isByVal()) {
2833         SDValue SizeNode =
2834             DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2835         SDValue Cpy = DAG.getMemcpy(
2836             Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2837             /*isVol = */ false, /*AlwaysInline = */ true,
2838             /*isTailCall = */ false, DstInfo,
2839             MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
2840                 *DAG.getContext(), AMDGPUAS::PRIVATE_ADDRESS))));
2841 
2842         MemOpChains.push_back(Cpy);
2843       } else {
2844         SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Align);
2845         MemOpChains.push_back(Store);
2846       }
2847     }
2848   }
2849 
2850   // Copy special input registers after user input arguments.
2851   passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain);
2852 
2853   if (!MemOpChains.empty())
2854     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2855 
2856   // Build a sequence of copy-to-reg nodes chained together with token chain
2857   // and flag operands which copy the outgoing args into the appropriate regs.
2858   SDValue InFlag;
2859   for (auto &RegToPass : RegsToPass) {
2860     Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2861                              RegToPass.second, InFlag);
2862     InFlag = Chain.getValue(1);
2863   }
2864 
2865 
2866   SDValue PhysReturnAddrReg;
2867   if (IsTailCall) {
2868     // Since the return is being combined with the call, we need to pass on the
2869     // return address.
2870 
2871     const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2872     SDValue ReturnAddrReg = CreateLiveInRegister(
2873       DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2874 
2875     PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2876                                         MVT::i64);
2877     Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2878     InFlag = Chain.getValue(1);
2879   }
2880 
2881   // We don't usually want to end the call-sequence here because we would tidy
2882   // the frame up *after* the call, however in the ABI-changing tail-call case
2883   // we've carefully laid out the parameters so that when sp is reset they'll be
2884   // in the correct location.
2885   if (IsTailCall && !IsSibCall) {
2886     Chain = DAG.getCALLSEQ_END(Chain,
2887                                DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2888                                DAG.getTargetConstant(0, DL, MVT::i32),
2889                                InFlag, DL);
2890     InFlag = Chain.getValue(1);
2891   }
2892 
2893   std::vector<SDValue> Ops;
2894   Ops.push_back(Chain);
2895   Ops.push_back(Callee);
2896   // Add a redundant copy of the callee global which will not be legalized, as
2897   // we need direct access to the callee later.
2898   GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Callee);
2899   const GlobalValue *GV = GSD->getGlobal();
2900   Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64));
2901 
2902   if (IsTailCall) {
2903     // Each tail call may have to adjust the stack by a different amount, so
2904     // this information must travel along with the operation for eventual
2905     // consumption by emitEpilogue.
2906     Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
2907 
2908     Ops.push_back(PhysReturnAddrReg);
2909   }
2910 
2911   // Add argument registers to the end of the list so that they are known live
2912   // into the call.
2913   for (auto &RegToPass : RegsToPass) {
2914     Ops.push_back(DAG.getRegister(RegToPass.first,
2915                                   RegToPass.second.getValueType()));
2916   }
2917 
2918   // Add a register mask operand representing the call-preserved registers.
2919 
2920   auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
2921   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2922   assert(Mask && "Missing call preserved mask for calling convention");
2923   Ops.push_back(DAG.getRegisterMask(Mask));
2924 
2925   if (InFlag.getNode())
2926     Ops.push_back(InFlag);
2927 
2928   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2929 
  // If we're doing a tail call, use a TC_RETURN here rather than an
  // actual call instruction.
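  // For a tail call, the operand list assembled above is roughly:
  //   TC_RETURN chain, callee, target-global, fpdiff, return-addr,
  //             arg-regs..., regmask, glue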
2932   if (IsTailCall) {
2933     MFI.setHasTailCall();
2934     return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
2935   }
2936 
2937   // Returns a chain and a flag for retval copy to use.
2938   SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2939   Chain = Call.getValue(0);
2940   InFlag = Call.getValue(1);
2941 
2942   uint64_t CalleePopBytes = NumBytes;
2943   Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
2944                              DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2945                              InFlag, DL);
2946   if (!Ins.empty())
2947     InFlag = Chain.getValue(1);
2948 
2949   // Handle result values, copying them out of physregs into vregs that we
2950   // return.
2951   return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2952                          InVals, IsThisReturn,
2953                          IsThisReturn ? OutVals[0] : SDValue());
2954 }
2955 
2956 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2957                                              SelectionDAG &DAG) const {
2958   unsigned Reg = StringSwitch<unsigned>(RegName)
2959     .Case("m0", AMDGPU::M0)
2960     .Case("exec", AMDGPU::EXEC)
2961     .Case("exec_lo", AMDGPU::EXEC_LO)
2962     .Case("exec_hi", AMDGPU::EXEC_HI)
2963     .Case("flat_scratch", AMDGPU::FLAT_SCR)
2964     .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2965     .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2966     .Default(AMDGPU::NoRegister);
2967 
  if (Reg == AMDGPU::NoRegister) {
    report_fatal_error(Twine("invalid register name \""
                             + StringRef(RegName) + "\"."));
  }
2973 
2974   if (!Subtarget->hasFlatScrRegister() &&
2975        Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
    report_fatal_error(Twine("invalid register \""
                             + StringRef(RegName) + "\" for subtarget."));
2978   }
2979 
2980   switch (Reg) {
2981   case AMDGPU::M0:
2982   case AMDGPU::EXEC_LO:
2983   case AMDGPU::EXEC_HI:
2984   case AMDGPU::FLAT_SCR_LO:
2985   case AMDGPU::FLAT_SCR_HI:
2986     if (VT.getSizeInBits() == 32)
2987       return Reg;
2988     break;
2989   case AMDGPU::EXEC:
2990   case AMDGPU::FLAT_SCR:
2991     if (VT.getSizeInBits() == 64)
2992       return Reg;
2993     break;
2994   default:
2995     llvm_unreachable("missing register type checking");
2996   }
2997 
2998   report_fatal_error(Twine("invalid type for register \""
2999                            + StringRef(RegName) + "\"."));
3000 }
3001 
3002 // If kill is not the last instruction, split the block so kill is always a
3003 // proper terminator.
3004 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
3005                                                     MachineBasicBlock *BB) const {
3006   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3007 
3008   MachineBasicBlock::iterator SplitPoint(&MI);
3009   ++SplitPoint;
3010 
3011   if (SplitPoint == BB->end()) {
3012     // Don't bother with a new block.
3013     MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
3014     return BB;
3015   }
3016 
3017   MachineFunction *MF = BB->getParent();
3018   MachineBasicBlock *SplitBB
3019     = MF->CreateMachineBasicBlock(BB->getBasicBlock());
3020 
3021   MF->insert(++MachineFunction::iterator(BB), SplitBB);
3022   SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
3023 
3024   SplitBB->transferSuccessorsAndUpdatePHIs(BB);
3025   BB->addSuccessor(SplitBB);
3026 
3027   MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
3028   return SplitBB;
3029 }
3030 
// Split block \p MBB at \p MI so as to insert a loop. If \p InstInLoop is
// true, \p MI will be the only instruction in the loop body block. Otherwise,
// it will be the first instruction in the remainder block.
3034 //
3035 /// \returns { LoopBody, Remainder }
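//
// The resulting CFG looks roughly like:
//
//   MBB --> LoopBB --> RemainderBB
//            ^    |
//            +----+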
3036 static std::pair<MachineBasicBlock *, MachineBasicBlock *>
3037 splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) {
3038   MachineFunction *MF = MBB.getParent();
3039   MachineBasicBlock::iterator I(&MI);
3040 
3041   // To insert the loop we need to split the block. Move everything after this
3042   // point to a new block, and insert a new empty block between the two.
3043   MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
3044   MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
3045   MachineFunction::iterator MBBI(MBB);
3046   ++MBBI;
3047 
3048   MF->insert(MBBI, LoopBB);
3049   MF->insert(MBBI, RemainderBB);
3050 
3051   LoopBB->addSuccessor(LoopBB);
3052   LoopBB->addSuccessor(RemainderBB);
3053 
3054   // Move the rest of the block into a new block.
3055   RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
3056 
3057   if (InstInLoop) {
3058     auto Next = std::next(I);
3059 
3060     // Move instruction to loop body.
3061     LoopBB->splice(LoopBB->begin(), &MBB, I, Next);
3062 
3063     // Move the rest of the block.
3064     RemainderBB->splice(RemainderBB->begin(), &MBB, Next, MBB.end());
3065   } else {
3066     RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
3067   }
3068 
3069   MBB.addSuccessor(LoopBB);
3070 
3071   return std::make_pair(LoopBB, RemainderBB);
3072 }
3073 
3074 /// Insert \p MI into a BUNDLE with an S_WAITCNT 0 immediately following it.
3075 void SITargetLowering::bundleInstWithWaitcnt(MachineInstr &MI) const {
3076   MachineBasicBlock *MBB = MI.getParent();
3077   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3078   auto I = MI.getIterator();
3079   auto E = std::next(I);
3080 
3081   BuildMI(*MBB, E, MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
3082     .addImm(0);
3083 
3084   MIBundleBuilder Bundler(*MBB, I, E);
3085   finalizeBundle(*MBB, Bundler.begin());
3086 }
3087 
3088 MachineBasicBlock *
3089 SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI,
3090                                          MachineBasicBlock *BB) const {
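  // With placeholder register names, the loop emitted below is roughly:
  //
  //   loop:
  //     s_setreg_imm32_b32 hwreg(TRAPSTS, MEM_VIOL, 1), 0
  //     <GWS instruction>              ; bundled with an s_waitcnt 0
  //     s_getreg_b32 s_tmp, hwreg(TRAPSTS, MEM_VIOL, 1)
  //     s_cmp_lg_u32 s_tmp, 0
  //     s_cbranch_scc1 loop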
3091   const DebugLoc &DL = MI.getDebugLoc();
3092 
3093   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3094 
3095   MachineBasicBlock *LoopBB;
3096   MachineBasicBlock *RemainderBB;
3097   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3098 
3099   MachineBasicBlock::iterator Prev = std::prev(MI.getIterator());
3100 
3101   std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, *BB, true);
3102 
3103   MachineBasicBlock::iterator I = LoopBB->end();
3104   MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0);
3105 
3106   const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg(
3107     AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1);
3108 
3109   // Clear TRAP_STS.MEM_VIOL
3110   BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::S_SETREG_IMM32_B32))
3111     .addImm(0)
3112     .addImm(EncodedReg);
3113 
3114   // This is a pain, but we're not allowed to have physical register live-ins
3115   // yet. Insert a pair of copies if the VGPR0 hack is necessary.
3116   if (Src && TargetRegisterInfo::isPhysicalRegister(Src->getReg())) {
3117     unsigned Data0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3118     BuildMI(*BB, std::next(Prev), DL, TII->get(AMDGPU::COPY), Data0)
3119       .add(*Src);
3120 
3121     BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::COPY), Src->getReg())
3122       .addReg(Data0);
3123 
3124     MRI.setSimpleHint(Data0, Src->getReg());
3125   }
3126 
3127   bundleInstWithWaitcnt(MI);
3128 
3129   unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3130 
3131   // Load and check TRAP_STS.MEM_VIOL
3132   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg)
3133     .addImm(EncodedReg);
3134 
3135   // FIXME: Do we need to use an isel pseudo that may clobber scc?
3136   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32))
3137     .addReg(Reg, RegState::Kill)
3138     .addImm(0);
3139   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3140     .addMBB(LoopBB);
3141 
3142   return RemainderBB;
3143 }
3144 
3145 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
3146 // wavefront. If the value is uniform and just happens to be in a VGPR, this
3147 // will only do one iteration. In the worst case, this will loop 64 times.
3148 //
3149 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
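//
// With placeholder register names, the emitted loop body is roughly:
//
//   loop:
//     v_readfirstlane_b32 s_cur, v_idx
//     v_cmp_eq_u32        s_cond, s_cur, v_idx
//     s_and_saveexec_b64  s_save, s_cond  ; s_save = exec, exec &= s_cond
//     s_mov_b32 m0, s_cur                 ; or s_add_i32 / s_set_gpr_idx_on
//     ... (the indirect move is inserted at the returned iterator) ...
//     s_xor_b64 exec, exec, s_save        ; clear the lanes handled this trip
//     s_cbranch_execnz loop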
3150 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
3151   const SIInstrInfo *TII,
3152   MachineRegisterInfo &MRI,
3153   MachineBasicBlock &OrigBB,
3154   MachineBasicBlock &LoopBB,
3155   const DebugLoc &DL,
3156   const MachineOperand &IdxReg,
3157   unsigned InitReg,
3158   unsigned ResultReg,
3159   unsigned PhiReg,
3160   unsigned InitSaveExecReg,
3161   int Offset,
3162   bool UseGPRIdxMode,
3163   bool IsIndirectSrc) {
3164   MachineFunction *MF = OrigBB.getParent();
3165   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3166   const SIRegisterInfo *TRI = ST.getRegisterInfo();
3167   MachineBasicBlock::iterator I = LoopBB.begin();
3168 
3169   const TargetRegisterClass *BoolRC = TRI->getBoolRC();
3170   unsigned PhiExec = MRI.createVirtualRegister(BoolRC);
3171   unsigned NewExec = MRI.createVirtualRegister(BoolRC);
3172   unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3173   unsigned CondReg = MRI.createVirtualRegister(BoolRC);
3174 
3175   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
3176     .addReg(InitReg)
3177     .addMBB(&OrigBB)
3178     .addReg(ResultReg)
3179     .addMBB(&LoopBB);
3180 
3181   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
3182     .addReg(InitSaveExecReg)
3183     .addMBB(&OrigBB)
3184     .addReg(NewExec)
3185     .addMBB(&LoopBB);
3186 
3187   // Read the next variant <- also loop target.
3188   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
3189     .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
3190 
  // Compare the just-read index value against every lane's Idx value.
3192   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
3193     .addReg(CurrentIdxReg)
3194     .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
3195 
  // Update EXEC, saving the original EXEC value into NewExec.
3197   BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32
3198                                                 : AMDGPU::S_AND_SAVEEXEC_B64),
3199           NewExec)
3200     .addReg(CondReg, RegState::Kill);
3201 
3202   MRI.setSimpleHint(NewExec, CondReg);
3203 
3204   if (UseGPRIdxMode) {
3205     unsigned IdxReg;
3206     if (Offset == 0) {
3207       IdxReg = CurrentIdxReg;
3208     } else {
3209       IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3210       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
3211         .addReg(CurrentIdxReg, RegState::Kill)
3212         .addImm(Offset);
3213     }
3214     unsigned IdxMode = IsIndirectSrc ?
3215       AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
3216     MachineInstr *SetOn =
3217       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3218       .addReg(IdxReg, RegState::Kill)
3219       .addImm(IdxMode);
3220     SetOn->getOperand(3).setIsUndef();
3221   } else {
3222     // Move index from VCC into M0
3223     if (Offset == 0) {
3224       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3225         .addReg(CurrentIdxReg, RegState::Kill);
3226     } else {
3227       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
3228         .addReg(CurrentIdxReg, RegState::Kill)
3229         .addImm(Offset);
3230     }
3231   }
3232 
3233   // Update EXEC, switch all done bits to 0 and all todo bits to 1.
3234   unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3235   MachineInstr *InsertPt =
3236     BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term
3237                                                   : AMDGPU::S_XOR_B64_term), Exec)
3238       .addReg(Exec)
3239       .addReg(NewExec);
3240 
3241   // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
3242   // s_cbranch_scc0?
3243 
3244   // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
3245   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
3246     .addMBB(&LoopBB);
3247 
3248   return InsertPt->getIterator();
3249 }
3250 
// This has slightly sub-optimal register allocation when the source vector is
// killed by the read. The register allocator does not understand that the kill
// is per-workitem, so the source is kept alive for the whole loop and we end up
// not reusing a subregister from it, using one more VGPR than necessary. This
// extra register was saved back when this was expanded after register
// allocation.
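//
// The overall structure emitted here is roughly (with a placeholder name for
// the saved exec mask, and the _b32 forms on wave32):
//   s_mov_b64 s_save, exec
//   <waterfall loop from emitLoadM0FromVGPRLoop>
//   s_mov_b64 exec, s_save   ; first instruction of the remainder block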
3256 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
3257                                                   MachineBasicBlock &MBB,
3258                                                   MachineInstr &MI,
3259                                                   unsigned InitResultReg,
3260                                                   unsigned PhiReg,
3261                                                   int Offset,
3262                                                   bool UseGPRIdxMode,
3263                                                   bool IsIndirectSrc) {
3264   MachineFunction *MF = MBB.getParent();
3265   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3266   const SIRegisterInfo *TRI = ST.getRegisterInfo();
3267   MachineRegisterInfo &MRI = MF->getRegInfo();
3268   const DebugLoc &DL = MI.getDebugLoc();
3269   MachineBasicBlock::iterator I(&MI);
3270 
3271   const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
3272   unsigned DstReg = MI.getOperand(0).getReg();
3273   unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC);
3274   unsigned TmpExec = MRI.createVirtualRegister(BoolXExecRC);
3275   unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3276   unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
3277 
3278   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
3279 
3280   // Save the EXEC mask
3281   BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec)
3282     .addReg(Exec);
3283 
3284   MachineBasicBlock *LoopBB;
3285   MachineBasicBlock *RemainderBB;
3286   std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, MBB, false);
3287 
3288   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3289 
3290   auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
3291                                       InitResultReg, DstReg, PhiReg, TmpExec,
3292                                       Offset, UseGPRIdxMode, IsIndirectSrc);
3293 
3294   MachineBasicBlock::iterator First = RemainderBB->begin();
3295   BuildMI(*RemainderBB, First, DL, TII->get(MovExecOpc), Exec)
3296     .addReg(SaveExec);
3297 
3298   return InsPt;
3299 }
3300 
3301 // Returns subreg index, offset
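// For example, with a 128-bit (4 x 32-bit) register class, an in-bounds
// Offset of 2 yields {sub2, 0}, while an out-of-bounds Offset of 5 yields
// {sub0, 5} and leaves the offset to the dynamic index.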
3302 static std::pair<unsigned, int>
3303 computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
3304                             const TargetRegisterClass *SuperRC,
3305                             unsigned VecReg,
3306                             int Offset) {
3307   int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
3308 
3309   // Skip out of bounds offsets, or else we would end up using an undefined
3310   // register.
3311   if (Offset >= NumElts || Offset < 0)
3312     return std::make_pair(AMDGPU::sub0, Offset);
3313 
3314   return std::make_pair(AMDGPU::sub0 + Offset, 0);
3315 }
3316 
3317 // Return true if the index is an SGPR and was set.
3318 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
3319                                  MachineRegisterInfo &MRI,
3320                                  MachineInstr &MI,
3321                                  int Offset,
3322                                  bool UseGPRIdxMode,
3323                                  bool IsIndirectSrc) {
3324   MachineBasicBlock *MBB = MI.getParent();
3325   const DebugLoc &DL = MI.getDebugLoc();
3326   MachineBasicBlock::iterator I(&MI);
3327 
3328   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3329   const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
3330 
3331   assert(Idx->getReg() != AMDGPU::NoRegister);
3332 
3333   if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
3334     return false;
3335 
3336   if (UseGPRIdxMode) {
3337     unsigned IdxMode = IsIndirectSrc ?
3338       AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
3339     if (Offset == 0) {
3340       MachineInstr *SetOn =
3341           BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3342               .add(*Idx)
3343               .addImm(IdxMode);
3344 
3345       SetOn->getOperand(3).setIsUndef();
3346     } else {
3347       unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3348       BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
3349           .add(*Idx)
3350           .addImm(Offset);
3351       MachineInstr *SetOn =
3352         BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3353         .addReg(Tmp, RegState::Kill)
3354         .addImm(IdxMode);
3355 
3356       SetOn->getOperand(3).setIsUndef();
3357     }
3358 
3359     return true;
3360   }
3361 
3362   if (Offset == 0) {
3363     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3364       .add(*Idx);
3365   } else {
3366     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
3367       .add(*Idx)
3368       .addImm(Offset);
3369   }
3370 
3371   return true;
3372 }
3373 
3374 // Control flow needs to be inserted if indexing with a VGPR.
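// With an SGPR index a single v_movrels_b32 (or a plain v_mov_b32 in GPR
// indexing mode) suffices; with a VGPR index a waterfall loop has to be
// emitted around that move.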
3375 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
3376                                           MachineBasicBlock &MBB,
3377                                           const GCNSubtarget &ST) {
3378   const SIInstrInfo *TII = ST.getInstrInfo();
3379   const SIRegisterInfo &TRI = TII->getRegisterInfo();
3380   MachineFunction *MF = MBB.getParent();
3381   MachineRegisterInfo &MRI = MF->getRegInfo();
3382 
3383   unsigned Dst = MI.getOperand(0).getReg();
3384   unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
3385   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3386 
3387   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
3388 
3389   unsigned SubReg;
3390   std::tie(SubReg, Offset)
3391     = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
3392 
3393   bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
3394 
3395   if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
3396     MachineBasicBlock::iterator I(&MI);
3397     const DebugLoc &DL = MI.getDebugLoc();
3398 
3399     if (UseGPRIdxMode) {
3400       // TODO: Look at the uses to avoid the copy. This may require rescheduling
3401       // to avoid interfering with other uses, so probably requires a new
3402       // optimization pass.
3403       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3404         .addReg(SrcReg, RegState::Undef, SubReg)
3405         .addReg(SrcReg, RegState::Implicit)
3406         .addReg(AMDGPU::M0, RegState::Implicit);
3407       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3408     } else {
3409       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3410         .addReg(SrcReg, RegState::Undef, SubReg)
3411         .addReg(SrcReg, RegState::Implicit);
3412     }
3413 
3414     MI.eraseFromParent();
3415 
3416     return &MBB;
3417   }
3418 
3419   const DebugLoc &DL = MI.getDebugLoc();
3420   MachineBasicBlock::iterator I(&MI);
3421 
3422   unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3423   unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3424 
3425   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
3426 
3427   auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
3428                               Offset, UseGPRIdxMode, true);
3429   MachineBasicBlock *LoopBB = InsPt->getParent();
3430 
3431   if (UseGPRIdxMode) {
3432     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3433       .addReg(SrcReg, RegState::Undef, SubReg)
3434       .addReg(SrcReg, RegState::Implicit)
3435       .addReg(AMDGPU::M0, RegState::Implicit);
3436     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3437   } else {
3438     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3439       .addReg(SrcReg, RegState::Undef, SubReg)
3440       .addReg(SrcReg, RegState::Implicit);
3441   }
3442 
3443   MI.eraseFromParent();
3444 
3445   return LoopBB;
3446 }
3447 
3448 static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
3449                                  const TargetRegisterClass *VecRC) {
3450   switch (TRI.getRegSizeInBits(*VecRC)) {
3451   case 32: // 4 bytes
3452     return AMDGPU::V_MOVRELD_B32_V1;
3453   case 64: // 8 bytes
3454     return AMDGPU::V_MOVRELD_B32_V2;
3455   case 128: // 16 bytes
3456     return AMDGPU::V_MOVRELD_B32_V4;
3457   case 256: // 32 bytes
3458     return AMDGPU::V_MOVRELD_B32_V8;
3459   case 512: // 64 bytes
3460     return AMDGPU::V_MOVRELD_B32_V16;
3461   default:
3462     llvm_unreachable("unsupported size for MOVRELD pseudos");
3463   }
3464 }
3465 
3466 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3467                                           MachineBasicBlock &MBB,
3468                                           const GCNSubtarget &ST) {
3469   const SIInstrInfo *TII = ST.getInstrInfo();
3470   const SIRegisterInfo &TRI = TII->getRegisterInfo();
3471   MachineFunction *MF = MBB.getParent();
3472   MachineRegisterInfo &MRI = MF->getRegInfo();
3473 
3474   unsigned Dst = MI.getOperand(0).getReg();
3475   const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3476   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3477   const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3478   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3479   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3480 
3481   // This can be an immediate, but will be folded later.
3482   assert(Val->getReg());
3483 
3484   unsigned SubReg;
3485   std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3486                                                          SrcVec->getReg(),
3487                                                          Offset);
3488   bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
3489 
3490   if (Idx->getReg() == AMDGPU::NoRegister) {
3491     MachineBasicBlock::iterator I(&MI);
3492     const DebugLoc &DL = MI.getDebugLoc();
3493 
3494     assert(Offset == 0);
3495 
3496     BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
3497         .add(*SrcVec)
3498         .add(*Val)
3499         .addImm(SubReg);
3500 
3501     MI.eraseFromParent();
3502     return &MBB;
3503   }
3504 
3505   if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
3506     MachineBasicBlock::iterator I(&MI);
3507     const DebugLoc &DL = MI.getDebugLoc();
3508 
3509     if (UseGPRIdxMode) {
3510       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3511           .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
3512           .add(*Val)
3513           .addReg(Dst, RegState::ImplicitDefine)
3514           .addReg(SrcVec->getReg(), RegState::Implicit)
3515           .addReg(AMDGPU::M0, RegState::Implicit);
3516 
3517       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3518     } else {
3519       const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3520 
3521       BuildMI(MBB, I, DL, MovRelDesc)
3522           .addReg(Dst, RegState::Define)
3523           .addReg(SrcVec->getReg())
3524           .add(*Val)
3525           .addImm(SubReg - AMDGPU::sub0);
3526     }
3527 
3528     MI.eraseFromParent();
3529     return &MBB;
3530   }
3531 
3532   if (Val->isReg())
3533     MRI.clearKillFlags(Val->getReg());
3534 
3535   const DebugLoc &DL = MI.getDebugLoc();
3536 
3537   unsigned PhiReg = MRI.createVirtualRegister(VecRC);
3538 
3539   auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
3540                               Offset, UseGPRIdxMode, false);
3541   MachineBasicBlock *LoopBB = InsPt->getParent();
3542 
3543   if (UseGPRIdxMode) {
3544     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3545         .addReg(PhiReg, RegState::Undef, SubReg) // vdst
3546         .add(*Val)                               // src0
3547         .addReg(Dst, RegState::ImplicitDefine)
3548         .addReg(PhiReg, RegState::Implicit)
3549         .addReg(AMDGPU::M0, RegState::Implicit);
3550     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3551   } else {
3552     const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3553 
3554     BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
3555         .addReg(Dst, RegState::Define)
3556         .addReg(PhiReg)
3557         .add(*Val)
3558         .addImm(SubReg - AMDGPU::sub0);
3559   }
3560 
3561   MI.eraseFromParent();
3562 
3563   return LoopBB;
3564 }
3565 
3566 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3567   MachineInstr &MI, MachineBasicBlock *BB) const {
3568 
3569   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3570   MachineFunction *MF = BB->getParent();
3571   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3572 
3573   if (TII->isMIMG(MI)) {
3574     if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3575       report_fatal_error("missing mem operand from MIMG instruction");
3576     }
    // Add a memoperand for mimg instructions so that they aren't assumed to
    // be ordered memory instructions.
3579 
3580     return BB;
3581   }
3582 
3583   switch (MI.getOpcode()) {
3584   case AMDGPU::S_ADD_U64_PSEUDO:
3585   case AMDGPU::S_SUB_U64_PSEUDO: {
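    // Expand the 64-bit scalar add/sub into a carry chain over the 32-bit
    // halves, e.g. for the add case:
    //   s_add_u32  dst.sub0, src0.sub0, src1.sub0
    //   s_addc_u32 dst.sub1, src0.sub1, src1.sub1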
3586     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3587     const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3588     const SIRegisterInfo *TRI = ST.getRegisterInfo();
3589     const TargetRegisterClass *BoolRC = TRI->getBoolRC();
3590     const DebugLoc &DL = MI.getDebugLoc();
3591 
3592     MachineOperand &Dest = MI.getOperand(0);
3593     MachineOperand &Src0 = MI.getOperand(1);
3594     MachineOperand &Src1 = MI.getOperand(2);
3595 
3596     unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3597     unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3598 
3599     MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3600      Src0, BoolRC, AMDGPU::sub0,
3601      &AMDGPU::SReg_32_XM0RegClass);
3602     MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3603       Src0, BoolRC, AMDGPU::sub1,
3604       &AMDGPU::SReg_32_XM0RegClass);
3605 
3606     MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3607       Src1, BoolRC, AMDGPU::sub0,
3608       &AMDGPU::SReg_32_XM0RegClass);
3609     MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3610       Src1, BoolRC, AMDGPU::sub1,
3611       &AMDGPU::SReg_32_XM0RegClass);
3612 
3613     bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3614 
3615     unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3616     unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3617     BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3618       .add(Src0Sub0)
3619       .add(Src1Sub0);
3620     BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3621       .add(Src0Sub1)
3622       .add(Src1Sub1);
3623     BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3624       .addReg(DestSub0)
3625       .addImm(AMDGPU::sub0)
3626       .addReg(DestSub1)
3627       .addImm(AMDGPU::sub1);
3628     MI.eraseFromParent();
3629     return BB;
3630   }
3631   case AMDGPU::SI_INIT_M0: {
3632     BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
3633             TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3634         .add(MI.getOperand(0));
3635     MI.eraseFromParent();
3636     return BB;
3637   }
3638   case AMDGPU::SI_INIT_EXEC:
3639     // This should be before all vector instructions.
3640     BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3641             AMDGPU::EXEC)
3642         .addImm(MI.getOperand(0).getImm());
3643     MI.eraseFromParent();
3644     return BB;
3645 
3646   case AMDGPU::SI_INIT_EXEC_LO:
3647     // This should be before all vector instructions.
3648     BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B32),
3649             AMDGPU::EXEC_LO)
3650         .addImm(MI.getOperand(0).getImm());
3651     MI.eraseFromParent();
3652     return BB;
3653 
3654   case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3655     // Extract the thread count from an SGPR input and set EXEC accordingly.
3656     // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3657     //
3658     // S_BFE_U32 count, input, {shift, 7}
3659     // S_BFM_B64 exec, count, 0
3660     // S_CMP_EQ_U32 count, 64
3661     // S_CMOV_B64 exec, -1
3662     MachineInstr *FirstMI = &*BB->begin();
3663     MachineRegisterInfo &MRI = MF->getRegInfo();
3664     unsigned InputReg = MI.getOperand(0).getReg();
3665     unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3666     bool Found = false;
3667 
3668     // Move the COPY of the input reg to the beginning, so that we can use it.
3669     for (auto I = BB->begin(); I != &MI; I++) {
3670       if (I->getOpcode() != TargetOpcode::COPY ||
3671           I->getOperand(0).getReg() != InputReg)
3672         continue;
3673 
3674       if (I == FirstMI) {
3675         FirstMI = &*++BB->begin();
3676       } else {
3677         I->removeFromParent();
3678         BB->insert(FirstMI, &*I);
3679       }
3680       Found = true;
3681       break;
3682     }
3683     assert(Found);
3684     (void)Found;
3685 
3686     // This should be before all vector instructions.
3687     unsigned Mask = (getSubtarget()->getWavefrontSize() << 1) - 1;
3688     bool isWave32 = getSubtarget()->isWave32();
3689     unsigned Exec = isWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3690     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3691         .addReg(InputReg)
3692         .addImm((MI.getOperand(1).getImm() & Mask) | 0x70000);
3693     BuildMI(*BB, FirstMI, DebugLoc(),
3694             TII->get(isWave32 ? AMDGPU::S_BFM_B32 : AMDGPU::S_BFM_B64),
3695             Exec)
3696         .addReg(CountReg)
3697         .addImm(0);
3698     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3699         .addReg(CountReg, RegState::Kill)
3700         .addImm(getSubtarget()->getWavefrontSize());
3701     BuildMI(*BB, FirstMI, DebugLoc(),
3702             TII->get(isWave32 ? AMDGPU::S_CMOV_B32 : AMDGPU::S_CMOV_B64),
3703             Exec)
3704         .addImm(-1);
3705     MI.eraseFromParent();
3706     return BB;
3707   }
3708 
3709   case AMDGPU::GET_GROUPSTATICSIZE: {
3710     assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA ||
3711            getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL);
3712     DebugLoc DL = MI.getDebugLoc();
3713     BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
3714         .add(MI.getOperand(0))
3715         .addImm(MFI->getLDSSize());
3716     MI.eraseFromParent();
3717     return BB;
3718   }
3719   case AMDGPU::SI_INDIRECT_SRC_V1:
3720   case AMDGPU::SI_INDIRECT_SRC_V2:
3721   case AMDGPU::SI_INDIRECT_SRC_V4:
3722   case AMDGPU::SI_INDIRECT_SRC_V8:
3723   case AMDGPU::SI_INDIRECT_SRC_V16:
3724     return emitIndirectSrc(MI, *BB, *getSubtarget());
3725   case AMDGPU::SI_INDIRECT_DST_V1:
3726   case AMDGPU::SI_INDIRECT_DST_V2:
3727   case AMDGPU::SI_INDIRECT_DST_V4:
3728   case AMDGPU::SI_INDIRECT_DST_V8:
3729   case AMDGPU::SI_INDIRECT_DST_V16:
3730     return emitIndirectDst(MI, *BB, *getSubtarget());
3731   case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3732   case AMDGPU::SI_KILL_I1_PSEUDO:
3733     return splitKillBlock(MI, BB);
3734   case AMDGPU::V_CNDMASK_B64_PSEUDO: {
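    // Split the 64-bit select into two v_cndmask_b32_e64 on the sub0/sub1
    // halves and recombine the results with a REG_SEQUENCE.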
3735     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3736     const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3737     const SIRegisterInfo *TRI = ST.getRegisterInfo();
3738 
3739     unsigned Dst = MI.getOperand(0).getReg();
3740     unsigned Src0 = MI.getOperand(1).getReg();
3741     unsigned Src1 = MI.getOperand(2).getReg();
3742     const DebugLoc &DL = MI.getDebugLoc();
3743     unsigned SrcCond = MI.getOperand(3).getReg();
3744 
3745     unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3746     unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3747     const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
3748     unsigned SrcCondCopy = MRI.createVirtualRegister(CondRC);
3749 
3750     BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3751       .addReg(SrcCond);
3752     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3753       .addImm(0)
3754       .addReg(Src0, 0, AMDGPU::sub0)
3755       .addImm(0)
3756       .addReg(Src1, 0, AMDGPU::sub0)
3757       .addReg(SrcCondCopy);
3758     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3759       .addImm(0)
3760       .addReg(Src0, 0, AMDGPU::sub1)
3761       .addImm(0)
3762       .addReg(Src1, 0, AMDGPU::sub1)
3763       .addReg(SrcCondCopy);
3764 
3765     BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3766       .addReg(DstLo)
3767       .addImm(AMDGPU::sub0)
3768       .addReg(DstHi)
3769       .addImm(AMDGPU::sub1);
3770     MI.eraseFromParent();
3771     return BB;
3772   }
3773   case AMDGPU::SI_BR_UNDEF: {
3774     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3775     const DebugLoc &DL = MI.getDebugLoc();
3776     MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3777                            .add(MI.getOperand(0));
3778     Br->getOperand(1).setIsUndef(true); // read undef SCC
3779     MI.eraseFromParent();
3780     return BB;
3781   }
3782   case AMDGPU::ADJCALLSTACKUP:
3783   case AMDGPU::ADJCALLSTACKDOWN: {
3784     const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3785     MachineInstrBuilder MIB(*MF, &MI);
3786 
    // Add an implicit use of the frame offset reg to prevent the restore copy
    // inserted after the call from being reordered after stack operations in
    // the caller's frame.
3790     MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
3791         .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit)
3792         .addReg(Info->getFrameOffsetReg(), RegState::Implicit);
3793     return BB;
3794   }
3795   case AMDGPU::SI_CALL_ISEL: {
3796     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3797     const DebugLoc &DL = MI.getDebugLoc();
3798 
3799     unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
3800 
3801     MachineInstrBuilder MIB;
3802     MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg);
3803 
3804     for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
3805       MIB.add(MI.getOperand(I));
3806 
3807     MIB.cloneMemRefs(MI);
3808     MI.eraseFromParent();
3809     return BB;
3810   }
3811   case AMDGPU::V_ADD_I32_e32:
3812   case AMDGPU::V_SUB_I32_e32:
3813   case AMDGPU::V_SUBREV_I32_e32: {
3814     // TODO: Define distinct V_*_I32_Pseudo instructions instead.
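    // If the e32 form does not exist on this subtarget, fall back to the VOP3
    // (e64) encoding, which additionally needs an explicit carry-out register
    // and a clamp operand.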
3815     const DebugLoc &DL = MI.getDebugLoc();
3816     unsigned Opc = MI.getOpcode();
3817 
3818     bool NeedClampOperand = false;
3819     if (TII->pseudoToMCOpcode(Opc) == -1) {
3820       Opc = AMDGPU::getVOPe64(Opc);
3821       NeedClampOperand = true;
3822     }
3823 
3824     auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg());
3825     if (TII->isVOP3(*I)) {
3826       const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3827       const SIRegisterInfo *TRI = ST.getRegisterInfo();
3828       I.addReg(TRI->getVCC(), RegState::Define);
3829     }
3830     I.add(MI.getOperand(1))
3831      .add(MI.getOperand(2));
3832     if (NeedClampOperand)
3833       I.addImm(0); // clamp bit for e64 encoding
3834 
3835     TII->legalizeOperands(*I);
3836 
3837     MI.eraseFromParent();
3838     return BB;
3839   }
3840   case AMDGPU::DS_GWS_INIT:
3841   case AMDGPU::DS_GWS_SEMA_V:
3842   case AMDGPU::DS_GWS_SEMA_BR:
3843   case AMDGPU::DS_GWS_SEMA_P:
3844   case AMDGPU::DS_GWS_SEMA_RELEASE_ALL:
3845   case AMDGPU::DS_GWS_BARRIER:
    // An s_waitcnt 0 is required to be the instruction immediately following.
3847     if (getSubtarget()->hasGWSAutoReplay()) {
3848       bundleInstWithWaitcnt(MI);
3849       return BB;
3850     }
3851 
3852     return emitGWSMemViolTestLoop(MI, BB);
3853   default:
3854     return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
3855   }
3856 }
3857 
3858 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3859   return isTypeLegal(VT.getScalarType());
3860 }
3861 
3862 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3863   // This currently forces unfolding various combinations of fsub into fma with
3864   // free fneg'd operands. As long as we have fast FMA (controlled by
3865   // isFMAFasterThanFMulAndFAdd), we should perform these.
3866 
3867   // When fma is quarter rate, for f64 where add / sub are at best half rate,
3868   // most of these combines appear to be cycle neutral but save on instruction
3869   // count / code size.
3870   return true;
3871 }
3872 
3873 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3874                                          EVT VT) const {
3875   if (!VT.isVector()) {
3876     return MVT::i1;
3877   }
3878   return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
3879 }
3880 
3881 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3882   // TODO: Should i16 be used always if legal? For now it would force VALU
3883   // shifts.
3884   return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
3885 }
3886 
// Answering this is somewhat tricky and depends on the specific device, since
// different devices have different rates for fma and for f64 operations.
3889 //
3890 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3891 // regardless of which device (although the number of cycles differs between
3892 // devices), so it is always profitable for f64.
3893 //
3894 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3895 // only on full rate devices. Normally, we should prefer selecting v_mad_f32
3896 // which we can always do even without fused FP ops since it returns the same
3897 // result as the separate operations and since it is always full
3898 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
3899 // however does not support denormals, so we do report fma as faster if we have
3900 // a fast fma device and require denormals.
3901 //
3902 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3903   VT = VT.getScalarType();
3904 
3905   switch (VT.getSimpleVT().SimpleTy) {
3906   case MVT::f32: {
3907     // This is as fast on some subtargets. However, we always have full rate f32
3908     // mad available which returns the same result as the separate operations
3909     // which we should prefer over fma. We can't use this if we want to support
3910     // denormals, so only report this in these cases.
3911     if (Subtarget->hasFP32Denormals())
3912       return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
3913 
3914     // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
3915     return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
3916   }
3917   case MVT::f64:
3918     return true;
3919   case MVT::f16:
3920     return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
3921   default:
3922     break;
3923   }
3924 
3925   return false;
3926 }
3927 
3928 //===----------------------------------------------------------------------===//
3929 // Custom DAG Lowering Operations
3930 //===----------------------------------------------------------------------===//
3931 
3932 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3933 // wider vector type is legal.
3934 SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
3935                                              SelectionDAG &DAG) const {
3936   unsigned Opc = Op.getOpcode();
3937   EVT VT = Op.getValueType();
3938   assert(VT == MVT::v4f16);
3939 
3940   SDValue Lo, Hi;
3941   std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3942 
3943   SDLoc SL(Op);
3944   SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
3945                              Op->getFlags());
3946   SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
3947                              Op->getFlags());
3948 
3949   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3950 }
3951 
3952 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3953 // wider vector type is legal.
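// For example, a v4f16 fadd is lowered as two v2f16 fadds on the low and high
// halves, with the results concatenated back into a v4f16 value.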
3954 SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
3955                                               SelectionDAG &DAG) const {
3956   unsigned Opc = Op.getOpcode();
3957   EVT VT = Op.getValueType();
3958   assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3959 
3960   SDValue Lo0, Hi0;
3961   std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3962   SDValue Lo1, Hi1;
3963   std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3964 
3965   SDLoc SL(Op);
3966 
3967   SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
3968                              Op->getFlags());
3969   SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
3970                              Op->getFlags());
3971 
3972   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3973 }
3974 
3975 SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op,
3976                                               SelectionDAG &DAG) const {
3977   unsigned Opc = Op.getOpcode();
3978   EVT VT = Op.getValueType();
3979   assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3980 
3981   SDValue Lo0, Hi0;
3982   std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3983   SDValue Lo1, Hi1;
3984   std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3985   SDValue Lo2, Hi2;
3986   std::tie(Lo2, Hi2) = DAG.SplitVectorOperand(Op.getNode(), 2);
3987 
3988   SDLoc SL(Op);
3989 
3990   SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, Lo2,
3991                              Op->getFlags());
3992   SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, Hi2,
3993                              Op->getFlags());
3994 
3995   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3996 }
3997 
3998 
3999 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
4000   switch (Op.getOpcode()) {
4001   default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
4002   case ISD::BRCOND: return LowerBRCOND(Op, DAG);
4003   case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
4004   case ISD::LOAD: {
4005     SDValue Result = LowerLOAD(Op, DAG);
4006     assert((!Result.getNode() ||
4007             Result.getNode()->getNumValues() == 2) &&
4008            "Load should return a value and a chain");
4009     return Result;
4010   }
4011 
4012   case ISD::FSIN:
4013   case ISD::FCOS:
4014     return LowerTrig(Op, DAG);
4015   case ISD::SELECT: return LowerSELECT(Op, DAG);
4016   case ISD::FDIV: return LowerFDIV(Op, DAG);
4017   case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
4018   case ISD::STORE: return LowerSTORE(Op, DAG);
4019   case ISD::GlobalAddress: {
4020     MachineFunction &MF = DAG.getMachineFunction();
4021     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
4022     return LowerGlobalAddress(MFI, Op, DAG);
4023   }
4024   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
4025   case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
4026   case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
4027   case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
4028   case ISD::INSERT_SUBVECTOR:
4029     return lowerINSERT_SUBVECTOR(Op, DAG);
4030   case ISD::INSERT_VECTOR_ELT:
4031     return lowerINSERT_VECTOR_ELT(Op, DAG);
4032   case ISD::EXTRACT_VECTOR_ELT:
4033     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
4034   case ISD::VECTOR_SHUFFLE:
4035     return lowerVECTOR_SHUFFLE(Op, DAG);
4036   case ISD::BUILD_VECTOR:
4037     return lowerBUILD_VECTOR(Op, DAG);
4038   case ISD::FP_ROUND:
4039     return lowerFP_ROUND(Op, DAG);
4040   case ISD::TRAP:
4041     return lowerTRAP(Op, DAG);
4042   case ISD::DEBUGTRAP:
4043     return lowerDEBUGTRAP(Op, DAG);
4044   case ISD::FABS:
4045   case ISD::FNEG:
4046   case ISD::FCANONICALIZE:
4047     return splitUnaryVectorOp(Op, DAG);
4048   case ISD::FMINNUM:
4049   case ISD::FMAXNUM:
4050     return lowerFMINNUM_FMAXNUM(Op, DAG);
4051   case ISD::FMA:
4052     return splitTernaryVectorOp(Op, DAG);
4053   case ISD::SHL:
4054   case ISD::SRA:
4055   case ISD::SRL:
4056   case ISD::ADD:
4057   case ISD::SUB:
4058   case ISD::MUL:
4059   case ISD::SMIN:
4060   case ISD::SMAX:
4061   case ISD::UMIN:
4062   case ISD::UMAX:
4063   case ISD::FADD:
4064   case ISD::FMUL:
4065   case ISD::FMINNUM_IEEE:
4066   case ISD::FMAXNUM_IEEE:
4067     return splitBinaryVectorOp(Op, DAG);
4068   }
4069   return SDValue();
4070 }
4071 
4072 static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
4073                                        const SDLoc &DL,
4074                                        SelectionDAG &DAG, bool Unpacked) {
4075   if (!LoadVT.isVector())
4076     return Result;
4077 
4078   if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
4079     // Truncate to v2i16/v4i16.
4080     EVT IntLoadVT = LoadVT.changeTypeToInteger();
4081 
    // Work around the legalizer not scalarizing the truncate after vector op
    // legalization by not creating an intermediate vector truncate.
4084     SmallVector<SDValue, 4> Elts;
4085     DAG.ExtractVectorElements(Result, Elts);
4086     for (SDValue &Elt : Elts)
4087       Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
4088 
4089     Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
4090 
4091     // Bitcast to original type (v2f16/v4f16).
4092     return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
4093   }
4094 
4095   // Cast back to the original packed type.
4096   return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
4097 }
4098 
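// Re-emit a d16 memory intrinsic with the given opcode. When the subtarget
// returns unpacked D16 results, widen the result type to i32 elements and
// convert back to the original f16 vector type afterwards.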
4099 SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
4100                                               MemSDNode *M,
4101                                               SelectionDAG &DAG,
4102                                               ArrayRef<SDValue> Ops,
4103                                               bool IsIntrinsic) const {
4104   SDLoc DL(M);
4105 
4106   bool Unpacked = Subtarget->hasUnpackedD16VMem();
4107   EVT LoadVT = M->getValueType(0);
4108 
4109   EVT EquivLoadVT = LoadVT;
  if (Unpacked && LoadVT.isVector()) {
    EquivLoadVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                   LoadVT.getVectorNumElements());
  }
4115 
4116   // Change from v4f16/v2f16 to EquivLoadVT.
4117   SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
4118 
4119   SDValue Load
4120     = DAG.getMemIntrinsicNode(
4121       IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
4122       VTList, Ops, M->getMemoryVT(),
4123       M->getMemOperand());
4124   if (!Unpacked) // Just adjusted the opcode.
4125     return Load;
4126 
4127   SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);
4128 
4129   return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
4130 }
4131 
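// Lower an integer-compare intrinsic to AMDGPUISD::SETCC, producing a
// wavefront-sized lane mask that is zero-extended or truncated to the
// requested result type.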
4132 static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
4133                                   SDNode *N, SelectionDAG &DAG) {
4134   EVT VT = N->getValueType(0);
4135   const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
4136   int CondCode = CD->getSExtValue();
4137   if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
4138       CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
4139     return DAG.getUNDEF(VT);
4140 
4141   ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
4142 
4143   SDValue LHS = N->getOperand(1);
4144   SDValue RHS = N->getOperand(2);
4145 
4146   SDLoc DL(N);
4147 
4148   EVT CmpVT = LHS.getValueType();
4149   if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
4150     unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
4151       ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
4152     LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
4153     RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
4154   }
4155 
4156   ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
4157 
4158   unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
4159   EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
4160 
4161   SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS,
4162                               DAG.getCondCode(CCOpcode));
4163   if (VT.bitsEq(CCVT))
4164     return SetCC;
4165   return DAG.getZExtOrTrunc(SetCC, DL, VT);
4166 }
4167 
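// Lower a floating-point-compare intrinsic to AMDGPUISD::SETCC, extending f16
// operands to f32 when f16 is not legal on the subtarget.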
4168 static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
4169                                   SDNode *N, SelectionDAG &DAG) {
4170   EVT VT = N->getValueType(0);
4171   const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
4172 
4173   int CondCode = CD->getSExtValue();
4174   if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
4175       CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) {
4176     return DAG.getUNDEF(VT);
4177   }
4178 
4179   SDValue Src0 = N->getOperand(1);
4180   SDValue Src1 = N->getOperand(2);
4181   EVT CmpVT = Src0.getValueType();
4182   SDLoc SL(N);
4183 
4184   if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
4185     Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
4186     Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
4187   }
4188 
4189   FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
4190   ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
4191   unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
4192   EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
4193   SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0,
4194                               Src1, DAG.getCondCode(CCOpcode));
4195   if (VT.bitsEq(CCVT))
4196     return SetCC;
4197   return DAG.getZExtOrTrunc(SetCC, SL, VT);
4198 }
4199 
4200 void SITargetLowering::ReplaceNodeResults(SDNode *N,
4201                                           SmallVectorImpl<SDValue> &Results,
4202                                           SelectionDAG &DAG) const {
4203   switch (N->getOpcode()) {
4204   case ISD::INSERT_VECTOR_ELT: {
4205     if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
4206       Results.push_back(Res);
4207     return;
4208   }
4209   case ISD::EXTRACT_VECTOR_ELT: {
4210     if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
4211       Results.push_back(Res);
4212     return;
4213   }
4214   case ISD::INTRINSIC_WO_CHAIN: {
4215     unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
4216     switch (IID) {
4217     case Intrinsic::amdgcn_cvt_pkrtz: {
4218       SDValue Src0 = N->getOperand(1);
4219       SDValue Src1 = N->getOperand(2);
4220       SDLoc SL(N);
4221       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
4222                                 Src0, Src1);
4223       Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
4224       return;
4225     }
4226     case Intrinsic::amdgcn_cvt_pknorm_i16:
4227     case Intrinsic::amdgcn_cvt_pknorm_u16:
4228     case Intrinsic::amdgcn_cvt_pk_i16:
4229     case Intrinsic::amdgcn_cvt_pk_u16: {
4230       SDValue Src0 = N->getOperand(1);
4231       SDValue Src1 = N->getOperand(2);
4232       SDLoc SL(N);
4233       unsigned Opcode;
4234 
4235       if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
4236         Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
4237       else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
4238         Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
4239       else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
4240         Opcode = AMDGPUISD::CVT_PK_I16_I32;
4241       else
4242         Opcode = AMDGPUISD::CVT_PK_U16_U32;
4243 
4244       EVT VT = N->getValueType(0);
4245       if (isTypeLegal(VT))
4246         Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
4247       else {
4248         SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
4249         Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
4250       }
4251       return;
4252     }
4253     }
4254     break;
4255   }
4256   case ISD::INTRINSIC_W_CHAIN: {
4257     if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
4258       Results.push_back(Res);
4259       Results.push_back(Res.getValue(1));
4260       return;
4261     }
4262 
4263     break;
4264   }
4265   case ISD::SELECT: {
4266     SDLoc SL(N);
4267     EVT VT = N->getValueType(0);
4268     EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
4269     SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
4270     SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
4271 
4272     EVT SelectVT = NewVT;
4273     if (NewVT.bitsLT(MVT::i32)) {
4274       LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
4275       RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
4276       SelectVT = MVT::i32;
4277     }
4278 
4279     SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
4280                                     N->getOperand(0), LHS, RHS);
4281 
4282     if (NewVT != SelectVT)
4283       NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
4284     Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
4285     return;
4286   }
4287   case ISD::FNEG: {
4288     if (N->getValueType(0) != MVT::v2f16)
4289       break;
4290 
4291     SDLoc SL(N);
4292     SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
4293 
4294     SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
4295                              BC,
4296                              DAG.getConstant(0x80008000, SL, MVT::i32));
4297     Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
4298     return;
4299   }
4300   case ISD::FABS: {
4301     if (N->getValueType(0) != MVT::v2f16)
4302       break;
4303 
4304     SDLoc SL(N);
4305     SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
4306 
4307     SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
4308                              BC,
4309                              DAG.getConstant(0x7fff7fff, SL, MVT::i32));
4310     Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
4311     return;
4312   }
4313   default:
4314     break;
4315   }
4316 }
4317 
4318 /// Helper function for LowerBRCOND
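/// Returns the first user of \p Value whose opcode is \p Opcode, or null if
/// there is no such user.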
4319 static SDNode *findUser(SDValue Value, unsigned Opcode) {
4320 
4321   SDNode *Parent = Value.getNode();
4322   for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
4323        I != E; ++I) {
4324 
4325     if (I.getUse().get() != Value)
4326       continue;
4327 
4328     if (I->getOpcode() == Opcode)
4329       return *I;
4330   }
4331   return nullptr;
4332 }
4333 
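// Map a control-flow intrinsic (amdgcn.if/else/loop) to the corresponding
// AMDGPUISD opcode, or return 0 if the node is not one of them.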
4334 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
4335   if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
4336     switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
4337     case Intrinsic::amdgcn_if:
4338       return AMDGPUISD::IF;
4339     case Intrinsic::amdgcn_else:
4340       return AMDGPUISD::ELSE;
4341     case Intrinsic::amdgcn_loop:
4342       return AMDGPUISD::LOOP;
4343     case Intrinsic::amdgcn_end_cf:
4344       llvm_unreachable("should not occur");
4345     default:
4346       return 0;
4347     }
4348   }
4349 
4350   // break, if_break, else_break are all only used as inputs to loop, not
4351   // directly as branch conditions.
4352   return 0;
4353 }
4354 
4355 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
4356   const Triple &TT = getTargetMachine().getTargetTriple();
4357   return (GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4358           GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4359          AMDGPU::shouldEmitConstantsToTextSection(TT);
4360 }
4361 
4362 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
4363   // FIXME: Either avoid relying on address space here or change the default
4364   // address space for functions to avoid the explicit check.
4365   return (GV->getValueType()->isFunctionTy() ||
4366           GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
4367           GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4368           GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4369          !shouldEmitFixup(GV) &&
4370          !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
4371 }
4372 
4373 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
4374   return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
4375 }
4376 
/// This transforms the control flow intrinsics to get the branch destination
/// as the last parameter, and also switches the branch target with BR if the
/// need arises.
4379 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
4380                                       SelectionDAG &DAG) const {
4381   SDLoc DL(BRCOND);
4382 
4383   SDNode *Intr = BRCOND.getOperand(1).getNode();
4384   SDValue Target = BRCOND.getOperand(2);
4385   SDNode *BR = nullptr;
4386   SDNode *SetCC = nullptr;
4387 
4388   if (Intr->getOpcode() == ISD::SETCC) {
4389     // As long as we negate the condition everything is fine
4390     SetCC = Intr;
4391     Intr = SetCC->getOperand(0).getNode();
4392 
4393   } else {
4394     // Get the target from BR if we don't negate the condition
4395     BR = findUser(BRCOND, ISD::BR);
4396     Target = BR->getOperand(1);
4397   }
4398 
4399   // FIXME: This changes the types of the intrinsics instead of introducing new
4400   // nodes with the correct types.
4401   // e.g. llvm.amdgcn.loop
4402 
4403   // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
4404   // =>     t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
4405 
4406   unsigned CFNode = isCFIntrinsic(Intr);
4407   if (CFNode == 0) {
4408     // This is a uniform branch so we don't need to legalize.
4409     return BRCOND;
4410   }
4411 
4412   bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
4413                    Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
4414 
4415   assert(!SetCC ||
4416         (SetCC->getConstantOperandVal(1) == 1 &&
4417          cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
4418                                                              ISD::SETNE));
4419 
4420   // operands of the new intrinsic call
4421   SmallVector<SDValue, 4> Ops;
4422   if (HaveChain)
4423     Ops.push_back(BRCOND.getOperand(0));
4424 
4425   Ops.append(Intr->op_begin() + (HaveChain ?  2 : 1), Intr->op_end());
4426   Ops.push_back(Target);
4427 
4428   ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
4429 
4430   // build the new intrinsic call
4431   SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
4432 
4433   if (!HaveChain) {
4434     SDValue Ops[] =  {
4435       SDValue(Result, 0),
4436       BRCOND.getOperand(0)
4437     };
4438 
4439     Result = DAG.getMergeValues(Ops, DL).getNode();
4440   }
4441 
4442   if (BR) {
4443     // Give the branch instruction our target
4444     SDValue Ops[] = {
4445       BR->getOperand(0),
4446       BRCOND.getOperand(2)
4447     };
4448     SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
4449     DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
4450     BR = NewBR.getNode();
4451   }
4452 
4453   SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
4454 
4455   // Copy the intrinsic results to registers
4456   for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
4457     SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
4458     if (!CopyToReg)
4459       continue;
4460 
4461     Chain = DAG.getCopyToReg(
4462       Chain, DL,
4463       CopyToReg->getOperand(1),
4464       SDValue(Result, i - 1),
4465       SDValue());
4466 
4467     DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
4468   }
4469 
4470   // Remove the old intrinsic from the chain
4471   DAG.ReplaceAllUsesOfValueWith(
4472     SDValue(Intr, Intr->getNumValues() - 1),
4473     Intr->getOperand(0));
4474 
4475   return Chain;
4476 }
4477 
4478 SDValue SITargetLowering::LowerRETURNADDR(SDValue Op,
4479                                           SelectionDAG &DAG) const {
4480   MVT VT = Op.getSimpleValueType();
4481   SDLoc DL(Op);
4482   // Checking the depth
4483   if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0)
4484     return DAG.getConstant(0, DL, VT);
4485 
4486   MachineFunction &MF = DAG.getMachineFunction();
4487   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4488   // Check for kernel and shader functions
4489   if (Info->isEntryFunction())
4490     return DAG.getConstant(0, DL, VT);
4491 
4492   MachineFrameInfo &MFI = MF.getFrameInfo();
4493   // There is a call to @llvm.returnaddress in this function
4494   MFI.setReturnAddressIsTaken(true);
4495 
4496   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
4497   // Get the return address reg and mark it as an implicit live-in
  unsigned Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                              getRegClassFor(VT, Op.getNode()->isDivergent()));
4499 
4500   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
4501 }
4502 
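// Extend or truncate \p Op so that it has the floating-point type \p VT.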
4503 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
4504                                             SDValue Op,
4505                                             const SDLoc &DL,
4506                                             EVT VT) const {
  return Op.getValueType().bitsLE(VT) ?
      DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
      DAG.getNode(ISD::FP_ROUND, DL, VT, Op,
                  DAG.getTargetConstant(0, DL, MVT::i32));
4510 }
4511 
4512 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
4513   assert(Op.getValueType() == MVT::f16 &&
4514          "Do not know how to custom lower FP_ROUND for non-f16 type");
4515 
4516   SDValue Src = Op.getOperand(0);
4517   EVT SrcVT = Src.getValueType();
4518   if (SrcVT != MVT::f64)
4519     return Op;
4520 
4521   SDLoc DL(Op);
4522 
4523   SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
4524   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
4525   return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
4526 }
4527 
4528 SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
4529                                                SelectionDAG &DAG) const {
4530   EVT VT = Op.getValueType();
4531   const MachineFunction &MF = DAG.getMachineFunction();
4532   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4533   bool IsIEEEMode = Info->getMode().IEEE;
4534 
  // FIXME: Assert during selection that this is only selected for
4536   // ieee_mode. Currently a combine can produce the ieee version for non-ieee
4537   // mode functions, but this happens to be OK since it's only done in cases
4538   // where there is known no sNaN.
4539   if (IsIEEEMode)
4540     return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);
4541 
4542   if (VT == MVT::v4f16)
4543     return splitBinaryVectorOp(Op, DAG);
4544   return Op;
4545 }
4546 
4547 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
4548   SDLoc SL(Op);
4549   SDValue Chain = Op.getOperand(0);
4550 
4551   if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4552       !Subtarget->isTrapHandlerEnabled())
4553     return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
4554 
4555   MachineFunction &MF = DAG.getMachineFunction();
4556   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4557   unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4558   assert(UserSGPR != AMDGPU::NoRegister);
4559   SDValue QueuePtr = CreateLiveInRegister(
4560     DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4561   SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
4562   SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
4563                                    QueuePtr, SDValue());
4564   SDValue Ops[] = {
4565     ToReg,
4566     DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16),
4567     SGPR01,
4568     ToReg.getValue(1)
4569   };
4570   return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4571 }
4572 
4573 SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
4574   SDLoc SL(Op);
4575   SDValue Chain = Op.getOperand(0);
4576   MachineFunction &MF = DAG.getMachineFunction();
4577 
4578   if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4579       !Subtarget->isTrapHandlerEnabled()) {
4580     DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
4581                                      "debugtrap handler not supported",
4582                                      Op.getDebugLoc(),
4583                                      DS_Warning);
4584     LLVMContext &Ctx = MF.getFunction().getContext();
4585     Ctx.diagnose(NoTrap);
4586     return Chain;
4587   }
4588 
4589   SDValue Ops[] = {
4590     Chain,
4591     DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16)
4592   };
4593   return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4594 }
4595 
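// Return the high 32 bits of the aperture base for the given LDS or private
// address space, read either from a hardware register (s_getreg) or from the
// queue pointer in memory.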
4596 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
4597                                              SelectionDAG &DAG) const {
4598   // FIXME: Use inline constants (src_{shared, private}_base) instead.
4599   if (Subtarget->hasApertureRegs()) {
4600     unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
4601         AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
4602         AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
4603     unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
4604         AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
4605         AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
4606     unsigned Encoding =
4607         AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
4608         Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
4609         WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
4610 
4611     SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
4612     SDValue ApertureReg = SDValue(
4613         DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
4614     SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
4615     return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
4616   }
4617 
4618   MachineFunction &MF = DAG.getMachineFunction();
4619   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4620   unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4621   assert(UserSGPR != AMDGPU::NoRegister);
4622 
4623   SDValue QueuePtr = CreateLiveInRegister(
4624     DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4625 
4626   // Offset into amd_queue_t for group_segment_aperture_base_hi /
4627   // private_segment_aperture_base_hi.
4628   uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
4629 
4630   SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
4631 
4632   // TODO: Use custom target PseudoSourceValue.
4633   // TODO: We should use the value from the IR intrinsic call, but it might not
4634   // be available and how do we get it?
4635   Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
4636                                               AMDGPUAS::CONSTANT_ADDRESS));
4637 
4638   MachinePointerInfo PtrInfo(V, StructOffset);
4639   return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
4640                      MinAlign(64, StructOffset),
4641                      MachineMemOperand::MODereferenceable |
4642                          MachineMemOperand::MOInvariant);
4643 }
4644 
4645 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
4646                                              SelectionDAG &DAG) const {
4647   SDLoc SL(Op);
4648   const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
4649 
4650   SDValue Src = ASC->getOperand(0);
4651   SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
4652 
4653   const AMDGPUTargetMachine &TM =
4654     static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
4655 
4656   // flat -> local/private
4657   if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
4658     unsigned DestAS = ASC->getDestAddressSpace();
4659 
4660     if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
4661         DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
4662       unsigned NullVal = TM.getNullPointerValue(DestAS);
4663       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4664       SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
4665       SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
4666 
4667       return DAG.getNode(ISD::SELECT, SL, MVT::i32,
4668                          NonNull, Ptr, SegmentNullPtr);
4669     }
4670   }
4671 
4672   // local/private -> flat
4673   if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
4674     unsigned SrcAS = ASC->getSrcAddressSpace();
4675 
4676     if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
4677         SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
4678       unsigned NullVal = TM.getNullPointerValue(SrcAS);
4679       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4680 
4681       SDValue NonNull
4682         = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
4683 
4684       SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
4685       SDValue CvtPtr
4686         = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
4687 
4688       return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
4689                          DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
4690                          FlatNullPtr);
4691     }
4692   }
4693 
4694   // global <-> flat are no-ops and never emitted.
4695 
4696   const MachineFunction &MF = DAG.getMachineFunction();
4697   DiagnosticInfoUnsupported InvalidAddrSpaceCast(
4698     MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
4699   DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4700 
4701   return DAG.getUNDEF(ASC->getValueType(0));
4702 }
4703 
4704 // This lowers an INSERT_SUBVECTOR by extracting the individual elements from
4705 // the small vector and inserting them into the big vector. That is better than
4706 // the default expansion of doing it via a stack slot. Even though the use of
4707 // the stack slot would be optimized away afterwards, the stack slot itself
4708 // remains.
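// For example, inserting a v2i16 subvector into a v4i16 vector at index 2
// becomes extract/insert element pairs for indices 2 and 3.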
4709 SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
4710                                                 SelectionDAG &DAG) const {
4711   SDValue Vec = Op.getOperand(0);
4712   SDValue Ins = Op.getOperand(1);
4713   SDValue Idx = Op.getOperand(2);
4714   EVT VecVT = Vec.getValueType();
4715   EVT InsVT = Ins.getValueType();
4716   EVT EltVT = VecVT.getVectorElementType();
4717   unsigned InsNumElts = InsVT.getVectorNumElements();
4718   unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
4719   SDLoc SL(Op);
4720 
4721   for (unsigned I = 0; I != InsNumElts; ++I) {
4722     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins,
4723                               DAG.getConstant(I, SL, MVT::i32));
4724     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt,
4725                       DAG.getConstant(IdxVal + I, SL, MVT::i32));
4726   }
4727   return Vec;
4728 }
4729 
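// Lower insert_vector_elt for small (<= 64-bit) vectors. A constant index
// into v4i16/v4f16 is handled by rebuilding the affected half; a dynamic
// index is lowered with shift/and/or bit manipulation to avoid a stack slot.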
4730 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4731                                                  SelectionDAG &DAG) const {
4732   SDValue Vec = Op.getOperand(0);
4733   SDValue InsVal = Op.getOperand(1);
4734   SDValue Idx = Op.getOperand(2);
4735   EVT VecVT = Vec.getValueType();
4736   EVT EltVT = VecVT.getVectorElementType();
4737   unsigned VecSize = VecVT.getSizeInBits();
4738   unsigned EltSize = EltVT.getSizeInBits();
4739 
4741   assert(VecSize <= 64);
4742 
4743   unsigned NumElts = VecVT.getVectorNumElements();
4744   SDLoc SL(Op);
4745   auto KIdx = dyn_cast<ConstantSDNode>(Idx);
4746 
4747   if (NumElts == 4 && EltSize == 16 && KIdx) {
4748     SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
4749 
4750     SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4751                                  DAG.getConstant(0, SL, MVT::i32));
4752     SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4753                                  DAG.getConstant(1, SL, MVT::i32));
4754 
4755     SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
4756     SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
4757 
4758     unsigned Idx = KIdx->getZExtValue();
4759     bool InsertLo = Idx < 2;
4760     SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
4761       InsertLo ? LoVec : HiVec,
4762       DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
4763       DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
4764 
4765     InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
4766 
4767     SDValue Concat = InsertLo ?
4768       DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
4769       DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
4770 
4771     return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
4772   }
4773 
4774   if (isa<ConstantSDNode>(Idx))
4775     return SDValue();
4776 
4777   MVT IntVT = MVT::getIntegerVT(VecSize);
4778 
4779   // Avoid stack access for dynamic indexing.
4780   // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
4781 
4782   // Create a congruent vector with the target value in each element so that
4783   // the required element can be masked and ORed into the target vector.
4784   SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT,
4785                                DAG.getSplatBuildVector(VecVT, SL, InsVal));
4786 
4787   assert(isPowerOf2_32(EltSize));
4788   SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4789 
4790   // Convert vector index to bit-index.
4791   SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
4792 
4793   SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4794   SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
4795                             DAG.getConstant(0xffff, SL, IntVT),
4796                             ScaledIdx);
4797 
4798   SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);
4799   SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT,
4800                             DAG.getNOT(SL, BFM, IntVT), BCVec);
4801 
4802   SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS);
4803   return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI);
4804 }
4805 
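// Lower extract_vector_elt for small (<= 64-bit) vectors by shifting the
// bitcast scalar value right by the bit index of the requested element.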
4806 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4807                                                   SelectionDAG &DAG) const {
4808   SDLoc SL(Op);
4809 
4810   EVT ResultVT = Op.getValueType();
4811   SDValue Vec = Op.getOperand(0);
4812   SDValue Idx = Op.getOperand(1);
4813   EVT VecVT = Vec.getValueType();
4814   unsigned VecSize = VecVT.getSizeInBits();
4815   EVT EltVT = VecVT.getVectorElementType();
4816   assert(VecSize <= 64);
4817 
4818   DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
4819 
4820   // Make sure we do any optimizations that will make it easier to fold
4821   // source modifiers before obscuring it with bit operations.
4822 
4823   // XXX - Why doesn't this get called when vector_shuffle is expanded?
4824   if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
4825     return Combined;
4826 
4827   unsigned EltSize = EltVT.getSizeInBits();
4828   assert(isPowerOf2_32(EltSize));
4829 
4830   MVT IntVT = MVT::getIntegerVT(VecSize);
4831   SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4832 
4833   // Convert vector index to bit-index (* EltSize)
4834   SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
4835 
4836   SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4837   SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx);
4838 
4839   if (ResultVT == MVT::f16) {
4840     SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt);
4841     return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4842   }
4843 
4844   return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT);
4845 }
4846 
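// Return true if the mask pair starting at \p Elt reads two consecutive
// source elements beginning at an even element index.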
4847 static bool elementPairIsContiguous(ArrayRef<int> Mask, int Elt) {
4848   assert(Elt % 2 == 0);
4849   return Mask[Elt + 1] == Mask[Elt] + 1 && (Mask[Elt] % 2 == 0);
4850 }
4851 
4852 SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
4853                                               SelectionDAG &DAG) const {
4854   SDLoc SL(Op);
4855   EVT ResultVT = Op.getValueType();
4856   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
4857 
4858   EVT PackVT = ResultVT.isInteger() ? MVT::v2i16 : MVT::v2f16;
4859   EVT EltVT = PackVT.getVectorElementType();
4860   int SrcNumElts = Op.getOperand(0).getValueType().getVectorNumElements();
4861 
4862   // vector_shuffle <0,1,6,7> lhs, rhs
4863   // -> concat_vectors (extract_subvector lhs, 0), (extract_subvector rhs, 2)
4864   //
4865   // vector_shuffle <6,7,2,3> lhs, rhs
4866   // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 2)
4867   //
4868   // vector_shuffle <6,7,0,1> lhs, rhs
4869   // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 0)
4870 
4871   // Avoid scalarizing when both halves are reading from consecutive elements.
4872   SmallVector<SDValue, 4> Pieces;
4873   for (int I = 0, N = ResultVT.getVectorNumElements(); I != N; I += 2) {
4874     if (elementPairIsContiguous(SVN->getMask(), I)) {
4875       const int Idx = SVN->getMaskElt(I);
4876       int VecIdx = Idx < SrcNumElts ? 0 : 1;
4877       int EltIdx = Idx < SrcNumElts ? Idx : Idx - SrcNumElts;
4878       SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL,
4879                                     PackVT, SVN->getOperand(VecIdx),
4880                                     DAG.getConstant(EltIdx, SL, MVT::i32));
4881       Pieces.push_back(SubVec);
4882     } else {
4883       const int Idx0 = SVN->getMaskElt(I);
4884       const int Idx1 = SVN->getMaskElt(I + 1);
4885       int VecIdx0 = Idx0 < SrcNumElts ? 0 : 1;
4886       int VecIdx1 = Idx1 < SrcNumElts ? 0 : 1;
4887       int EltIdx0 = Idx0 < SrcNumElts ? Idx0 : Idx0 - SrcNumElts;
4888       int EltIdx1 = Idx1 < SrcNumElts ? Idx1 : Idx1 - SrcNumElts;
4889 
4890       SDValue Vec0 = SVN->getOperand(VecIdx0);
4891       SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
4892                                  Vec0, DAG.getConstant(EltIdx0, SL, MVT::i32));
4893 
4894       SDValue Vec1 = SVN->getOperand(VecIdx1);
4895       SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
4896                                  Vec1, DAG.getConstant(EltIdx1, SL, MVT::i32));
4897       Pieces.push_back(DAG.getBuildVector(PackVT, SL, { Elt0, Elt1 }));
4898     }
4899   }
4900 
4901   return DAG.getNode(ISD::CONCAT_VECTORS, SL, ResultVT, Pieces);
4902 }
4903 
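// Lower build_vector of 16-bit elements by packing pairs of elements into
// 32-bit dwords: v4i16/v4f16 becomes two packed dwords, and v2i16/v2f16 is
// assembled with shift/or when VOP3P instructions are unavailable.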
4904 SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
4905                                             SelectionDAG &DAG) const {
4906   SDLoc SL(Op);
4907   EVT VT = Op.getValueType();
4908 
4909   if (VT == MVT::v4i16 || VT == MVT::v4f16) {
4910     EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2);
4911 
4912     // Turn into pair of packed build_vectors.
4913     // TODO: Special case for constants that can be materialized with s_mov_b64.
4914     SDValue Lo = DAG.getBuildVector(HalfVT, SL,
4915                                     { Op.getOperand(0), Op.getOperand(1) });
4916     SDValue Hi = DAG.getBuildVector(HalfVT, SL,
4917                                     { Op.getOperand(2), Op.getOperand(3) });
4918 
4919     SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo);
4920     SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi);
4921 
4922     SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi });
4923     return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
4924   }
4925 
4926   assert(VT == MVT::v2f16 || VT == MVT::v2i16);
4927   assert(!Subtarget->hasVOP3PInsts() && "this should be legal");
4928 
4929   SDValue Lo = Op.getOperand(0);
4930   SDValue Hi = Op.getOperand(1);
4931 
4932   // Avoid adding defined bits with the zero_extend.
4933   if (Hi.isUndef()) {
4934     Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4935     SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo);
4936     return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo);
4937   }
4938 
4939   Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi);
4940   Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi);
4941 
4942   SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi,
4943                               DAG.getConstant(16, SL, MVT::i32));
4944   if (Lo.isUndef())
4945     return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi);
4946 
4947   Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4948   Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo);
4949 
4950   SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi);
4951   return DAG.getNode(ISD::BITCAST, SL, VT, Or);
4952 }
4953 
4954 bool
4955 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4956   // We can fold offsets for anything that doesn't require a GOT relocation.
4957   return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
4958           GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4959           GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4960          !shouldEmitGOTReloc(GA->getGlobal());
4961 }
4962 
4963 static SDValue
4964 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
4965                         const SDLoc &DL, unsigned Offset, EVT PtrVT,
4966                         unsigned GAFlags = SIInstrInfo::MO_NONE) {
4967   // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
4968   // lowered to the following code sequence:
4969   //
4970   // For constant address space:
4971   //   s_getpc_b64 s[0:1]
4972   //   s_add_u32 s0, s0, $symbol
4973   //   s_addc_u32 s1, s1, 0
4974   //
4975   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
4976   //   a fixup or relocation is emitted to replace $symbol with a literal
4977   //   constant, which is a pc-relative offset from the encoding of the $symbol
4978   //   operand to the global variable.
4979   //
4980   // For global address space:
4981   //   s_getpc_b64 s[0:1]
4982   //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
4983   //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
4984   //
4985   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
4986   //   fixups or relocations are emitted to replace $symbol@*@lo and
4987   //   $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
4988   //   which is a 64-bit pc-relative offset from the encoding of the $symbol
4989   //   operand to the global variable.
4990   //
4991   // What we want here is an offset from the value returned by s_getpc
4992   // (which is the address of the s_add_u32 instruction) to the global
4993   // variable, but since the encoding of $symbol starts 4 bytes after the start
4994   // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
4995   // small. This requires us to add 4 to the global variable offset in order to
4996   // compute the correct address.
4997   unsigned LoFlags = GAFlags;
4998   if (LoFlags == SIInstrInfo::MO_NONE)
4999     LoFlags = SIInstrInfo::MO_REL32;
5000   SDValue PtrLo =
5001       DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, LoFlags);
5002   SDValue PtrHi;
5003   if (GAFlags == SIInstrInfo::MO_NONE) {
5004     PtrHi = DAG.getTargetConstant(0, DL, MVT::i32);
5005   } else {
5006     PtrHi =
5007         DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags + 1);
5008   }
5009   return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
5010 }
5011 
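// Lower a global address to an absolute LDS address, a PC-relative fixup or
// relocation, or a load from the GOT, depending on the address space and how
// the symbol must be relocated.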
5012 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
5013                                              SDValue Op,
5014                                              SelectionDAG &DAG) const {
5015   GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
5016   const GlobalValue *GV = GSD->getGlobal();
5017   if ((GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
5018        (!GV->hasExternalLinkage() ||
5019         getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA ||
5020         getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL)) ||
5021       GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS ||
5022       GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS)
5023     return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
5024 
5025   SDLoc DL(GSD);
5026   EVT PtrVT = Op.getValueType();
5027 
5028   if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
5029     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, GSD->getOffset(),
5030                                             SIInstrInfo::MO_ABS32_LO);
5031     return DAG.getNode(AMDGPUISD::LDS, DL, MVT::i32, GA);
5032   }
5033 
5034   if (shouldEmitFixup(GV))
5035     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
5036   else if (shouldEmitPCReloc(GV))
5037     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
5038                                    SIInstrInfo::MO_REL32);
5039 
5040   SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
5041                                             SIInstrInfo::MO_GOTPCREL32);
5042 
5043   Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
5044   PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
5045   const DataLayout &DataLayout = DAG.getDataLayout();
5046   unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
5047   MachinePointerInfo PtrInfo
5048     = MachinePointerInfo::getGOT(DAG.getMachineFunction());
5049 
5050   return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
5051                      MachineMemOperand::MODereferenceable |
5052                          MachineMemOperand::MOInvariant);
5053 }
5054 
5055 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
5056                                    const SDLoc &DL, SDValue V) const {
5057   // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
5058   // the destination register.
5059   //
5060   // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
5061   // so we will end up with redundant moves to m0.
5062   //
5063   // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
5064 
5065   // A Null SDValue creates a glue result.
5066   SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
5067                                   V, Chain);
5068   return SDValue(M0, 0);
5069 }
5070 
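// Load a 32-bit kernel argument dword at \p Offset and mark the bits above
// \p VT as known zero with AssertZext.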
5071 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
5072                                                  SDValue Op,
5073                                                  MVT VT,
5074                                                  unsigned Offset) const {
5075   SDLoc SL(Op);
5076   SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
5077                                            DAG.getEntryNode(), Offset, 4, false);
5078   // The local size values will have the hi 16-bits as zero.
5079   return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
5080                      DAG.getValueType(VT));
5081 }
5082 
5083 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
5084                                         EVT VT) {
5085   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
5086                                       "non-hsa intrinsic with hsa target",
5087                                       DL.getDebugLoc());
5088   DAG.getContext()->diagnose(BadIntrin);
5089   return DAG.getUNDEF(VT);
5090 }
5091 
5092 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
5093                                          EVT VT) {
5094   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
5095                                       "intrinsic not supported on subtarget",
5096                                       DL.getDebugLoc());
5097   DAG.getContext()->diagnose(BadIntrin);
5098   return DAG.getUNDEF(VT);
5099 }
5100 
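// Pack a list of dword-sized values into the smallest f32 vector (1, 2, 4, 8
// or 16 elements) that can hold them, bitcasting inputs to f32 and padding
// the tail with undef.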
5101 static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
5102                                     ArrayRef<SDValue> Elts) {
5103   assert(!Elts.empty());
5104   MVT Type;
5105   unsigned NumElts;
5106 
5107   if (Elts.size() == 1) {
5108     Type = MVT::f32;
5109     NumElts = 1;
5110   } else if (Elts.size() == 2) {
5111     Type = MVT::v2f32;
5112     NumElts = 2;
5113   } else if (Elts.size() <= 4) {
5114     Type = MVT::v4f32;
5115     NumElts = 4;
5116   } else if (Elts.size() <= 8) {
5117     Type = MVT::v8f32;
5118     NumElts = 8;
5119   } else {
5120     assert(Elts.size() <= 16);
5121     Type = MVT::v16f32;
5122     NumElts = 16;
5123   }
5124 
5125   SmallVector<SDValue, 16> VecElts(NumElts);
5126   for (unsigned i = 0; i < Elts.size(); ++i) {
5127     SDValue Elt = Elts[i];
5128     if (Elt.getValueType() != MVT::f32)
5129       Elt = DAG.getBitcast(MVT::f32, Elt);
5130     VecElts[i] = Elt;
5131   }
5132   for (unsigned i = Elts.size(); i < NumElts; ++i)
5133     VecElts[i] = DAG.getUNDEF(MVT::f32);
5134 
5135   if (NumElts == 1)
5136     return VecElts[0];
5137   return DAG.getBuildVector(Type, DL, VecElts);
5138 }
5139 
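// Split a cachepolicy immediate into its glc (bit 0), slc (bit 1) and dlc
// (bit 2) target constants. Returns true only if no unhandled bits remain.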
5140 static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG,
5141                              SDValue *GLC, SDValue *SLC, SDValue *DLC) {
5142   auto CachePolicyConst = cast<ConstantSDNode>(CachePolicy.getNode());
5143 
5144   uint64_t Value = CachePolicyConst->getZExtValue();
5145   SDLoc DL(CachePolicy);
5146   if (GLC) {
5147     *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
5148     Value &= ~(uint64_t)0x1;
5149   }
5150   if (SLC) {
5151     *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
5152     Value &= ~(uint64_t)0x2;
5153   }
5154   if (DLC) {
5155     *DLC = DAG.getTargetConstant((Value & 0x4) ? 1 : 0, DL, MVT::i32);
5156     Value &= ~(uint64_t)0x4;
5157   }
5158 
5159   return Value == 0;
5160 }
5161 
// Re-construct the required return value for an image load intrinsic.
// This is more complicated due to the optional use of TexFailCtrl, which
// means the required return type is an aggregate.
5165 static SDValue constructRetValue(SelectionDAG &DAG,
5166                                  MachineSDNode *Result,
5167                                  ArrayRef<EVT> ResultTypes,
5168                                  bool IsTexFail, bool Unpacked, bool IsD16,
5169                                  int DMaskPop, int NumVDataDwords,
5170                                  const SDLoc &DL, LLVMContext &Context) {
  // Determine the required return type. This is the same regardless of the
  // IsTexFail flag.
5172   EVT ReqRetVT = ResultTypes[0];
5173   EVT ReqRetEltVT = ReqRetVT.isVector() ? ReqRetVT.getVectorElementType() : ReqRetVT;
5174   int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1;
5175   EVT AdjEltVT = Unpacked && IsD16 ? MVT::i32 : ReqRetEltVT;
5176   EVT AdjVT = Unpacked ? ReqRetNumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, ReqRetNumElts)
5177                                            : AdjEltVT
5178                        : ReqRetVT;
5179 
5180   // Extract data part of the result
5181   // Bitcast the result to the same type as the required return type
5182   int NumElts;
5183   if (IsD16 && !Unpacked)
5184     NumElts = NumVDataDwords << 1;
5185   else
5186     NumElts = NumVDataDwords;
5187 
5188   EVT CastVT = NumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, NumElts)
5189                            : AdjEltVT;
5190 
5191   // Special case for v6f16. Rather than add support for this, use v3i32 to
5192   // extract the data elements
5193   bool V6F16Special = false;
5194   if (NumElts == 6) {
5195     CastVT = EVT::getVectorVT(Context, MVT::i32, NumElts / 2);
5196     DMaskPop >>= 1;
5197     ReqRetNumElts >>= 1;
5198     V6F16Special = true;
5199     AdjVT = MVT::v2i32;
5200   }
5201 
5202   SDValue N = SDValue(Result, 0);
5203   SDValue CastRes = DAG.getNode(ISD::BITCAST, DL, CastVT, N);
5204 
5205   // Iterate over the result
5206   SmallVector<SDValue, 4> BVElts;
5207 
5208   if (CastVT.isVector()) {
5209     DAG.ExtractVectorElements(CastRes, BVElts, 0, DMaskPop);
5210   } else {
5211     BVElts.push_back(CastRes);
5212   }
5213   int ExtraElts = ReqRetNumElts - DMaskPop;
  while (ExtraElts--)
5215     BVElts.push_back(DAG.getUNDEF(AdjEltVT));
5216 
5217   SDValue PreTFCRes;
5218   if (ReqRetNumElts > 1) {
5219     SDValue NewVec = DAG.getBuildVector(AdjVT, DL, BVElts);
5220     if (IsD16 && Unpacked)
5221       PreTFCRes = adjustLoadValueTypeImpl(NewVec, ReqRetVT, DL, DAG, Unpacked);
5222     else
5223       PreTFCRes = NewVec;
5224   } else {
5225     PreTFCRes = BVElts[0];
5226   }
5227 
5228   if (V6F16Special)
5229     PreTFCRes = DAG.getNode(ISD::BITCAST, DL, MVT::v4f16, PreTFCRes);
5230 
5231   if (!IsTexFail) {
5232     if (Result->getNumValues() > 1)
5233       return DAG.getMergeValues({PreTFCRes, SDValue(Result, 1)}, DL);
5234     else
5235       return PreTFCRes;
5236   }
5237 
5238   // Extract the TexFail result and insert into aggregate return
5239   SmallVector<SDValue, 1> TFCElt;
5240   DAG.ExtractVectorElements(N, TFCElt, DMaskPop, 1);
5241   SDValue TFCRes = DAG.getNode(ISD::BITCAST, DL, ResultTypes[1], TFCElt[0]);
5242   return DAG.getMergeValues({PreTFCRes, TFCRes, SDValue(Result, 1)}, DL);
5243 }
5244 
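// Decode a texfailctrl immediate into tfe (bit 0) and lwe (bit 1) operands,
// setting \p IsTexFail if any bit is set. Returns true only if no unknown
// bits remain.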
5245 static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE,
5246                          SDValue *LWE, bool &IsTexFail) {
5247   auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode());
5248 
5249   uint64_t Value = TexFailCtrlConst->getZExtValue();
5250   if (Value) {
5251     IsTexFail = true;
5252   }
5253 
5254   SDLoc DL(TexFailCtrlConst);
5255   *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
5256   Value &= ~(uint64_t)0x1;
5257   *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
5258   Value &= ~(uint64_t)0x2;
5259 
5260   return Value == 0;
5261 }
5262 
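// Lower an image (MIMG) intrinsic: collect the vdata and address operands,
// handle d16/a16 packing and dmask/texfailctrl adjustments, and build the
// operand list for the selected MIMG instruction.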
5263 SDValue SITargetLowering::lowerImage(SDValue Op,
5264                                      const AMDGPU::ImageDimIntrinsicInfo *Intr,
5265                                      SelectionDAG &DAG) const {
5266   SDLoc DL(Op);
5267   MachineFunction &MF = DAG.getMachineFunction();
5268   const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>();
5269   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
5270       AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
5271   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
5272   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
5273       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
5274   const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
5275       AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
5276   unsigned IntrOpcode = Intr->BaseOpcode;
5277   bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10;
5278 
5279   SmallVector<EVT, 3> ResultTypes(Op->value_begin(), Op->value_end());
5280   SmallVector<EVT, 3> OrigResultTypes(Op->value_begin(), Op->value_end());
5281   bool IsD16 = false;
5282   bool IsA16 = false;
5283   SDValue VData;
5284   int NumVDataDwords;
5285   bool AdjustRetType = false;
5286 
5287   unsigned AddrIdx; // Index of first address argument
5288   unsigned DMask;
5289   unsigned DMaskLanes = 0;
5290 
5291   if (BaseOpcode->Atomic) {
5292     VData = Op.getOperand(2);
5293 
5294     bool Is64Bit = VData.getValueType() == MVT::i64;
5295     if (BaseOpcode->AtomicX2) {
5296       SDValue VData2 = Op.getOperand(3);
5297       VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL,
5298                                  {VData, VData2});
5299       if (Is64Bit)
5300         VData = DAG.getBitcast(MVT::v4i32, VData);
5301 
5302       ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32;
5303       DMask = Is64Bit ? 0xf : 0x3;
5304       NumVDataDwords = Is64Bit ? 4 : 2;
5305       AddrIdx = 4;
5306     } else {
5307       DMask = Is64Bit ? 0x3 : 0x1;
5308       NumVDataDwords = Is64Bit ? 2 : 1;
5309       AddrIdx = 3;
5310     }
5311   } else {
5312     unsigned DMaskIdx = BaseOpcode->Store ? 3 : isa<MemSDNode>(Op) ? 2 : 1;
5313     auto DMaskConst = cast<ConstantSDNode>(Op.getOperand(DMaskIdx));
5314     DMask = DMaskConst->getZExtValue();
5315     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
5316 
5317     if (BaseOpcode->Store) {
5318       VData = Op.getOperand(2);
5319 
5320       MVT StoreVT = VData.getSimpleValueType();
5321       if (StoreVT.getScalarType() == MVT::f16) {
5322         if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
5323           return Op; // D16 is unsupported for this instruction
5324 
5325         IsD16 = true;
5326         VData = handleD16VData(VData, DAG);
5327       }
5328 
5329       NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32;
5330     } else {
5331       // Work out the num dwords based on the dmask popcount and underlying type
5332       // and whether packing is supported.
5333       MVT LoadVT = ResultTypes[0].getSimpleVT();
5334       if (LoadVT.getScalarType() == MVT::f16) {
5335         if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
5336           return Op; // D16 is unsupported for this instruction
5337 
5338         IsD16 = true;
5339       }
5340 
5341       // Confirm that the return type is large enough for the dmask specified
      if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) ||
          (!LoadVT.isVector() && DMaskLanes > 1))
        return Op;
5345 
5346       if (IsD16 && !Subtarget->hasUnpackedD16VMem())
5347         NumVDataDwords = (DMaskLanes + 1) / 2;
5348       else
5349         NumVDataDwords = DMaskLanes;
5350 
5351       AdjustRetType = true;
5352     }
5353 
5354     AddrIdx = DMaskIdx + 1;
5355   }
5356 
5357   unsigned NumGradients = BaseOpcode->Gradients ? DimInfo->NumGradients : 0;
5358   unsigned NumCoords = BaseOpcode->Coordinates ? DimInfo->NumCoords : 0;
5359   unsigned NumLCM = BaseOpcode->LodOrClampOrMip ? 1 : 0;
5360   unsigned NumVAddrs = BaseOpcode->NumExtraArgs + NumGradients +
5361                        NumCoords + NumLCM;
5362   unsigned NumMIVAddrs = NumVAddrs;
5363 
5364   SmallVector<SDValue, 4> VAddrs;
5365 
5366   // Optimize _L to _LZ when _L is zero
5367   if (LZMappingInfo) {
5368     if (auto ConstantLod =
5369          dyn_cast<ConstantFPSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
5370       if (ConstantLod->isZero() || ConstantLod->isNegative()) {
5371         IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
5372         NumMIVAddrs--;               // remove 'lod'
5373       }
5374     }
5375   }
5376 
  // Optimize _mip away when 'lod' is zero.
5378   if (MIPMappingInfo) {
5379     if (auto ConstantLod =
5380          dyn_cast<ConstantSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
5381       if (ConstantLod->isNullValue()) {
5382         IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
5383         NumMIVAddrs--;               // remove 'lod'
5384       }
5385     }
5386   }
5387 
5388   // Check for 16 bit addresses and pack if true.
5389   unsigned DimIdx = AddrIdx + BaseOpcode->NumExtraArgs;
5390   MVT VAddrVT = Op.getOperand(DimIdx).getSimpleValueType();
5391   const MVT VAddrScalarVT = VAddrVT.getScalarType();
5392   if (((VAddrScalarVT == MVT::f16) || (VAddrScalarVT == MVT::i16)) &&
5393       ST->hasFeature(AMDGPU::FeatureR128A16)) {
5394     IsA16 = true;
5395     const MVT VectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16;
5396     for (unsigned i = AddrIdx; i < (AddrIdx + NumMIVAddrs); ++i) {
5397       SDValue AddrLo, AddrHi;
5398       // Push back extra arguments.
5399       if (i < DimIdx) {
5400         AddrLo = Op.getOperand(i);
5401       } else {
5402         AddrLo = Op.getOperand(i);
5403         // Dz/dh, dz/dv and the last odd coord are packed with undef. Also,
5404         // in 1D, derivatives dx/dh and dx/dv are packed with undef.
5405         if (((i + 1) >= (AddrIdx + NumMIVAddrs)) ||
5406             ((NumGradients / 2) % 2 == 1 &&
5407             (i == DimIdx + (NumGradients / 2) - 1 ||
5408              i == DimIdx + NumGradients - 1))) {
5409           AddrHi = DAG.getUNDEF(MVT::f16);
5410         } else {
5411           AddrHi = Op.getOperand(i + 1);
5412           i++;
5413         }
5414         AddrLo = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VectorVT,
5415                              {AddrLo, AddrHi});
5416         AddrLo = DAG.getBitcast(MVT::i32, AddrLo);
5417       }
5418       VAddrs.push_back(AddrLo);
5419     }
5420   } else {
5421     for (unsigned i = 0; i < NumMIVAddrs; ++i)
5422       VAddrs.push_back(Op.getOperand(AddrIdx + i));
5423   }
5424 
5425   // If the register allocator cannot place the address registers contiguously
5426   // without introducing moves, then using the non-sequential address encoding
5427   // is always preferable, since it saves VALU instructions and is usually a
5428   // wash in terms of code size or even better.
5429   //
5430   // However, we currently have no way of hinting to the register allocator that
5431   // MIMG addresses should be placed contiguously when it is possible to do so,
5432   // so force non-NSA for the common 2-address case as a heuristic.
5433   //
5434   // SIShrinkInstructions will convert NSA encodings to non-NSA after register
5435   // allocation when possible.
5436   bool UseNSA =
5437       ST->hasFeature(AMDGPU::FeatureNSAEncoding) && VAddrs.size() >= 3;
5438   SDValue VAddr;
5439   if (!UseNSA)
5440     VAddr = getBuildDwordsVector(DAG, DL, VAddrs);
5441 
5442   SDValue True = DAG.getTargetConstant(1, DL, MVT::i1);
5443   SDValue False = DAG.getTargetConstant(0, DL, MVT::i1);
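  // Non-sampler instructions are implicitly unorm; sampler instructions take
  // the unorm flag from the intrinsic's unorm operand, which also shifts the
  // position of the texfailctrl operand.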
5444   unsigned CtrlIdx; // Index of texfailctrl argument
5445   SDValue Unorm;
5446   if (!BaseOpcode->Sampler) {
5447     Unorm = True;
5448     CtrlIdx = AddrIdx + NumVAddrs + 1;
5449   } else {
5450     auto UnormConst =
5451         cast<ConstantSDNode>(Op.getOperand(AddrIdx + NumVAddrs + 2));
5452 
5453     Unorm = UnormConst->getZExtValue() ? True : False;
5454     CtrlIdx = AddrIdx + NumVAddrs + 3;
5455   }
5456 
5457   SDValue TFE;
5458   SDValue LWE;
5459   SDValue TexFail = Op.getOperand(CtrlIdx);
5460   bool IsTexFail = false;
5461   if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail))
5462     return Op;
5463 
5464   if (IsTexFail) {
5465     if (!DMaskLanes) {
      // An error flag is still expected since TFC is on even though dmask is 0.
      // Force dmask to be at least 1, otherwise the instruction will fail.
5468       DMask = 0x1;
5469       DMaskLanes = 1;
5470       NumVDataDwords = 1;
5471     }
5472     NumVDataDwords += 1;
5473     AdjustRetType = true;
5474   }
5475 
  // Something earlier has flagged that the return type needs adjusting.
  // This happens if the instruction is a load or has TexFailCtrl flags set.
5478   if (AdjustRetType) {
    // NumVDataDwords reflects the true number of dwords required in the
    // return type.
5480     if (DMaskLanes == 0 && !BaseOpcode->Store) {
      // This is a no-op load that can be eliminated.
5482       SDValue Undef = DAG.getUNDEF(Op.getValueType());
5483       if (isa<MemSDNode>(Op))
5484         return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
5485       return Undef;
5486     }
5487 
5488     EVT NewVT = NumVDataDwords > 1 ?
5489                   EVT::getVectorVT(*DAG.getContext(), MVT::f32, NumVDataDwords)
5490                 : MVT::f32;
5491 
5492     ResultTypes[0] = NewVT;
5493     if (ResultTypes.size() == 3) {
      // The original result was an aggregate type used for the TexFailCtrl
      // result. The actual instruction returns a vector type, which has now
      // been created above, so remove the aggregate result.
5497       ResultTypes.erase(&ResultTypes[1]);
5498     }
5499   }
5500 
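  // Decode the cachepolicy operand into glc/slc (and dlc on GFX10). Atomics
  // force glc so that the instruction returns a value.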
5501   SDValue GLC;
5502   SDValue SLC;
5503   SDValue DLC;
5504   if (BaseOpcode->Atomic) {
5505     GLC = True; // TODO no-return optimization
5506     if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, nullptr, &SLC,
5507                           IsGFX10 ? &DLC : nullptr))
5508       return Op;
5509   } else {
5510     if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, &GLC, &SLC,
5511                           IsGFX10 ? &DLC : nullptr))
5512       return Op;
5513   }
5514 
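  // Assemble the operand list for the MIMG machine node.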
5515   SmallVector<SDValue, 26> Ops;
5516   if (BaseOpcode->Store || BaseOpcode->Atomic)
5517     Ops.push_back(VData); // vdata
5518   if (UseNSA) {
5519     for (const SDValue &Addr : VAddrs)
5520       Ops.push_back(Addr);
5521   } else {
5522     Ops.push_back(VAddr);
5523   }
5524   Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs)); // rsrc
5525   if (BaseOpcode->Sampler)
5526     Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs + 1)); // sampler
5527   Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32));
5528   if (IsGFX10)
5529     Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32));
5530   Ops.push_back(Unorm);
5531   if (IsGFX10)
5532     Ops.push_back(DLC);
5533   Ops.push_back(GLC);
5534   Ops.push_back(SLC);
5535   Ops.push_back(IsA16 &&  // a16 or r128
5536                 ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False);
5537   Ops.push_back(TFE); // tfe
5538   Ops.push_back(LWE); // lwe
5539   if (!IsGFX10)
5540     Ops.push_back(DimInfo->DA ? True : False);
5541   if (BaseOpcode->HasD16)
5542     Ops.push_back(IsD16 ? True : False);
5543   if (isa<MemSDNode>(Op))
5544     Ops.push_back(Op.getOperand(0)); // chain
5545 
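  // Select the MIMG opcode variant that matches the encoding and the
  // vdata/vaddr dword counts.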
5546   int NumVAddrDwords =
5547       UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32;
5548   int Opcode = -1;
5549 
5550   if (IsGFX10) {
5551     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
5552                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
5553                                           : AMDGPU::MIMGEncGfx10Default,
5554                                    NumVDataDwords, NumVAddrDwords);
5555   } else {
5556     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5557       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
5558                                      NumVDataDwords, NumVAddrDwords);
5559     if (Opcode == -1)
5560       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
5561                                      NumVDataDwords, NumVAddrDwords);
5562   }
5563   assert(Opcode != -1);
5564 
5565   MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops);
5566   if (auto MemOp = dyn_cast<MemSDNode>(Op)) {
5567     MachineMemOperand *MemRef = MemOp->getMemOperand();
5568     DAG.setNodeMemRefs(NewNode, {MemRef});
5569   }
5570 
5571   if (BaseOpcode->AtomicX2) {
5572     SmallVector<SDValue, 1> Elt;
5573     DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1);
5574     return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL);
5575   } else if (!BaseOpcode->Store) {
5576     return constructRetValue(DAG, NewNode,
5577                              OrigResultTypes, IsTexFail,
5578                              Subtarget->hasUnpackedD16VMem(), IsD16,
5579                              DMaskLanes, NumVDataDwords, DL,
5580                              *DAG.getContext());
5581   }
5582 
5583   return SDValue(NewNode, 0);
5584 }
5585 
5586 SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
5587                                        SDValue Offset, SDValue GLC, SDValue DLC,
5588                                        SelectionDAG &DAG) const {
5589   MachineFunction &MF = DAG.getMachineFunction();
5590   MachineMemOperand *MMO = MF.getMachineMemOperand(
5591       MachinePointerInfo(),
5592       MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
5593           MachineMemOperand::MOInvariant,
5594       VT.getStoreSize(), VT.getStoreSize());
5595 
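  // A uniform offset can be lowered directly to a scalar (SMEM) buffer load.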
5596   if (!Offset->isDivergent()) {
5597     SDValue Ops[] = {
5598         Rsrc,
5599         Offset, // Offset
5600         GLC,
5601         DLC,
5602     };
5603     return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL,
5604                                    DAG.getVTList(VT), Ops, VT, MMO);
5605   }
5606 
5607   // We have a divergent offset. Emit a MUBUF buffer load instead. We can
5608   // assume that the buffer is unswizzled.
5609   SmallVector<SDValue, 4> Loads;
5610   unsigned NumLoads = 1;
5611   MVT LoadVT = VT.getSimpleVT();
5612   unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1;
5613   assert((LoadVT.getScalarType() == MVT::i32 ||
5614           LoadVT.getScalarType() == MVT::f32) &&
5615          isPowerOf2_32(NumElts));
5616 
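  // Wide results are split into multiple dwordx4 loads and concatenated back
  // together below.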
5617   if (NumElts == 8 || NumElts == 16) {
5618     NumLoads = NumElts == 16 ? 4 : 2;
5619     LoadVT = MVT::v4i32;
5620   }
5621 
5622   SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue});
5623   unsigned CachePolicy = cast<ConstantSDNode>(GLC)->getZExtValue();
5624   SDValue Ops[] = {
5625       DAG.getEntryNode(),                         // Chain
5626       Rsrc,                                       // rsrc
5627       DAG.getConstant(0, DL, MVT::i32),           // vindex
5628       {},                                         // voffset
5629       {},                                         // soffset
5630       {},                                         // offset
5631       DAG.getConstant(CachePolicy, DL, MVT::i32), // cachepolicy
5632       DAG.getConstant(0, DL, MVT::i1),            // idxen
5633   };
5634 
5635   // Use the alignment to ensure that the required offsets will fit into the
5636   // immediate offsets.
5637   setBufferOffsets(Offset, DAG, &Ops[3], NumLoads > 1 ? 16 * NumLoads : 4);
5638 
5639   uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
5640   for (unsigned i = 0; i < NumLoads; ++i) {
5641     Ops[5] = DAG.getConstant(InstOffset + 16 * i, DL, MVT::i32);
5642     Loads.push_back(DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList,
5643                                             Ops, LoadVT, MMO));
5644   }
5645 
5646   if (VT == MVT::v8i32 || VT == MVT::v16i32)
5647     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads);
5648 
5649   return Loads[0];
5650 }
5651 
5652 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
5653                                                   SelectionDAG &DAG) const {
5654   MachineFunction &MF = DAG.getMachineFunction();
5655   auto MFI = MF.getInfo<SIMachineFunctionInfo>();
5656 
5657   EVT VT = Op.getValueType();
5658   SDLoc DL(Op);
5659   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5660 
5661   // TODO: Should this propagate fast-math-flags?
5662 
5663   switch (IntrinsicID) {
5664   case Intrinsic::amdgcn_implicit_buffer_ptr: {
5665     if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction()))
5666       return emitNonHSAIntrinsicError(DAG, DL, VT);
5667     return getPreloadedValue(DAG, *MFI, VT,
5668                              AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
5669   }
5670   case Intrinsic::amdgcn_dispatch_ptr:
5671   case Intrinsic::amdgcn_queue_ptr: {
5672     if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) {
5673       DiagnosticInfoUnsupported BadIntrin(
5674           MF.getFunction(), "unsupported hsa intrinsic without hsa target",
5675           DL.getDebugLoc());
5676       DAG.getContext()->diagnose(BadIntrin);
5677       return DAG.getUNDEF(VT);
5678     }
5679 
5680     auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
5681       AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
5682     return getPreloadedValue(DAG, *MFI, VT, RegID);
5683   }
5684   case Intrinsic::amdgcn_implicitarg_ptr: {
5685     if (MFI->isEntryFunction())
5686       return getImplicitArgPtr(DAG, DL);
5687     return getPreloadedValue(DAG, *MFI, VT,
5688                              AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
5689   }
5690   case Intrinsic::amdgcn_kernarg_segment_ptr: {
5691     return getPreloadedValue(DAG, *MFI, VT,
5692                              AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
5693   }
5694   case Intrinsic::amdgcn_dispatch_id: {
5695     return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
5696   }
5697   case Intrinsic::amdgcn_rcp:
5698     return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
5699   case Intrinsic::amdgcn_rsq:
5700     return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
5701   case Intrinsic::amdgcn_rsq_legacy:
5702     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5703       return emitRemovedIntrinsicError(DAG, DL, VT);
5704 
5705     return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
5706   case Intrinsic::amdgcn_rcp_legacy:
5707     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5708       return emitRemovedIntrinsicError(DAG, DL, VT);
5709     return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
5710   case Intrinsic::amdgcn_rsq_clamp: {
5711     if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
5712       return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
5713 
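    // There is no rsq_clamp instruction on VI and newer subtargets, so clamp
    // the RSQ result to the largest and smallest finite values of the type
    // instead.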
5714     Type *Type = VT.getTypeForEVT(*DAG.getContext());
5715     APFloat Max = APFloat::getLargest(Type->getFltSemantics());
5716     APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
5717 
5718     SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
5719     SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
5720                               DAG.getConstantFP(Max, DL, VT));
5721     return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
5722                        DAG.getConstantFP(Min, DL, VT));
5723   }
5724   case Intrinsic::r600_read_ngroups_x:
5725     if (Subtarget->isAmdHsaOS())
5726       return emitNonHSAIntrinsicError(DAG, DL, VT);
5727 
5728     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5729                                     SI::KernelInputOffsets::NGROUPS_X, 4, false);
5730   case Intrinsic::r600_read_ngroups_y:
5731     if (Subtarget->isAmdHsaOS())
5732       return emitNonHSAIntrinsicError(DAG, DL, VT);
5733 
5734     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5735                                     SI::KernelInputOffsets::NGROUPS_Y, 4, false);
5736   case Intrinsic::r600_read_ngroups_z:
5737     if (Subtarget->isAmdHsaOS())
5738       return emitNonHSAIntrinsicError(DAG, DL, VT);
5739 
5740     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5741                                     SI::KernelInputOffsets::NGROUPS_Z, 4, false);
5742   case Intrinsic::r600_read_global_size_x:
5743     if (Subtarget->isAmdHsaOS())
5744       return emitNonHSAIntrinsicError(DAG, DL, VT);
5745 
5746     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5747                                     SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false);
5748   case Intrinsic::r600_read_global_size_y:
5749     if (Subtarget->isAmdHsaOS())
5750       return emitNonHSAIntrinsicError(DAG, DL, VT);
5751 
5752     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5753                                     SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false);
5754   case Intrinsic::r600_read_global_size_z:
5755     if (Subtarget->isAmdHsaOS())
5756       return emitNonHSAIntrinsicError(DAG, DL, VT);
5757 
5758     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5759                                     SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false);
5760   case Intrinsic::r600_read_local_size_x:
5761     if (Subtarget->isAmdHsaOS())
5762       return emitNonHSAIntrinsicError(DAG, DL, VT);
5763 
5764     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5765                                   SI::KernelInputOffsets::LOCAL_SIZE_X);
5766   case Intrinsic::r600_read_local_size_y:
5767     if (Subtarget->isAmdHsaOS())
5768       return emitNonHSAIntrinsicError(DAG, DL, VT);
5769 
5770     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5771                                   SI::KernelInputOffsets::LOCAL_SIZE_Y);
5772   case Intrinsic::r600_read_local_size_z:
5773     if (Subtarget->isAmdHsaOS())
5774       return emitNonHSAIntrinsicError(DAG, DL, VT);
5775 
5776     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5777                                   SI::KernelInputOffsets::LOCAL_SIZE_Z);
5778   case Intrinsic::amdgcn_workgroup_id_x:
5779   case Intrinsic::r600_read_tgid_x:
5780     return getPreloadedValue(DAG, *MFI, VT,
5781                              AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
5782   case Intrinsic::amdgcn_workgroup_id_y:
5783   case Intrinsic::r600_read_tgid_y:
5784     return getPreloadedValue(DAG, *MFI, VT,
5785                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
5786   case Intrinsic::amdgcn_workgroup_id_z:
5787   case Intrinsic::r600_read_tgid_z:
5788     return getPreloadedValue(DAG, *MFI, VT,
5789                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
5790   case Intrinsic::amdgcn_workitem_id_x:
5791   case Intrinsic::r600_read_tidig_x:
5792     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5793                           SDLoc(DAG.getEntryNode()),
5794                           MFI->getArgInfo().WorkItemIDX);
5795   case Intrinsic::amdgcn_workitem_id_y:
5796   case Intrinsic::r600_read_tidig_y:
5797     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5798                           SDLoc(DAG.getEntryNode()),
5799                           MFI->getArgInfo().WorkItemIDY);
5800   case Intrinsic::amdgcn_workitem_id_z:
5801   case Intrinsic::r600_read_tidig_z:
5802     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5803                           SDLoc(DAG.getEntryNode()),
5804                           MFI->getArgInfo().WorkItemIDZ);
5805   case Intrinsic::amdgcn_wavefrontsize:
5806     return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(),
5807                            SDLoc(Op), MVT::i32);
5808   case Intrinsic::amdgcn_s_buffer_load: {
5809     bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10;
5810     SDValue GLC;
5811     SDValue DLC = DAG.getTargetConstant(0, DL, MVT::i1);
5812     if (!parseCachePolicy(Op.getOperand(3), DAG, &GLC, nullptr,
5813                           IsGFX10 ? &DLC : nullptr))
5814       return Op;
5815     return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2), GLC, DLC,
5816                         DAG);
5817   }
5818   case Intrinsic::amdgcn_fdiv_fast:
5819     return lowerFDIV_FAST(Op, DAG);
5820   case Intrinsic::amdgcn_interp_mov: {
5821     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
5822     SDValue Glue = M0.getValue(1);
5823     return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
5824                        Op.getOperand(2), Op.getOperand(3), Glue);
5825   }
5826   case Intrinsic::amdgcn_interp_p1: {
5827     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
5828     SDValue Glue = M0.getValue(1);
5829     return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
5830                        Op.getOperand(2), Op.getOperand(3), Glue);
5831   }
5832   case Intrinsic::amdgcn_interp_p2: {
5833     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
5834     SDValue Glue = SDValue(M0.getNode(), 1);
5835     return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
5836                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
5837                        Glue);
5838   }
5839   case Intrinsic::amdgcn_interp_p1_f16: {
5840     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
5841     SDValue Glue = M0.getValue(1);
5842     if (getSubtarget()->getLDSBankCount() == 16) {
5843       // 16 bank LDS
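      // The attribute is first loaded via INTERP_MOV (P0) and then passed as
      // the third source of the P1LV form below.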
5844       SDValue S = DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32,
5845                               DAG.getConstant(2, DL, MVT::i32), // P0
5846                               Op.getOperand(2), // Attrchan
5847                               Op.getOperand(3), // Attr
5848                               Glue);
5849       SDValue Ops[] = {
5850         Op.getOperand(1), // Src0
5851         Op.getOperand(2), // Attrchan
5852         Op.getOperand(3), // Attr
5853         DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
5854         S, // Src2 - holds two f16 values selected by high
5855         DAG.getConstant(0, DL, MVT::i32), // $src2_modifiers
5856         Op.getOperand(4), // high
5857         DAG.getConstant(0, DL, MVT::i1), // $clamp
5858         DAG.getConstant(0, DL, MVT::i32) // $omod
5859       };
5860       return DAG.getNode(AMDGPUISD::INTERP_P1LV_F16, DL, MVT::f32, Ops);
5861     } else {
5862       // 32 bank LDS
5863       SDValue Ops[] = {
5864         Op.getOperand(1), // Src0
5865         Op.getOperand(2), // Attrchan
5866         Op.getOperand(3), // Attr
5867         DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
5868         Op.getOperand(4), // high
5869         DAG.getConstant(0, DL, MVT::i1), // $clamp
5870         DAG.getConstant(0, DL, MVT::i32), // $omod
5871         Glue
5872       };
5873       return DAG.getNode(AMDGPUISD::INTERP_P1LL_F16, DL, MVT::f32, Ops);
5874     }
5875   }
5876   case Intrinsic::amdgcn_interp_p2_f16: {
5877     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(6));
5878     SDValue Glue = SDValue(M0.getNode(), 1);
5879     SDValue Ops[] = {
5880       Op.getOperand(2), // Src0
5881       Op.getOperand(3), // Attrchan
5882       Op.getOperand(4), // Attr
5883       DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
5884       Op.getOperand(1), // Src2
5885       DAG.getConstant(0, DL, MVT::i32), // $src2_modifiers
5886       Op.getOperand(5), // high
5887       DAG.getConstant(0, DL, MVT::i1), // $clamp
5888       Glue
5889     };
5890     return DAG.getNode(AMDGPUISD::INTERP_P2_F16, DL, MVT::f16, Ops);
5891   }
5892   case Intrinsic::amdgcn_sin:
5893     return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
5894 
5895   case Intrinsic::amdgcn_cos:
5896     return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
5897 
5898   case Intrinsic::amdgcn_mul_u24:
5899     return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT, Op.getOperand(1), Op.getOperand(2));
5900   case Intrinsic::amdgcn_mul_i24:
5901     return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT, Op.getOperand(1), Op.getOperand(2));
5902 
5903   case Intrinsic::amdgcn_log_clamp: {
5904     if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
5905       return SDValue();
5906 
5907     DiagnosticInfoUnsupported BadIntrin(
5908       MF.getFunction(), "intrinsic not supported on subtarget",
5909       DL.getDebugLoc());
    DAG.getContext()->diagnose(BadIntrin);
    return DAG.getUNDEF(VT);
5912   }
5913   case Intrinsic::amdgcn_ldexp:
5914     return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
5915                        Op.getOperand(1), Op.getOperand(2));
5916 
5917   case Intrinsic::amdgcn_fract:
5918     return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
5919 
5920   case Intrinsic::amdgcn_class:
5921     return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
5922                        Op.getOperand(1), Op.getOperand(2));
5923   case Intrinsic::amdgcn_div_fmas:
5924     return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
5925                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5926                        Op.getOperand(4));
5927 
5928   case Intrinsic::amdgcn_div_fixup:
5929     return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
5930                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5931 
5932   case Intrinsic::amdgcn_trig_preop:
5933     return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
5934                        Op.getOperand(1), Op.getOperand(2));
5935   case Intrinsic::amdgcn_div_scale: {
5936     const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3));
5937 
    // Translate to the operands expected by the machine instruction. The
    // constant parameter selects whether src0 repeats the numerator or the
    // denominator operand.
5940     SDValue Numerator = Op.getOperand(1);
5941     SDValue Denominator = Op.getOperand(2);
5942 
    // Note this order is the opposite of the machine instruction's, which is
    // s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The intrinsic
    // has the numerator as the first operand to match a normal division
    // operation.
5947 
5948     SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
5949 
5950     return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
5951                        Denominator, Numerator);
5952   }
5953   case Intrinsic::amdgcn_icmp: {
5954     // There is a Pat that handles this variant, so return it as-is.
5955     if (Op.getOperand(1).getValueType() == MVT::i1 &&
5956         Op.getConstantOperandVal(2) == 0 &&
5957         Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE)
5958       return Op;
5959     return lowerICMPIntrinsic(*this, Op.getNode(), DAG);
5960   }
5961   case Intrinsic::amdgcn_fcmp: {
5962     return lowerFCMPIntrinsic(*this, Op.getNode(), DAG);
5963   }
5964   case Intrinsic::amdgcn_fmed3:
5965     return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
5966                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5967   case Intrinsic::amdgcn_fdot2:
5968     return DAG.getNode(AMDGPUISD::FDOT2, DL, VT,
5969                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5970                        Op.getOperand(4));
5971   case Intrinsic::amdgcn_fmul_legacy:
5972     return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
5973                        Op.getOperand(1), Op.getOperand(2));
5974   case Intrinsic::amdgcn_sffbh:
5975     return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
5976   case Intrinsic::amdgcn_sbfe:
5977     return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
5978                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5979   case Intrinsic::amdgcn_ubfe:
5980     return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
5981                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5982   case Intrinsic::amdgcn_cvt_pkrtz:
5983   case Intrinsic::amdgcn_cvt_pknorm_i16:
5984   case Intrinsic::amdgcn_cvt_pknorm_u16:
5985   case Intrinsic::amdgcn_cvt_pk_i16:
5986   case Intrinsic::amdgcn_cvt_pk_u16: {
5987     // FIXME: Stop adding cast if v2f16/v2i16 are legal.
5988     EVT VT = Op.getValueType();
5989     unsigned Opcode;
5990 
5991     if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
5992       Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
5993     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
5994       Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
5995     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
5996       Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
5997     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
5998       Opcode = AMDGPUISD::CVT_PK_I16_I32;
5999     else
6000       Opcode = AMDGPUISD::CVT_PK_U16_U32;
6001 
6002     if (isTypeLegal(VT))
6003       return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2));
6004 
6005     SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
6006                                Op.getOperand(1), Op.getOperand(2));
6007     return DAG.getNode(ISD::BITCAST, DL, VT, Node);
6008   }
6009   case Intrinsic::amdgcn_fmad_ftz:
6010     return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1),
6011                        Op.getOperand(2), Op.getOperand(3));
6012 
6013   case Intrinsic::amdgcn_if_break:
6014     return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT,
6015                                       Op->getOperand(1), Op->getOperand(2)), 0);
6016 
6017   case Intrinsic::amdgcn_groupstaticsize: {
6018     Triple::OSType OS = getTargetMachine().getTargetTriple().getOS();
6019     if (OS == Triple::AMDHSA || OS == Triple::AMDPAL)
6020       return Op;
6021 
6022     const Module *M = MF.getFunction().getParent();
6023     const GlobalValue *GV =
6024         M->getNamedValue(Intrinsic::getName(Intrinsic::amdgcn_groupstaticsize));
6025     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0,
6026                                             SIInstrInfo::MO_ABS32_LO);
6027     return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0};
6028   }
6029   default:
6030     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6031             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
6032       return lowerImage(Op, ImageDimIntr, DAG);
6033 
6034     return Op;
6035   }
6036 }
6037 
6038 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
6039                                                  SelectionDAG &DAG) const {
6040   unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
6041   SDLoc DL(Op);
6042 
6043   switch (IntrID) {
6044   case Intrinsic::amdgcn_ds_ordered_add:
6045   case Intrinsic::amdgcn_ds_ordered_swap: {
6046     MemSDNode *M = cast<MemSDNode>(Op);
6047     SDValue Chain = M->getOperand(0);
6048     SDValue M0 = M->getOperand(2);
6049     SDValue Value = M->getOperand(3);
6050     unsigned IndexOperand = M->getConstantOperandVal(7);
6051     unsigned WaveRelease = M->getConstantOperandVal(8);
6052     unsigned WaveDone = M->getConstantOperandVal(9);
6053     unsigned ShaderType;
6054     unsigned Instruction;
6055 
6056     unsigned OrderedCountIndex = IndexOperand & 0x3f;
6057     IndexOperand &= ~0x3f;
6058     unsigned CountDw = 0;
6059 
6060     if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) {
6061       CountDw = (IndexOperand >> 24) & 0xf;
6062       IndexOperand &= ~(0xf << 24);
6063 
6064       if (CountDw < 1 || CountDw > 4) {
6065         report_fatal_error(
6066             "ds_ordered_count: dword count must be between 1 and 4");
6067       }
6068     }
6069 
6070     if (IndexOperand)
6071       report_fatal_error("ds_ordered_count: bad index operand");
6072 
6073     switch (IntrID) {
6074     case Intrinsic::amdgcn_ds_ordered_add:
6075       Instruction = 0;
6076       break;
6077     case Intrinsic::amdgcn_ds_ordered_swap:
6078       Instruction = 1;
6079       break;
6080     }
6081 
6082     if (WaveDone && !WaveRelease)
6083       report_fatal_error("ds_ordered_count: wave_done requires wave_release");
6084 
6085     switch (DAG.getMachineFunction().getFunction().getCallingConv()) {
6086     case CallingConv::AMDGPU_CS:
6087     case CallingConv::AMDGPU_KERNEL:
6088       ShaderType = 0;
6089       break;
6090     case CallingConv::AMDGPU_PS:
6091       ShaderType = 1;
6092       break;
6093     case CallingConv::AMDGPU_VS:
6094       ShaderType = 2;
6095       break;
6096     case CallingConv::AMDGPU_GS:
6097       ShaderType = 3;
6098       break;
6099     default:
6100       report_fatal_error("ds_ordered_count unsupported for this calling conv");
6101     }
6102 
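    // Pack the ds_ordered_count fields: offset0 holds the ordered count index
    // (scaled to a byte offset), offset1 holds wave_release, wave_done, the
    // shader type and the instruction selector, plus the dword count on
    // GFX10+.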
6103     unsigned Offset0 = OrderedCountIndex << 2;
6104     unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
6105                        (Instruction << 4);
6106 
6107     if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
6108       Offset1 |= (CountDw - 1) << 6;
6109 
6110     unsigned Offset = Offset0 | (Offset1 << 8);
6111 
6112     SDValue Ops[] = {
6113       Chain,
6114       Value,
6115       DAG.getTargetConstant(Offset, DL, MVT::i16),
6116       copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue
6117     };
6118     return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL,
6119                                    M->getVTList(), Ops, M->getMemoryVT(),
6120                                    M->getMemOperand());
6121   }
6122   case Intrinsic::amdgcn_ds_fadd: {
6123     MemSDNode *M = cast<MemSDNode>(Op);
6124     unsigned Opc;
6125     switch (IntrID) {
6126     case Intrinsic::amdgcn_ds_fadd:
6127       Opc = ISD::ATOMIC_LOAD_FADD;
6128       break;
6129     }
6130 
6131     return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(),
6132                          M->getOperand(0), M->getOperand(2), M->getOperand(3),
6133                          M->getMemOperand());
6134   }
6135   case Intrinsic::amdgcn_atomic_inc:
6136   case Intrinsic::amdgcn_atomic_dec:
6137   case Intrinsic::amdgcn_ds_fmin:
6138   case Intrinsic::amdgcn_ds_fmax: {
6139     MemSDNode *M = cast<MemSDNode>(Op);
6140     unsigned Opc;
6141     switch (IntrID) {
6142     case Intrinsic::amdgcn_atomic_inc:
6143       Opc = AMDGPUISD::ATOMIC_INC;
6144       break;
6145     case Intrinsic::amdgcn_atomic_dec:
6146       Opc = AMDGPUISD::ATOMIC_DEC;
6147       break;
6148     case Intrinsic::amdgcn_ds_fmin:
6149       Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
6150       break;
6151     case Intrinsic::amdgcn_ds_fmax:
6152       Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
6153       break;
6154     default:
6155       llvm_unreachable("Unknown intrinsic!");
6156     }
6157     SDValue Ops[] = {
6158       M->getOperand(0), // Chain
6159       M->getOperand(2), // Ptr
6160       M->getOperand(3)  // Value
6161     };
6162 
6163     return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
6164                                    M->getMemoryVT(), M->getMemOperand());
6165   }
6166   case Intrinsic::amdgcn_buffer_load:
6167   case Intrinsic::amdgcn_buffer_load_format: {
6168     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
6169     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6170     unsigned IdxEn = 1;
6171     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
6172       IdxEn = Idx->getZExtValue() != 0;
6173     SDValue Ops[] = {
6174       Op.getOperand(0), // Chain
6175       Op.getOperand(2), // rsrc
6176       Op.getOperand(3), // vindex
6177       SDValue(),        // voffset -- will be set by setBufferOffsets
6178       SDValue(),        // soffset -- will be set by setBufferOffsets
6179       SDValue(),        // offset -- will be set by setBufferOffsets
6180       DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6181       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6182     };
6183 
6184     setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]);
6185     unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
6186         AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
6187 
6188     EVT VT = Op.getValueType();
6189     EVT IntVT = VT.changeTypeToInteger();
6190     auto *M = cast<MemSDNode>(Op);
6191     EVT LoadVT = Op.getValueType();
6192 
6193     if (LoadVT.getScalarType() == MVT::f16)
6194       return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
6195                                  M, DAG, Ops);
6196 
6197     // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
6198     if (LoadVT.getScalarType() == MVT::i8 ||
6199         LoadVT.getScalarType() == MVT::i16)
6200       return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
6201 
6202     return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
6203                                M->getMemOperand(), DAG);
6204   }
6205   case Intrinsic::amdgcn_raw_buffer_load:
6206   case Intrinsic::amdgcn_raw_buffer_load_format: {
6207     auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
6208     SDValue Ops[] = {
6209       Op.getOperand(0), // Chain
6210       Op.getOperand(2), // rsrc
6211       DAG.getConstant(0, DL, MVT::i32), // vindex
6212       Offsets.first,    // voffset
6213       Op.getOperand(4), // soffset
6214       Offsets.second,   // offset
6215       Op.getOperand(5), // cachepolicy
6216       DAG.getConstant(0, DL, MVT::i1), // idxen
6217     };
6218 
6219     unsigned Opc = (IntrID == Intrinsic::amdgcn_raw_buffer_load) ?
6220         AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
6221 
6222     EVT VT = Op.getValueType();
6223     EVT IntVT = VT.changeTypeToInteger();
6224     auto *M = cast<MemSDNode>(Op);
6225     EVT LoadVT = Op.getValueType();
6226 
6227     if (LoadVT.getScalarType() == MVT::f16)
6228       return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
6229                                  M, DAG, Ops);
6230 
6231     // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
6232     if (LoadVT.getScalarType() == MVT::i8 ||
6233         LoadVT.getScalarType() == MVT::i16)
6234       return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
6235 
6236     return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
6237                                M->getMemOperand(), DAG);
6238   }
6239   case Intrinsic::amdgcn_struct_buffer_load:
6240   case Intrinsic::amdgcn_struct_buffer_load_format: {
6241     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6242     SDValue Ops[] = {
6243       Op.getOperand(0), // Chain
6244       Op.getOperand(2), // rsrc
6245       Op.getOperand(3), // vindex
6246       Offsets.first,    // voffset
6247       Op.getOperand(5), // soffset
6248       Offsets.second,   // offset
6249       Op.getOperand(6), // cachepolicy
6250       DAG.getConstant(1, DL, MVT::i1), // idxen
6251     };
6252 
6253     unsigned Opc = (IntrID == Intrinsic::amdgcn_struct_buffer_load) ?
6254         AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
6255 
6256     EVT VT = Op.getValueType();
6257     EVT IntVT = VT.changeTypeToInteger();
6258     auto *M = cast<MemSDNode>(Op);
6259     EVT LoadVT = Op.getValueType();
6260 
6261     if (LoadVT.getScalarType() == MVT::f16)
6262       return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
6263                                  M, DAG, Ops);
6264 
6265     // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
6266     if (LoadVT.getScalarType() == MVT::i8 ||
6267         LoadVT.getScalarType() == MVT::i16)
6268       return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
6269 
6270     return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
6271                                M->getMemOperand(), DAG);
6272   }
6273   case Intrinsic::amdgcn_tbuffer_load: {
6274     MemSDNode *M = cast<MemSDNode>(Op);
6275     EVT LoadVT = Op.getValueType();
6276 
6277     unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6278     unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
6279     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
6280     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
6281     unsigned IdxEn = 1;
6282     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
6283       IdxEn = Idx->getZExtValue() != 0;
6284     SDValue Ops[] = {
6285       Op.getOperand(0),  // Chain
6286       Op.getOperand(2),  // rsrc
6287       Op.getOperand(3),  // vindex
6288       Op.getOperand(4),  // voffset
6289       Op.getOperand(5),  // soffset
6290       Op.getOperand(6),  // offset
6291       DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
6292       DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6293       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6294     };
6295 
6296     if (LoadVT.getScalarType() == MVT::f16)
6297       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
6298                                  M, DAG, Ops);
6299     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
6300                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
6301                                DAG);
6302   }
6303   case Intrinsic::amdgcn_raw_tbuffer_load: {
6304     MemSDNode *M = cast<MemSDNode>(Op);
6305     EVT LoadVT = Op.getValueType();
6306     auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
6307 
6308     SDValue Ops[] = {
6309       Op.getOperand(0),  // Chain
6310       Op.getOperand(2),  // rsrc
6311       DAG.getConstant(0, DL, MVT::i32), // vindex
6312       Offsets.first,     // voffset
6313       Op.getOperand(4),  // soffset
6314       Offsets.second,    // offset
6315       Op.getOperand(5),  // format
6316       Op.getOperand(6),  // cachepolicy
6317       DAG.getConstant(0, DL, MVT::i1), // idxen
6318     };
6319 
6320     if (LoadVT.getScalarType() == MVT::f16)
6321       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
6322                                  M, DAG, Ops);
6323     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
6324                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
6325                                DAG);
6326   }
6327   case Intrinsic::amdgcn_struct_tbuffer_load: {
6328     MemSDNode *M = cast<MemSDNode>(Op);
6329     EVT LoadVT = Op.getValueType();
6330     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6331 
6332     SDValue Ops[] = {
6333       Op.getOperand(0),  // Chain
6334       Op.getOperand(2),  // rsrc
6335       Op.getOperand(3),  // vindex
6336       Offsets.first,     // voffset
6337       Op.getOperand(5),  // soffset
6338       Offsets.second,    // offset
6339       Op.getOperand(6),  // format
6340       Op.getOperand(7),  // cachepolicy
6341       DAG.getConstant(1, DL, MVT::i1), // idxen
6342     };
6343 
6344     if (LoadVT.getScalarType() == MVT::f16)
6345       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
6346                                  M, DAG, Ops);
6347     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
6348                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
6349                                DAG);
6350   }
6351   case Intrinsic::amdgcn_buffer_atomic_swap:
6352   case Intrinsic::amdgcn_buffer_atomic_add:
6353   case Intrinsic::amdgcn_buffer_atomic_sub:
6354   case Intrinsic::amdgcn_buffer_atomic_smin:
6355   case Intrinsic::amdgcn_buffer_atomic_umin:
6356   case Intrinsic::amdgcn_buffer_atomic_smax:
6357   case Intrinsic::amdgcn_buffer_atomic_umax:
6358   case Intrinsic::amdgcn_buffer_atomic_and:
6359   case Intrinsic::amdgcn_buffer_atomic_or:
6360   case Intrinsic::amdgcn_buffer_atomic_xor: {
6361     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6362     unsigned IdxEn = 1;
6363     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6364       IdxEn = Idx->getZExtValue() != 0;
6365     SDValue Ops[] = {
6366       Op.getOperand(0), // Chain
6367       Op.getOperand(2), // vdata
6368       Op.getOperand(3), // rsrc
6369       Op.getOperand(4), // vindex
6370       SDValue(),        // voffset -- will be set by setBufferOffsets
6371       SDValue(),        // soffset -- will be set by setBufferOffsets
6372       SDValue(),        // offset -- will be set by setBufferOffsets
6373       DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
6374       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6375     };
6376     setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
6377     EVT VT = Op.getValueType();
6378 
6379     auto *M = cast<MemSDNode>(Op);
6380     unsigned Opcode = 0;
6381 
6382     switch (IntrID) {
6383     case Intrinsic::amdgcn_buffer_atomic_swap:
6384       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6385       break;
6386     case Intrinsic::amdgcn_buffer_atomic_add:
6387       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6388       break;
6389     case Intrinsic::amdgcn_buffer_atomic_sub:
6390       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6391       break;
6392     case Intrinsic::amdgcn_buffer_atomic_smin:
6393       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6394       break;
6395     case Intrinsic::amdgcn_buffer_atomic_umin:
6396       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6397       break;
6398     case Intrinsic::amdgcn_buffer_atomic_smax:
6399       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6400       break;
6401     case Intrinsic::amdgcn_buffer_atomic_umax:
6402       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6403       break;
6404     case Intrinsic::amdgcn_buffer_atomic_and:
6405       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6406       break;
6407     case Intrinsic::amdgcn_buffer_atomic_or:
6408       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6409       break;
6410     case Intrinsic::amdgcn_buffer_atomic_xor:
6411       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6412       break;
6413     default:
6414       llvm_unreachable("unhandled atomic opcode");
6415     }
6416 
6417     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6418                                    M->getMemOperand());
6419   }
6420   case Intrinsic::amdgcn_raw_buffer_atomic_swap:
6421   case Intrinsic::amdgcn_raw_buffer_atomic_add:
6422   case Intrinsic::amdgcn_raw_buffer_atomic_sub:
6423   case Intrinsic::amdgcn_raw_buffer_atomic_smin:
6424   case Intrinsic::amdgcn_raw_buffer_atomic_umin:
6425   case Intrinsic::amdgcn_raw_buffer_atomic_smax:
6426   case Intrinsic::amdgcn_raw_buffer_atomic_umax:
6427   case Intrinsic::amdgcn_raw_buffer_atomic_and:
6428   case Intrinsic::amdgcn_raw_buffer_atomic_or:
6429   case Intrinsic::amdgcn_raw_buffer_atomic_xor: {
6430     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6431     SDValue Ops[] = {
6432       Op.getOperand(0), // Chain
6433       Op.getOperand(2), // vdata
6434       Op.getOperand(3), // rsrc
6435       DAG.getConstant(0, DL, MVT::i32), // vindex
6436       Offsets.first,    // voffset
6437       Op.getOperand(5), // soffset
6438       Offsets.second,   // offset
6439       Op.getOperand(6), // cachepolicy
6440       DAG.getConstant(0, DL, MVT::i1), // idxen
6441     };
6442     EVT VT = Op.getValueType();
6443 
6444     auto *M = cast<MemSDNode>(Op);
6445     unsigned Opcode = 0;
6446 
6447     switch (IntrID) {
6448     case Intrinsic::amdgcn_raw_buffer_atomic_swap:
6449       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6450       break;
6451     case Intrinsic::amdgcn_raw_buffer_atomic_add:
6452       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6453       break;
6454     case Intrinsic::amdgcn_raw_buffer_atomic_sub:
6455       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6456       break;
6457     case Intrinsic::amdgcn_raw_buffer_atomic_smin:
6458       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6459       break;
6460     case Intrinsic::amdgcn_raw_buffer_atomic_umin:
6461       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6462       break;
6463     case Intrinsic::amdgcn_raw_buffer_atomic_smax:
6464       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6465       break;
6466     case Intrinsic::amdgcn_raw_buffer_atomic_umax:
6467       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6468       break;
6469     case Intrinsic::amdgcn_raw_buffer_atomic_and:
6470       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6471       break;
6472     case Intrinsic::amdgcn_raw_buffer_atomic_or:
6473       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6474       break;
6475     case Intrinsic::amdgcn_raw_buffer_atomic_xor:
6476       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6477       break;
6478     default:
6479       llvm_unreachable("unhandled atomic opcode");
6480     }
6481 
6482     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6483                                    M->getMemOperand());
6484   }
6485   case Intrinsic::amdgcn_struct_buffer_atomic_swap:
6486   case Intrinsic::amdgcn_struct_buffer_atomic_add:
6487   case Intrinsic::amdgcn_struct_buffer_atomic_sub:
6488   case Intrinsic::amdgcn_struct_buffer_atomic_smin:
6489   case Intrinsic::amdgcn_struct_buffer_atomic_umin:
6490   case Intrinsic::amdgcn_struct_buffer_atomic_smax:
6491   case Intrinsic::amdgcn_struct_buffer_atomic_umax:
6492   case Intrinsic::amdgcn_struct_buffer_atomic_and:
6493   case Intrinsic::amdgcn_struct_buffer_atomic_or:
6494   case Intrinsic::amdgcn_struct_buffer_atomic_xor: {
6495     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6496     SDValue Ops[] = {
6497       Op.getOperand(0), // Chain
6498       Op.getOperand(2), // vdata
6499       Op.getOperand(3), // rsrc
6500       Op.getOperand(4), // vindex
6501       Offsets.first,    // voffset
6502       Op.getOperand(6), // soffset
6503       Offsets.second,   // offset
6504       Op.getOperand(7), // cachepolicy
6505       DAG.getConstant(1, DL, MVT::i1), // idxen
6506     };
6507     EVT VT = Op.getValueType();
6508 
6509     auto *M = cast<MemSDNode>(Op);
6510     unsigned Opcode = 0;
6511 
6512     switch (IntrID) {
6513     case Intrinsic::amdgcn_struct_buffer_atomic_swap:
6514       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6515       break;
6516     case Intrinsic::amdgcn_struct_buffer_atomic_add:
6517       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6518       break;
6519     case Intrinsic::amdgcn_struct_buffer_atomic_sub:
6520       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6521       break;
6522     case Intrinsic::amdgcn_struct_buffer_atomic_smin:
6523       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6524       break;
6525     case Intrinsic::amdgcn_struct_buffer_atomic_umin:
6526       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6527       break;
6528     case Intrinsic::amdgcn_struct_buffer_atomic_smax:
6529       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6530       break;
6531     case Intrinsic::amdgcn_struct_buffer_atomic_umax:
6532       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6533       break;
6534     case Intrinsic::amdgcn_struct_buffer_atomic_and:
6535       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6536       break;
6537     case Intrinsic::amdgcn_struct_buffer_atomic_or:
6538       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6539       break;
6540     case Intrinsic::amdgcn_struct_buffer_atomic_xor:
6541       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6542       break;
6543     default:
6544       llvm_unreachable("unhandled atomic opcode");
6545     }
6546 
6547     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6548                                    M->getMemOperand());
6549   }
6550   case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
6551     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6552     unsigned IdxEn = 1;
6553     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(5)))
6554       IdxEn = Idx->getZExtValue() != 0;
6555     SDValue Ops[] = {
6556       Op.getOperand(0), // Chain
6557       Op.getOperand(2), // src
6558       Op.getOperand(3), // cmp
6559       Op.getOperand(4), // rsrc
6560       Op.getOperand(5), // vindex
6561       SDValue(),        // voffset -- will be set by setBufferOffsets
6562       SDValue(),        // soffset -- will be set by setBufferOffsets
6563       SDValue(),        // offset -- will be set by setBufferOffsets
6564       DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
6565       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6566     };
6567     setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]);
6568     EVT VT = Op.getValueType();
6569     auto *M = cast<MemSDNode>(Op);
6570 
6571     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6572                                    Op->getVTList(), Ops, VT, M->getMemOperand());
6573   }
6574   case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: {
6575     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6576     SDValue Ops[] = {
6577       Op.getOperand(0), // Chain
6578       Op.getOperand(2), // src
6579       Op.getOperand(3), // cmp
6580       Op.getOperand(4), // rsrc
6581       DAG.getConstant(0, DL, MVT::i32), // vindex
6582       Offsets.first,    // voffset
6583       Op.getOperand(6), // soffset
6584       Offsets.second,   // offset
6585       Op.getOperand(7), // cachepolicy
6586       DAG.getConstant(0, DL, MVT::i1), // idxen
6587     };
6588     EVT VT = Op.getValueType();
6589     auto *M = cast<MemSDNode>(Op);
6590 
6591     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6592                                    Op->getVTList(), Ops, VT, M->getMemOperand());
6593   }
6594   case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: {
6595     auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG);
6596     SDValue Ops[] = {
6597       Op.getOperand(0), // Chain
6598       Op.getOperand(2), // src
6599       Op.getOperand(3), // cmp
6600       Op.getOperand(4), // rsrc
6601       Op.getOperand(5), // vindex
6602       Offsets.first,    // voffset
6603       Op.getOperand(7), // soffset
6604       Offsets.second,   // offset
6605       Op.getOperand(8), // cachepolicy
6606       DAG.getConstant(1, DL, MVT::i1), // idxen
6607     };
6608     EVT VT = Op.getValueType();
6609     auto *M = cast<MemSDNode>(Op);
6610 
6611     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6612                                    Op->getVTList(), Ops, VT, M->getMemOperand());
6613   }
6614 
6615   default:
6616     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6617             AMDGPU::getImageDimIntrinsicInfo(IntrID))
6618       return lowerImage(Op, ImageDimIntr, DAG);
6619 
6620     return SDValue();
6621   }
6622 }
6623 
// Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to
// dwordx4 if the subtarget does not support dwordx3 load/stores (e.g. on SI).
6626 SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL,
6627                                               SDVTList VTList,
6628                                               ArrayRef<SDValue> Ops, EVT MemVT,
6629                                               MachineMemOperand *MMO,
6630                                               SelectionDAG &DAG) const {
6631   EVT VT = VTList.VTs[0];
6632   EVT WidenedVT = VT;
6633   EVT WidenedMemVT = MemVT;
6634   if (!Subtarget->hasDwordx3LoadStores() &&
6635       (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) {
6636     WidenedVT = EVT::getVectorVT(*DAG.getContext(),
6637                                  WidenedVT.getVectorElementType(), 4);
6638     WidenedMemVT = EVT::getVectorVT(*DAG.getContext(),
6639                                     WidenedMemVT.getVectorElementType(), 4);
6640     MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16);
6641   }
6642 
6643   assert(VTList.NumVTs == 2);
6644   SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]);
6645 
6646   auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops,
6647                                        WidenedMemVT, MMO);
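  // If the type was widened, extract the originally requested subvector and
  // preserve the chain result.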
6648   if (WidenedVT != VT) {
6649     auto Extract = DAG.getNode(
6650         ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp,
6651         DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
6652     NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL);
6653   }
6654   return NewOp;
6655 }
6656 
6657 SDValue SITargetLowering::handleD16VData(SDValue VData,
6658                                          SelectionDAG &DAG) const {
6659   EVT StoreVT = VData.getValueType();
6660 
6661   // No change for f16 and legal vector D16 types.
6662   if (!StoreVT.isVector())
6663     return VData;
6664 
6665   SDLoc DL(VData);
6666   assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16");
6667 
6668   if (Subtarget->hasUnpackedD16VMem()) {
    // We need to unpack the packed data before storing it.
6670     EVT IntStoreVT = StoreVT.changeTypeToInteger();
6671     SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
6672 
6673     EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
6674                                         StoreVT.getVectorNumElements());
6675     SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
6676     return DAG.UnrollVectorOp(ZExt.getNode());
6677   }
6678 
6679   assert(isTypeLegal(StoreVT));
6680   return VData;
6681 }
6682 
6683 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
6684                                               SelectionDAG &DAG) const {
6685   SDLoc DL(Op);
6686   SDValue Chain = Op.getOperand(0);
6687   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
6688   MachineFunction &MF = DAG.getMachineFunction();
6689 
6690   switch (IntrinsicID) {
6691   case Intrinsic::amdgcn_exp: {
6692     const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
6693     const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
6694     const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
6695     const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));
6696 
6697     const SDValue Ops[] = {
6698       Chain,
6699       DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
6700       DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
6701       Op.getOperand(4), // src0
6702       Op.getOperand(5), // src1
6703       Op.getOperand(6), // src2
6704       Op.getOperand(7), // src3
6705       DAG.getTargetConstant(0, DL, MVT::i1), // compr
6706       DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
6707     };
6708 
6709     unsigned Opc = Done->isNullValue() ?
6710       AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
6711     return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
6712   }
6713   case Intrinsic::amdgcn_exp_compr: {
6714     const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
6715     const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
6716     SDValue Src0 = Op.getOperand(4);
6717     SDValue Src1 = Op.getOperand(5);
6718     const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
6719     const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));
6720 
6721     SDValue Undef = DAG.getUNDEF(MVT::f32);
6722     const SDValue Ops[] = {
6723       Chain,
6724       DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
6725       DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
6726       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
6727       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
6728       Undef, // src2
6729       Undef, // src3
6730       DAG.getTargetConstant(1, DL, MVT::i1), // compr
6731       DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
6732     };
6733 
6734     unsigned Opc = Done->isNullValue() ?
6735       AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
6736     return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
6737   }
6738   case Intrinsic::amdgcn_s_sendmsg:
6739   case Intrinsic::amdgcn_s_sendmsghalt: {
6740     unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ?
6741       AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
6742     Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
6743     SDValue Glue = Chain.getValue(1);
6744     return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
6745                        Op.getOperand(2), Glue);
6746   }
6747   case Intrinsic::amdgcn_init_exec: {
6748     return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
6749                        Op.getOperand(2));
6750   }
6751   case Intrinsic::amdgcn_init_exec_from_input: {
6752     return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
6753                        Op.getOperand(2), Op.getOperand(3));
6754   }
6755   case Intrinsic::amdgcn_s_barrier: {
6756     if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
6757       const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
6758       unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
6759       if (WGSize <= ST.getWavefrontSize())
6760         return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
6761                                           Op.getOperand(0)), 0);
6762     }
6763     return SDValue();
  }
6765   case Intrinsic::amdgcn_tbuffer_store: {
6766     SDValue VData = Op.getOperand(2);
6767     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6768     if (IsD16)
6769       VData = handleD16VData(VData, DAG);
6770     unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
6771     unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
6772     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
6773     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue();
6774     unsigned IdxEn = 1;
6775     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6776       IdxEn = Idx->getZExtValue() != 0;
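    // The separate dfmt/nfmt and glc/slc intrinsic operands are packed into
    // single format and cachepolicy operands below: dfmt occupies the low four
    // bits and nfmt the bits above it (e.g. Dfmt = 4, Nfmt = 7 gives
    // 4 | (7 << 4) = 0x74), while glc is bit 0 and slc bit 1 of cachepolicy.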
6777     SDValue Ops[] = {
6778       Chain,
6779       VData,             // vdata
6780       Op.getOperand(3),  // rsrc
6781       Op.getOperand(4),  // vindex
6782       Op.getOperand(5),  // voffset
6783       Op.getOperand(6),  // soffset
6784       Op.getOperand(7),  // offset
6785       DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
6786       DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
      DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6788     };
6789     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6790                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6791     MemSDNode *M = cast<MemSDNode>(Op);
6792     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6793                                    M->getMemoryVT(), M->getMemOperand());
6794   }
6795 
6796   case Intrinsic::amdgcn_struct_tbuffer_store: {
6797     SDValue VData = Op.getOperand(2);
6798     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6799     if (IsD16)
6800       VData = handleD16VData(VData, DAG);
6801     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6802     SDValue Ops[] = {
6803       Chain,
6804       VData,             // vdata
6805       Op.getOperand(3),  // rsrc
6806       Op.getOperand(4),  // vindex
6807       Offsets.first,     // voffset
6808       Op.getOperand(6),  // soffset
6809       Offsets.second,    // offset
6810       Op.getOperand(7),  // format
6811       Op.getOperand(8),  // cachepolicy
      DAG.getConstant(1, DL, MVT::i1), // idxen
6813     };
6814     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6815                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6816     MemSDNode *M = cast<MemSDNode>(Op);
6817     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6818                                    M->getMemoryVT(), M->getMemOperand());
6819   }
6820 
6821   case Intrinsic::amdgcn_raw_tbuffer_store: {
6822     SDValue VData = Op.getOperand(2);
6823     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6824     if (IsD16)
6825       VData = handleD16VData(VData, DAG);
6826     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6827     SDValue Ops[] = {
6828       Chain,
6829       VData,             // vdata
6830       Op.getOperand(3),  // rsrc
6831       DAG.getConstant(0, DL, MVT::i32), // vindex
6832       Offsets.first,     // voffset
6833       Op.getOperand(5),  // soffset
6834       Offsets.second,    // offset
6835       Op.getOperand(6),  // format
6836       Op.getOperand(7),  // cachepolicy
      DAG.getConstant(0, DL, MVT::i1), // idxen
6838     };
6839     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6840                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6841     MemSDNode *M = cast<MemSDNode>(Op);
6842     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6843                                    M->getMemoryVT(), M->getMemOperand());
6844   }
6845 
6846   case Intrinsic::amdgcn_buffer_store:
6847   case Intrinsic::amdgcn_buffer_store_format: {
6848     SDValue VData = Op.getOperand(2);
6849     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6850     if (IsD16)
6851       VData = handleD16VData(VData, DAG);
6852     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6853     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6854     unsigned IdxEn = 1;
6855     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6856       IdxEn = Idx->getZExtValue() != 0;
6857     SDValue Ops[] = {
6858       Chain,
6859       VData,
6860       Op.getOperand(3), // rsrc
6861       Op.getOperand(4), // vindex
6862       SDValue(), // voffset -- will be set by setBufferOffsets
6863       SDValue(), // soffset -- will be set by setBufferOffsets
6864       SDValue(), // offset -- will be set by setBufferOffsets
6865       DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6866       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6867     };
6868     setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
6869     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
6870                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6871     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6872     MemSDNode *M = cast<MemSDNode>(Op);
6873 
6874     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6875     EVT VDataType = VData.getValueType().getScalarType();
6876     if (VDataType == MVT::i8 || VDataType == MVT::i16)
6877       return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6878 
6879     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6880                                    M->getMemoryVT(), M->getMemOperand());
6881   }
6882 
6883   case Intrinsic::amdgcn_raw_buffer_store:
6884   case Intrinsic::amdgcn_raw_buffer_store_format: {
6885     SDValue VData = Op.getOperand(2);
6886     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6887     if (IsD16)
6888       VData = handleD16VData(VData, DAG);
6889     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6890     SDValue Ops[] = {
6891       Chain,
6892       VData,
6893       Op.getOperand(3), // rsrc
6894       DAG.getConstant(0, DL, MVT::i32), // vindex
6895       Offsets.first,    // voffset
6896       Op.getOperand(5), // soffset
6897       Offsets.second,   // offset
6898       Op.getOperand(6), // cachepolicy
6899       DAG.getConstant(0, DL, MVT::i1), // idxen
6900     };
6901     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_raw_buffer_store ?
6902                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6903     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6904     MemSDNode *M = cast<MemSDNode>(Op);
6905 
6906     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6907     EVT VDataType = VData.getValueType().getScalarType();
6908     if (VDataType == MVT::i8 || VDataType == MVT::i16)
6909       return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6910 
6911     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6912                                    M->getMemoryVT(), M->getMemOperand());
6913   }
6914 
6915   case Intrinsic::amdgcn_struct_buffer_store:
6916   case Intrinsic::amdgcn_struct_buffer_store_format: {
6917     SDValue VData = Op.getOperand(2);
6918     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6919     if (IsD16)
6920       VData = handleD16VData(VData, DAG);
6921     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6922     SDValue Ops[] = {
6923       Chain,
6924       VData,
6925       Op.getOperand(3), // rsrc
6926       Op.getOperand(4), // vindex
6927       Offsets.first,    // voffset
6928       Op.getOperand(6), // soffset
6929       Offsets.second,   // offset
6930       Op.getOperand(7), // cachepolicy
6931       DAG.getConstant(1, DL, MVT::i1), // idxen
6932     };
6933     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ?
6934                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6935     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6936     MemSDNode *M = cast<MemSDNode>(Op);
6937 
6938     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6939     EVT VDataType = VData.getValueType().getScalarType();
6940     if (VDataType == MVT::i8 || VDataType == MVT::i16)
6941       return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6942 
6943     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6944                                    M->getMemoryVT(), M->getMemOperand());
6945   }
6946 
6947   case Intrinsic::amdgcn_buffer_atomic_fadd: {
6948     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6949     unsigned IdxEn = 1;
6950     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6951       IdxEn = Idx->getZExtValue() != 0;
6952     SDValue Ops[] = {
6953       Chain,
6954       Op.getOperand(2), // vdata
6955       Op.getOperand(3), // rsrc
6956       Op.getOperand(4), // vindex
6957       SDValue(),        // voffset -- will be set by setBufferOffsets
6958       SDValue(),        // soffset -- will be set by setBufferOffsets
6959       SDValue(),        // offset -- will be set by setBufferOffsets
6960       DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
6961       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6962     };
6963     setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
6964     EVT VT = Op.getOperand(2).getValueType();
6965 
6966     auto *M = cast<MemSDNode>(Op);
6967     unsigned Opcode = VT.isVector() ? AMDGPUISD::BUFFER_ATOMIC_PK_FADD
6968                                     : AMDGPUISD::BUFFER_ATOMIC_FADD;
6969 
6970     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6971                                    M->getMemOperand());
6972   }
6973 
6974   case Intrinsic::amdgcn_global_atomic_fadd: {
6975     SDValue Ops[] = {
6976       Chain,
6977       Op.getOperand(2), // ptr
6978       Op.getOperand(3)  // vdata
6979     };
6980     EVT VT = Op.getOperand(3).getValueType();
6981 
6982     auto *M = cast<MemSDNode>(Op);
6983     unsigned Opcode = VT.isVector() ? AMDGPUISD::ATOMIC_PK_FADD
6984                                     : AMDGPUISD::ATOMIC_FADD;
6985 
6986     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6987                                    M->getMemOperand());
6988   }
6989 
6990   case Intrinsic::amdgcn_end_cf:
6991     return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other,
6992                                       Op->getOperand(2), Chain), 0);
6993 
6994   default: {
6995     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6996             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
6997       return lowerImage(Op, ImageDimIntr, DAG);
6998 
6999     return Op;
7000   }
7001   }
7002 }
7003 
7004 // The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args:
7005 // offset (the offset that is included in bounds checking and swizzling, to be
7006 // split between the instruction's voffset and immoffset fields) and soffset
7007 // (the offset that is excluded from bounds checking and swizzling, to go in
7008 // the instruction's soffset field).  This function takes the first kind of
7009 // offset and figures out how to split it between voffset and immoffset.
7010 std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets(
7011     SDValue Offset, SelectionDAG &DAG) const {
7012   SDLoc DL(Offset);
7013   const unsigned MaxImm = 4095;
7014   SDValue N0 = Offset;
7015   ConstantSDNode *C1 = nullptr;
7016 
7017   if ((C1 = dyn_cast<ConstantSDNode>(N0)))
7018     N0 = SDValue();
7019   else if (DAG.isBaseWithConstantOffset(N0)) {
7020     C1 = cast<ConstantSDNode>(N0.getOperand(1));
7021     N0 = N0.getOperand(0);
7022   }
7023 
7024   if (C1) {
7025     unsigned ImmOffset = C1->getZExtValue();
    // If the immediate value is too big for the immoffset field, keep only the
    // low 12 bits (the value modulo 4096) in the immoffset field, so that the
    // value copied/added into the voffset field is a multiple of 4096. That
    // multiple stands a better chance of being CSEd with the copy/add emitted
    // for another similar load/store.
    // However, do not round the offset down to a multiple of 4096 if doing so
    // would make the voffset part negative, as it appears to be illegal to
    // have a negative offset in the vgpr, even if adding the immediate offset
    // makes it positive.
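    // For example, a combined offset of 0x1234 is split into ImmOffset =
    // 0x234 and Overflow = 0x1000; the 0x1000 is folded into the voffset value
    // below while 0x234 remains in the immediate field.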
7033     unsigned Overflow = ImmOffset & ~MaxImm;
7034     ImmOffset -= Overflow;
7035     if ((int32_t)Overflow < 0) {
7036       Overflow += ImmOffset;
7037       ImmOffset = 0;
7038     }
7039     C1 = cast<ConstantSDNode>(DAG.getConstant(ImmOffset, DL, MVT::i32));
7040     if (Overflow) {
7041       auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32);
7042       if (!N0)
7043         N0 = OverflowVal;
7044       else {
7045         SDValue Ops[] = { N0, OverflowVal };
7046         N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops);
7047       }
7048     }
7049   }
7050   if (!N0)
7051     N0 = DAG.getConstant(0, DL, MVT::i32);
7052   if (!C1)
7053     C1 = cast<ConstantSDNode>(DAG.getConstant(0, DL, MVT::i32));
7054   return {N0, SDValue(C1, 0)};
7055 }
7056 
7057 // Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the
7058 // three offsets (voffset, soffset and instoffset) into the SDValue[3] array
7059 // pointed to by Offsets.
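// For example, a small constant combined offset that fits the immediate
// field typically becomes {voffset = 0, soffset = 0, instoffset = imm}, while
// an offset with no usable constant part falls through to
// {voffset = CombinedOffset, soffset = 0, instoffset = 0}.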
7060 void SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
7061                                         SelectionDAG &DAG, SDValue *Offsets,
7062                                         unsigned Align) const {
7063   SDLoc DL(CombinedOffset);
7064   if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) {
7065     uint32_t Imm = C->getZExtValue();
7066     uint32_t SOffset, ImmOffset;
7067     if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, Align)) {
7068       Offsets[0] = DAG.getConstant(0, DL, MVT::i32);
7069       Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
7070       Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32);
7071       return;
7072     }
7073   }
7074   if (DAG.isBaseWithConstantOffset(CombinedOffset)) {
7075     SDValue N0 = CombinedOffset.getOperand(0);
7076     SDValue N1 = CombinedOffset.getOperand(1);
7077     uint32_t SOffset, ImmOffset;
7078     int Offset = cast<ConstantSDNode>(N1)->getSExtValue();
7079     if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset,
7080                                                 Subtarget, Align)) {
7081       Offsets[0] = N0;
7082       Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
7083       Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32);
7084       return;
7085     }
7086   }
7087   Offsets[0] = CombinedOffset;
7088   Offsets[1] = DAG.getConstant(0, DL, MVT::i32);
7089   Offsets[2] = DAG.getConstant(0, DL, MVT::i32);
7090 }
7091 
7092 // Handle 8 bit and 16 bit buffer loads
7093 SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG,
7094                                                      EVT LoadVT, SDLoc DL,
7095                                                      ArrayRef<SDValue> Ops,
7096                                                      MemSDNode *M) const {
7097   EVT IntVT = LoadVT.changeTypeToInteger();
7098   unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ?
7099          AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT;
7100 
7101   SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other);
7102   SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList,
7103                                                Ops, IntVT,
7104                                                M->getMemOperand());
7105   SDValue BufferLoadTrunc = DAG.getNode(ISD::TRUNCATE, DL,
7106                                         LoadVT.getScalarType(), BufferLoad);
7107   return DAG.getMergeValues({BufferLoadTrunc, BufferLoad.getValue(1)}, DL);
7108 }
7109 
7110 // Handle 8 bit and 16 bit buffer stores
7111 SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG,
7112                                                       EVT VDataType, SDLoc DL,
7113                                                       SDValue Ops[],
7114                                                       MemSDNode *M) const {
7115   SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]);
7116   Ops[1] = BufferStoreExt;
7117   unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE :
7118                                  AMDGPUISD::BUFFER_STORE_SHORT;
7119   ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9);
  return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType,
                                 M->getMemOperand());
7122 }
7123 
7124 static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
7125                                  ISD::LoadExtType ExtType, SDValue Op,
7126                                  const SDLoc &SL, EVT VT) {
7127   if (VT.bitsLT(Op.getValueType()))
7128     return DAG.getNode(ISD::TRUNCATE, SL, VT, Op);
7129 
7130   switch (ExtType) {
7131   case ISD::SEXTLOAD:
7132     return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op);
7133   case ISD::ZEXTLOAD:
7134     return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op);
7135   case ISD::EXTLOAD:
7136     return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op);
7137   case ISD::NON_EXTLOAD:
7138     return Op;
7139   }
7140 
7141   llvm_unreachable("invalid ext type");
7142 }
7143 
7144 SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
7145   SelectionDAG &DAG = DCI.DAG;
7146   if (Ld->getAlignment() < 4 || Ld->isDivergent())
7147     return SDValue();
7148 
7149   // FIXME: Constant loads should all be marked invariant.
7150   unsigned AS = Ld->getAddressSpace();
7151   if (AS != AMDGPUAS::CONSTANT_ADDRESS &&
7152       AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
7153       (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant()))
7154     return SDValue();
7155 
  // Don't do this early, since it may interfere with adjacent load merging for
  // illegal types. Deferring it also avoids losing alignment information for
  // exotic types before legalization.
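  // For example, an invariant i8 or i16 load from the constant address space
  // with 4-byte alignment is rewritten here as a full i32 load, followed by a
  // sign/zero-extend-in-reg of the loaded bits and an extend or truncate back
  // to the original value type.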
7159   EVT MemVT = Ld->getMemoryVT();
7160   if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) ||
7161       MemVT.getSizeInBits() >= 32)
7162     return SDValue();
7163 
7164   SDLoc SL(Ld);
7165 
7166   assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
7167          "unexpected vector extload");
7168 
7169   // TODO: Drop only high part of range.
7170   SDValue Ptr = Ld->getBasePtr();
7171   SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
7172                                 MVT::i32, SL, Ld->getChain(), Ptr,
7173                                 Ld->getOffset(),
7174                                 Ld->getPointerInfo(), MVT::i32,
7175                                 Ld->getAlignment(),
7176                                 Ld->getMemOperand()->getFlags(),
7177                                 Ld->getAAInfo(),
7178                                 nullptr); // Drop ranges
7179 
7180   EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
7181   if (MemVT.isFloatingPoint()) {
7182     assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
7183            "unexpected fp extload");
7184     TruncVT = MemVT.changeTypeToInteger();
7185   }
7186 
7187   SDValue Cvt = NewLoad;
7188   if (Ld->getExtensionType() == ISD::SEXTLOAD) {
7189     Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad,
7190                       DAG.getValueType(TruncVT));
7191   } else if (Ld->getExtensionType() == ISD::ZEXTLOAD ||
7192              Ld->getExtensionType() == ISD::NON_EXTLOAD) {
7193     Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT);
7194   } else {
7195     assert(Ld->getExtensionType() == ISD::EXTLOAD);
7196   }
7197 
7198   EVT VT = Ld->getValueType(0);
7199   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
7200 
7201   DCI.AddToWorklist(Cvt.getNode());
7202 
7203   // We may need to handle exotic cases, such as i16->i64 extloads, so insert
7204   // the appropriate extension from the 32-bit load.
7205   Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT);
7206   DCI.AddToWorklist(Cvt.getNode());
7207 
7208   // Handle conversion back to floating point if necessary.
7209   Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt);
7210 
7211   return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL);
7212 }
7213 
7214 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7215   SDLoc DL(Op);
7216   LoadSDNode *Load = cast<LoadSDNode>(Op);
7217   ISD::LoadExtType ExtType = Load->getExtensionType();
7218   EVT MemVT = Load->getMemoryVT();
7219 
7220   if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
7221     if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
7222       return SDValue();
7223 
7224     // FIXME: Copied from PPC
7225     // First, load into 32 bits, then truncate to 1 bit.
7226 
7227     SDValue Chain = Load->getChain();
7228     SDValue BasePtr = Load->getBasePtr();
7229     MachineMemOperand *MMO = Load->getMemOperand();
7230 
7231     EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
7232 
7233     SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
7234                                    BasePtr, RealMemVT, MMO);
7235 
7236     if (!MemVT.isVector()) {
7237       SDValue Ops[] = {
7238         DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
7239         NewLD.getValue(1)
7240       };
7241 
7242       return DAG.getMergeValues(Ops, DL);
7243     }
7244 
7245     SmallVector<SDValue, 3> Elts;
7246     for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) {
7247       SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD,
7248                                 DAG.getConstant(I, DL, MVT::i32));
7249 
7250       Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt));
7251     }
7252 
7253     SDValue Ops[] = {
7254       DAG.getBuildVector(MemVT, DL, Elts),
7255       NewLD.getValue(1)
7256     };
7257 
7258     return DAG.getMergeValues(Ops, DL);
7259   }
7260 
7261   if (!MemVT.isVector())
7262     return SDValue();
7263 
7264   assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
7265          "Custom lowering for non-i32 vectors hasn't been implemented.");
7266 
7267   if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
7268                           *Load->getMemOperand())) {
7269     SDValue Ops[2];
7270     std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
7271     return DAG.getMergeValues(Ops, DL);
7272   }
7273 
7274   unsigned Alignment = Load->getAlignment();
7275   unsigned AS = Load->getAddressSpace();
7276   if (Subtarget->hasLDSMisalignedBug() &&
7277       AS == AMDGPUAS::FLAT_ADDRESS &&
7278       Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) {
7279     return SplitVectorLoad(Op, DAG);
7280   }
7281 
7282   MachineFunction &MF = DAG.getMachineFunction();
7283   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // If there is a possibility that flat instructions access scratch memory
  // then we need to use the same legalization rules we use for private.
7286   if (AS == AMDGPUAS::FLAT_ADDRESS)
7287     AS = MFI->hasFlatScratchInit() ?
7288          AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
7289 
7290   unsigned NumElements = MemVT.getVectorNumElements();
7291 
7292   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
7293       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
7294     if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) {
7295       if (MemVT.isPow2VectorType())
7296         return SDValue();
7297       if (NumElements == 3)
7298         return WidenVectorLoad(Op, DAG);
7299       return SplitVectorLoad(Op, DAG);
7300     }
7301     // Non-uniform loads will be selected to MUBUF instructions, so they
7302     // have the same legalization requirements as global and private
7303     // loads.
7304     //
7305   }
7306 
7307   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
7308       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
7309       AS == AMDGPUAS::GLOBAL_ADDRESS) {
7310     if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
7311         !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) &&
7312         Alignment >= 4 && NumElements < 32) {
7313       if (MemVT.isPow2VectorType())
7314         return SDValue();
7315       if (NumElements == 3)
7316         return WidenVectorLoad(Op, DAG);
7317       return SplitVectorLoad(Op, DAG);
7318     }
7319     // Non-uniform loads will be selected to MUBUF instructions, so they
7320     // have the same legalization requirements as global and private
7321     // loads.
7322     //
7323   }
7324   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
7325       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
7326       AS == AMDGPUAS::GLOBAL_ADDRESS ||
7327       AS == AMDGPUAS::FLAT_ADDRESS) {
7328     if (NumElements > 4)
7329       return SplitVectorLoad(Op, DAG);
7330     // v3 loads not supported on SI.
7331     if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7332       return WidenVectorLoad(Op, DAG);
7333     // v3 and v4 loads are supported for private and global memory.
7334     return SDValue();
7335   }
7336   if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
7337     // Depending on the setting of the private_element_size field in the
7338     // resource descriptor, we can only make private accesses up to a certain
7339     // size.
7340     switch (Subtarget->getMaxPrivateElementSize()) {
7341     case 4:
7342       return scalarizeVectorLoad(Load, DAG);
7343     case 8:
7344       if (NumElements > 2)
7345         return SplitVectorLoad(Op, DAG);
7346       return SDValue();
7347     case 16:
7348       // Same as global/flat
7349       if (NumElements > 4)
7350         return SplitVectorLoad(Op, DAG);
7351       // v3 loads not supported on SI.
7352       if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7353         return WidenVectorLoad(Op, DAG);
7354       return SDValue();
7355     default:
7356       llvm_unreachable("unsupported private_element_size");
7357     }
7358   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
7359     // Use ds_read_b128 if possible.
7360     if (Subtarget->useDS128() && Load->getAlignment() >= 16 &&
7361         MemVT.getStoreSize() == 16)
7362       return SDValue();
7363 
7364     if (NumElements > 2)
7365       return SplitVectorLoad(Op, DAG);
7366 
    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
    // address is negative, then the instruction is incorrectly treated as
    // out-of-bounds even if base + offset is in bounds. Split vectorized
7370     // loads here to avoid emitting ds_read2_b32. We may re-combine the
7371     // load later in the SILoadStoreOptimizer.
7372     if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
7373         NumElements == 2 && MemVT.getStoreSize() == 8 &&
7374         Load->getAlignment() < 8) {
7375       return SplitVectorLoad(Op, DAG);
7376     }
7377   }
7378   return SDValue();
7379 }
7380 
7381 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
7382   EVT VT = Op.getValueType();
7383   assert(VT.getSizeInBits() == 64);
7384 
7385   SDLoc DL(Op);
7386   SDValue Cond = Op.getOperand(0);
7387 
7388   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
7389   SDValue One = DAG.getConstant(1, DL, MVT::i32);
7390 
7391   SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
7392   SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
7393 
7394   SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
7395   SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
7396 
7397   SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
7398 
7399   SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
7400   SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
7401 
7402   SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
7403 
7404   SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
7405   return DAG.getNode(ISD::BITCAST, DL, VT, Res);
7406 }
7407 
7408 // Catch division cases where we can use shortcuts with rcp and rsq
7409 // instructions.
7410 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
7411                                               SelectionDAG &DAG) const {
7412   SDLoc SL(Op);
7413   SDValue LHS = Op.getOperand(0);
7414   SDValue RHS = Op.getOperand(1);
7415   EVT VT = Op.getValueType();
7416   const SDNodeFlags Flags = Op->getFlags();
  bool Unsafe = DAG.getTarget().Options.UnsafeFPMath ||
                Flags.hasAllowReciprocal();
7418 
7419   if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
7420     return SDValue();
7421 
7422   if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
7423     if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
7424       if (CLHS->isExactlyValue(1.0)) {
        // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
        // the CI documentation they have a worst-case error of 1 ulp.
7427         // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
7428         // use it as long as we aren't trying to use denormals.
7429         //
7430         // v_rcp_f16 and v_rsq_f16 DO support denormals.
7431 
7432         // 1.0 / sqrt(x) -> rsq(x)
7433 
7434         // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
7435         // error seems really high at 2^29 ULP.
7436         if (RHS.getOpcode() == ISD::FSQRT)
7437           return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
7438 
7439         // 1.0 / x -> rcp(x)
7440         return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
7441       }
7442 
7443       // Same as for 1.0, but expand the sign out of the constant.
7444       if (CLHS->isExactlyValue(-1.0)) {
7445         // -1.0 / x -> rcp (fneg x)
7446         SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
7447         return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
7448       }
7449     }
7450   }
7451 
7452   if (Unsafe) {
7453     // Turn into multiply by the reciprocal.
7454     // x / y -> x * (1.0 / y)
7455     SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
7456     return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
7457   }
7458 
7459   return SDValue();
7460 }
7461 
7462 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
7463                           EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
7464   if (GlueChain->getNumValues() <= 1) {
7465     return DAG.getNode(Opcode, SL, VT, A, B);
7466   }
7467 
7468   assert(GlueChain->getNumValues() == 3);
7469 
7470   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
7471   switch (Opcode) {
7472   default: llvm_unreachable("no chain equivalent for opcode");
7473   case ISD::FMUL:
7474     Opcode = AMDGPUISD::FMUL_W_CHAIN;
7475     break;
7476   }
7477 
7478   return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
7479                      GlueChain.getValue(2));
7480 }
7481 
7482 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
7483                            EVT VT, SDValue A, SDValue B, SDValue C,
7484                            SDValue GlueChain) {
7485   if (GlueChain->getNumValues() <= 1) {
7486     return DAG.getNode(Opcode, SL, VT, A, B, C);
7487   }
7488 
7489   assert(GlueChain->getNumValues() == 3);
7490 
7491   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
7492   switch (Opcode) {
7493   default: llvm_unreachable("no chain equivalent for opcode");
7494   case ISD::FMA:
7495     Opcode = AMDGPUISD::FMA_W_CHAIN;
7496     break;
7497   }
7498 
7499   return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
7500                      GlueChain.getValue(2));
7501 }
7502 
7503 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
7504   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
7505     return FastLowered;
7506 
7507   SDLoc SL(Op);
7508   SDValue Src0 = Op.getOperand(0);
7509   SDValue Src1 = Op.getOperand(1);
7510 
7511   SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
7512   SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
7513 
7514   SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
7515   SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
7516 
7517   SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
7518   SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
7519 
7520   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
7521 }
7522 
7523 // Faster 2.5 ULP division that does not support denormals.
7524 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
7525   SDLoc SL(Op);
7526   SDValue LHS = Op.getOperand(1);
7527   SDValue RHS = Op.getOperand(2);
7528 
7529   SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
7530 
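  // As f32 bit patterns, 0x6f800000 is 2^96 and 0x2f800000 is 2^-32. If |RHS|
  // exceeds 2^96, the denominator is pre-scaled by 2^-32 before the rcp so its
  // reciprocal does not fall into the denormal range (which rcp flushes), and
  // the final multiply by r3 rescales the quotient accordingly.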
7531   const APFloat K0Val(BitsToFloat(0x6f800000));
7532   const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
7533 
7534   const APFloat K1Val(BitsToFloat(0x2f800000));
7535   const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
7536 
7537   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
7538 
7539   EVT SetCCVT =
7540     getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
7541 
7542   SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
7543 
7544   SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
7545 
7546   // TODO: Should this propagate fast-math-flags?
7547   r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
7548 
7549   // rcp does not support denormals.
7550   SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
7551 
7552   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
7553 
7554   return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
7555 }
7556 
7557 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
7558   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
7559     return FastLowered;
7560 
7561   SDLoc SL(Op);
7562   SDValue LHS = Op.getOperand(0);
7563   SDValue RHS = Op.getOperand(1);
7564 
7565   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
7566 
7567   SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
7568 
7569   SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
7570                                           RHS, RHS, LHS);
7571   SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
7572                                         LHS, RHS, LHS);
7573 
7574   // Denominator is scaled to not be denormal, so using rcp is ok.
7575   SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
7576                                   DenominatorScaled);
7577   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
7578                                      DenominatorScaled);
7579 
7580   const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
7581                                (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
7582                                (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
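  // This hwreg encoding (id = MODE, offset = 4, width_m1 = 1, i.e. width 2)
  // appears to select the two FP32 denormal-mode bits of the MODE register,
  // which are toggled around the division sequence below when FP32 denormals
  // are disabled.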
7583 
7584   const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);
7585 
7586   if (!Subtarget->hasFP32Denormals()) {
7587     SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
7588     const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
7589                                                       SL, MVT::i32);
7590     SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
7591                                        DAG.getEntryNode(),
7592                                        EnableDenormValue, BitField);
7593     SDValue Ops[3] = {
7594       NegDivScale0,
7595       EnableDenorm.getValue(0),
7596       EnableDenorm.getValue(1)
7597     };
7598 
7599     NegDivScale0 = DAG.getMergeValues(Ops, SL);
7600   }
7601 
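  // What follows is a Newton-Raphson refinement of the scaled reciprocal and
  // quotient, roughly:
  //   e0 = fma(-d, x0, 1.0)   (Fma0)  error of the initial rcp x0
  //   x1 = fma(e0, x0, x0)    (Fma1)  refined reciprocal
  //   q0 = n * x1             (Mul)   initial quotient
  //   r0 = fma(-d, q0, n)     (Fma2)  remainder
  //   q1 = fma(r0, x1, q0)    (Fma3)  refined quotient
  //   r1 = fma(-d, q1, n)     (Fma4)  final remainder handed to div_fmas
  // where d and n are the scaled denominator and numerator.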
7602   SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
7603                              ApproxRcp, One, NegDivScale0);
7604 
7605   SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
7606                              ApproxRcp, Fma0);
7607 
7608   SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
7609                            Fma1, Fma1);
7610 
7611   SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
7612                              NumeratorScaled, Mul);
7613 
  SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul,
                             Fma2);
7615 
7616   SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
7617                              NumeratorScaled, Fma3);
7618 
7619   if (!Subtarget->hasFP32Denormals()) {
7620     const SDValue DisableDenormValue =
7621         DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
7622     SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
7623                                         Fma4.getValue(1),
7624                                         DisableDenormValue,
7625                                         BitField,
7626                                         Fma4.getValue(2));
7627 
7628     SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
7629                                       DisableDenorm, DAG.getRoot());
7630     DAG.setRoot(OutputChain);
7631   }
7632 
7633   SDValue Scale = NumeratorScaled.getValue(1);
7634   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
7635                              Fma4, Fma1, Fma3, Scale);
7636 
7637   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
7638 }
7639 
7640 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
7641   if (DAG.getTarget().Options.UnsafeFPMath)
7642     return lowerFastUnsafeFDIV(Op, DAG);
7643 
7644   SDLoc SL(Op);
7645   SDValue X = Op.getOperand(0);
7646   SDValue Y = Op.getOperand(1);
7647 
7648   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
7649 
7650   SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
7651 
7652   SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
7653 
7654   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
7655 
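  // The f64 sequence below follows the same scheme as LowerFDIV32: refine the
  // rcp approximation and the quotient with fused multiply-adds and hand the
  // final remainder to div_fmas/div_fixup; unlike the f32 path it does not
  // toggle the denormal mode.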
7656   SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
7657 
7658   SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
7659 
7660   SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
7661 
7662   SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
7663 
7664   SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
7665 
7666   SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
7667   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
7668 
7669   SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
7670                              NegDivScale0, Mul, DivScale1);
7671 
7672   SDValue Scale;
7673 
7674   if (!Subtarget->hasUsableDivScaleConditionOutput()) {
7675     // Workaround a hardware bug on SI where the condition output from div_scale
7676     // is not usable.
7677 
7678     const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
7679 
    // Figure out which scale to use for div_fmas.
7681     SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
7682     SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
7683     SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
7684     SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
7685 
7686     SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
7687     SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
7688 
7689     SDValue Scale0Hi
7690       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
7691     SDValue Scale1Hi
7692       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
7693 
7694     SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
7695     SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
7696     Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
7697   } else {
7698     Scale = DivScale1.getValue(1);
7699   }
7700 
7701   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
7702                              Fma4, Fma3, Mul, Scale);
7703 
7704   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
7705 }
7706 
7707 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
7708   EVT VT = Op.getValueType();
7709 
7710   if (VT == MVT::f32)
7711     return LowerFDIV32(Op, DAG);
7712 
7713   if (VT == MVT::f64)
7714     return LowerFDIV64(Op, DAG);
7715 
7716   if (VT == MVT::f16)
7717     return LowerFDIV16(Op, DAG);
7718 
7719   llvm_unreachable("Unexpected type for fdiv");
7720 }
7721 
7722 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7723   SDLoc DL(Op);
7724   StoreSDNode *Store = cast<StoreSDNode>(Op);
7725   EVT VT = Store->getMemoryVT();
7726 
7727   if (VT == MVT::i1) {
7728     return DAG.getTruncStore(Store->getChain(), DL,
7729        DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
7730        Store->getBasePtr(), MVT::i1, Store->getMemOperand());
7731   }
7732 
7733   assert(VT.isVector() &&
7734          Store->getValue().getValueType().getScalarType() == MVT::i32);
7735 
7736   if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
7737                           *Store->getMemOperand())) {
7738     return expandUnalignedStore(Store, DAG);
7739   }
7740 
7741   unsigned AS = Store->getAddressSpace();
7742   if (Subtarget->hasLDSMisalignedBug() &&
7743       AS == AMDGPUAS::FLAT_ADDRESS &&
7744       Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) {
7745     return SplitVectorStore(Op, DAG);
7746   }
7747 
7748   MachineFunction &MF = DAG.getMachineFunction();
7749   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // If there is a possibility that flat instructions access scratch memory
  // then we need to use the same legalization rules we use for private.
7752   if (AS == AMDGPUAS::FLAT_ADDRESS)
7753     AS = MFI->hasFlatScratchInit() ?
7754          AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
7755 
7756   unsigned NumElements = VT.getVectorNumElements();
7757   if (AS == AMDGPUAS::GLOBAL_ADDRESS ||
7758       AS == AMDGPUAS::FLAT_ADDRESS) {
7759     if (NumElements > 4)
7760       return SplitVectorStore(Op, DAG);
7761     // v3 stores not supported on SI.
7762     if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7763       return SplitVectorStore(Op, DAG);
7764     return SDValue();
7765   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
7766     switch (Subtarget->getMaxPrivateElementSize()) {
7767     case 4:
7768       return scalarizeVectorStore(Store, DAG);
7769     case 8:
7770       if (NumElements > 2)
7771         return SplitVectorStore(Op, DAG);
7772       return SDValue();
7773     case 16:
7774       if (NumElements > 4 || NumElements == 3)
7775         return SplitVectorStore(Op, DAG);
7776       return SDValue();
7777     default:
7778       llvm_unreachable("unsupported private_element_size");
7779     }
7780   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
7781     // Use ds_write_b128 if possible.
7782     if (Subtarget->useDS128() && Store->getAlignment() >= 16 &&
7783         VT.getStoreSize() == 16 && NumElements != 3)
7784       return SDValue();
7785 
7786     if (NumElements > 2)
7787       return SplitVectorStore(Op, DAG);
7788 
    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
    // address is negative, then the instruction is incorrectly treated as
    // out-of-bounds even if base + offset is in bounds. Split vectorized
7792     // stores here to avoid emitting ds_write2_b32. We may re-combine the
7793     // store later in the SILoadStoreOptimizer.
7794     if (!Subtarget->hasUsableDSOffset() &&
7795         NumElements == 2 && VT.getStoreSize() == 8 &&
7796         Store->getAlignment() < 8) {
7797       return SplitVectorStore(Op, DAG);
7798     }
7799 
7800     return SDValue();
7801   } else {
7802     llvm_unreachable("unhandled address space");
7803   }
7804 }
7805 
7806 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
7807   SDLoc DL(Op);
7808   EVT VT = Op.getValueType();
7809   SDValue Arg = Op.getOperand(0);
7810   SDValue TrigVal;
7811 
7812   // TODO: Should this propagate fast-math-flags?
7813 
7814   SDValue OneOver2Pi = DAG.getConstantFP(0.5 / M_PI, DL, VT);
7815 
7816   if (Subtarget->hasTrigReducedRange()) {
7817     SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
7818     TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal);
7819   } else {
7820     TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
7821   }
7822 
7823   switch (Op.getOpcode()) {
7824   case ISD::FCOS:
7825     return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal);
7826   case ISD::FSIN:
7827     return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal);
7828   default:
7829     llvm_unreachable("Wrong trig opcode");
7830   }
7831 }
7832 
7833 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
7834   AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
7835   assert(AtomicNode->isCompareAndSwap());
7836   unsigned AS = AtomicNode->getAddressSpace();
7837 
7838   // No custom lowering required for local address space
7839   if (!isFlatGlobalAddrSpace(AS))
7840     return Op;
7841 
  // Non-local address spaces require custom lowering for atomic compare and
  // swap; the compare and swap values are packed into a v2i32 (or v2i64 for
  // the _X2 variants).
7844   SDLoc DL(Op);
7845   SDValue ChainIn = Op.getOperand(0);
7846   SDValue Addr = Op.getOperand(1);
7847   SDValue Old = Op.getOperand(2);
7848   SDValue New = Op.getOperand(3);
7849   EVT VT = Op.getValueType();
7850   MVT SimpleVT = VT.getSimpleVT();
7851   MVT VecType = MVT::getVectorVT(SimpleVT, 2);
7852 
7853   SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
7854   SDValue Ops[] = { ChainIn, Addr, NewOld };
7855 
7856   return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
7857                                  Ops, VT, AtomicNode->getMemOperand());
7858 }
7859 
7860 //===----------------------------------------------------------------------===//
7861 // Custom DAG optimizations
7862 //===----------------------------------------------------------------------===//
7863 
7864 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
7865                                                      DAGCombinerInfo &DCI) const {
7866   EVT VT = N->getValueType(0);
7867   EVT ScalarVT = VT.getScalarType();
7868   if (ScalarVT != MVT::f32)
7869     return SDValue();
7870 
7871   SelectionDAG &DAG = DCI.DAG;
7872   SDLoc DL(N);
7873 
7874   SDValue Src = N->getOperand(0);
7875   EVT SrcVT = Src.getValueType();
7876 
7877   // TODO: We could try to match extracting the higher bytes, which would be
7878   // easier if i8 vectors weren't promoted to i32 vectors, particularly after
7879   // types are legalized. v4i8 -> v4f32 is probably the only case to worry
7880   // about in practice.
7881   if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
7882     if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
7883       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
7884       DCI.AddToWorklist(Cvt.getNode());
7885       return Cvt;
7886     }
7887   }
7888 
7889   return SDValue();
7890 }
7891 
7892 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
7893 
7894 // This is a variant of
7895 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
7896 //
7897 // The normal DAG combiner will do this, but only if the add has one use since
7898 // that would increase the number of instructions.
7899 //
7900 // This prevents us from seeing a constant offset that can be folded into a
7901 // memory instruction's addressing mode. If we know the resulting add offset of
// a pointer can be folded into an addressing offset, we can replace the
// pointer operand with the add of the new constant offset. This eliminates
// one of the uses, and may allow the remaining use to also be simplified.
7905 //
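// For example, if a DS access uses the pointer (shl (add x, 16), 2), this
// combine produces (add (shl x, 2), 64), and the constant 64 can then be
// folded into the instruction's immediate offset field.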
7906 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
7907                                                unsigned AddrSpace,
7908                                                EVT MemVT,
7909                                                DAGCombinerInfo &DCI) const {
7910   SDValue N0 = N->getOperand(0);
7911   SDValue N1 = N->getOperand(1);
7912 
7913   // We only do this to handle cases where it's profitable when there are
7914   // multiple uses of the add, so defer to the standard combine.
7915   if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
7916       N0->hasOneUse())
7917     return SDValue();
7918 
7919   const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
7920   if (!CN1)
7921     return SDValue();
7922 
7923   const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
7924   if (!CAdd)
7925     return SDValue();
7926 
7927   // If the resulting offset is too large, we can't fold it into the addressing
7928   // mode offset.
7929   APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
7930   Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());
7931 
7932   AddrMode AM;
7933   AM.HasBaseReg = true;
7934   AM.BaseOffs = Offset.getSExtValue();
7935   if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
7936     return SDValue();
7937 
7938   SelectionDAG &DAG = DCI.DAG;
7939   SDLoc SL(N);
7940   EVT VT = N->getValueType(0);
7941 
7942   SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
7943   SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
7944 
7945   SDNodeFlags Flags;
7946   Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
7947                           (N0.getOpcode() == ISD::OR ||
7948                            N0->getFlags().hasNoUnsignedWrap()));
7949 
7950   return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
7951 }
7952 
7953 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
7954                                                   DAGCombinerInfo &DCI) const {
7955   SDValue Ptr = N->getBasePtr();
7956   SelectionDAG &DAG = DCI.DAG;
7957   SDLoc SL(N);
7958 
7959   // TODO: We could also do this for multiplies.
7960   if (Ptr.getOpcode() == ISD::SHL) {
7961     SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(),  N->getAddressSpace(),
7962                                           N->getMemoryVT(), DCI);
7963     if (NewPtr) {
7964       SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
7965 
7966       NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
7967       return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
7968     }
7969   }
7970 
7971   return SDValue();
7972 }
7973 
7974 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
7975   return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
7976          (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
7977          (Opc == ISD::XOR && Val == 0);
7978 }
7979 
7980 // Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This
7981 // will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit
7982 // integer combine opportunities since most 64-bit operations are decomposed
7983 // this way.  TODO: We won't want this for SALU especially if it is an inline
7984 // immediate.
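// For example, (and x:i64, 0xffffffff00000fff) splits into an AND of the low
// half with 0xfff while the high half is left untouched, since an AND with
// 0xffffffff is a no-op that bitOpWithConstantIsReducible recognizes.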
7985 SDValue SITargetLowering::splitBinaryBitConstantOp(
7986   DAGCombinerInfo &DCI,
7987   const SDLoc &SL,
7988   unsigned Opc, SDValue LHS,
7989   const ConstantSDNode *CRHS) const {
7990   uint64_t Val = CRHS->getZExtValue();
7991   uint32_t ValLo = Lo_32(Val);
7992   uint32_t ValHi = Hi_32(Val);
7993   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7994 
  if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
       bitOpWithConstantIsReducible(Opc, ValHi)) ||
      (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
7998     // If we need to materialize a 64-bit immediate, it will be split up later
7999     // anyway. Avoid creating the harder to understand 64-bit immediate
8000     // materialization.
8001     return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
8002   }
8003 
8004   return SDValue();
8005 }
8006 
// Returns true if the argument is a boolean value that is not serialized
// into memory or an argument and does not require v_cndmask_b32 to be
// deserialized.
8009 static bool isBoolSGPR(SDValue V) {
8010   if (V.getValueType() != MVT::i1)
8011     return false;
8012   switch (V.getOpcode()) {
8013   default: break;
8014   case ISD::SETCC:
8015   case ISD::AND:
8016   case ISD::OR:
8017   case ISD::XOR:
8018   case AMDGPUISD::FP_CLASS:
8019     return true;
8020   }
8021   return false;
8022 }
8023 
8024 // If a constant has all zeroes or all ones within each byte return it.
8025 // Otherwise return 0.
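// For example, 0x00ff00ff is returned unchanged because every byte is either
// all zeroes or all ones, while 0x0000ff80 returns 0 because its low byte is
// only partially set.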
8026 static uint32_t getConstantPermuteMask(uint32_t C) {
8027   // 0xff for any zero byte in the mask
8028   uint32_t ZeroByteMask = 0;
8029   if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
8030   if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
8031   if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
8032   if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
8033   uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte
8034   if ((NonZeroByteMask & C) != NonZeroByteMask)
8035     return 0; // Partial bytes selected.
8036   return C;
8037 }
8038 
8039 // Check if a node selects whole bytes from its operand 0 starting at a byte
// boundary while masking the rest. Returns the select mask as used by the
// v_perm_b32 instruction, or ~0 on failure.
8042 // Note byte select encoding:
8043 // value 0-3 selects corresponding source byte;
8044 // value 0xc selects zero;
8045 // value 0xff selects 0xff.
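// For example, (and x, 0x0000ffff) yields the mask 0x0c0c0100 (keep bytes
// 1:0, zero out bytes 3:2), (shl x, 8) yields 0x0201000c, and (srl x, 8)
// yields 0x0c030201.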
8046 static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
8047   assert(V.getValueSizeInBits() == 32);
8048 
8049   if (V.getNumOperands() != 2)
8050     return ~0;
8051 
8052   ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
8053   if (!N1)
8054     return ~0;
8055 
8056   uint32_t C = N1->getZExtValue();
8057 
8058   switch (V.getOpcode()) {
8059   default:
8060     break;
8061   case ISD::AND:
8062     if (uint32_t ConstMask = getConstantPermuteMask(C)) {
8063       return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
8064     }
8065     break;
8066 
8067   case ISD::OR:
8068     if (uint32_t ConstMask = getConstantPermuteMask(C)) {
8069       return (0x03020100 & ~ConstMask) | ConstMask;
8070     }
8071     break;
8072 
8073   case ISD::SHL:
8074     if (C % 8)
8075       return ~0;
8076 
8077     return uint32_t((0x030201000c0c0c0cull << C) >> 32);
8078 
8079   case ISD::SRL:
8080     if (C % 8)
8081       return ~0;
8082 
8083     return uint32_t(0x0c0c0c0c03020100ull >> C);
8084   }
8085 
8086   return ~0;
8087 }
8088 
8089 SDValue SITargetLowering::performAndCombine(SDNode *N,
8090                                             DAGCombinerInfo &DCI) const {
8091   if (DCI.isBeforeLegalize())
8092     return SDValue();
8093 
8094   SelectionDAG &DAG = DCI.DAG;
8095   EVT VT = N->getValueType(0);
8096   SDValue LHS = N->getOperand(0);
8097   SDValue RHS = N->getOperand(1);
8098 
8100   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
8101   if (VT == MVT::i64 && CRHS) {
8102     if (SDValue Split
8103         = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
8104       return Split;
8105   }
8106 
8107   if (CRHS && VT == MVT::i32) {
8108     // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
8109     // nb = number of trailing zeroes in mask
    // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
    // given that we are selecting an 8 or 16 bit field starting at a byte
    // boundary.
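    // For example, (and (srl x, 8), 0xff00) extracts bits 16..23 of x and
    // places them at bits 8..15, i.e. (shl (bfe x, 16, 8), 8).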
8112     uint64_t Mask = CRHS->getZExtValue();
8113     unsigned Bits = countPopulation(Mask);
8114     if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
8115         (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
8116       if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
8117         unsigned Shift = CShift->getZExtValue();
8118         unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
8119         unsigned Offset = NB + Shift;
8120         if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
8121           SDLoc SL(N);
8122           SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
8123                                     LHS->getOperand(0),
8124                                     DAG.getConstant(Offset, SL, MVT::i32),
8125                                     DAG.getConstant(Bits, SL, MVT::i32));
8126           EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
8127           SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
8128                                     DAG.getValueType(NarrowVT));
8129           SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
8130                                     DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
8131           return Shl;
8132         }
8133       }
8134     }
8135 
8136     // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
8137     if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM &&
8138         isa<ConstantSDNode>(LHS.getOperand(2))) {
8139       uint32_t Sel = getConstantPermuteMask(Mask);
8140       if (!Sel)
8141         return SDValue();
8142 
8143       // Select 0xc for all zero bytes
8144       Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c);
8145       SDLoc DL(N);
8146       return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
8147                          LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
8148     }
8149   }
8150 
8151   // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
8152   // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
8153   if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
8154     ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8155     ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
8156 
8157     SDValue X = LHS.getOperand(0);
8158     SDValue Y = RHS.getOperand(0);
8159     if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
8160       return SDValue();
8161 
8162     if (LCC == ISD::SETO) {
8163       if (X != LHS.getOperand(1))
8164         return SDValue();
8165 
8166       if (RCC == ISD::SETUNE) {
8167         const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
8168         if (!C1 || !C1->isInfinity() || C1->isNegative())
8169           return SDValue();
8170 
8171         const uint32_t Mask = SIInstrFlags::N_NORMAL |
8172                               SIInstrFlags::N_SUBNORMAL |
8173                               SIInstrFlags::N_ZERO |
8174                               SIInstrFlags::P_ZERO |
8175                               SIInstrFlags::P_SUBNORMAL |
8176                               SIInstrFlags::P_NORMAL;
8177 
8178         static_assert(((~(SIInstrFlags::S_NAN |
8179                           SIInstrFlags::Q_NAN |
8180                           SIInstrFlags::N_INFINITY |
8181                           SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
8182                       "mask not equal");
8183 
8184         SDLoc DL(N);
8185         return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
8186                            X, DAG.getConstant(Mask, DL, MVT::i32));
8187       }
8188     }
8189   }
8190 
8191   if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS)
8192     std::swap(LHS, RHS);
8193 
8194   if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS &&
8195       RHS.hasOneUse()) {
8196     ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8197     // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan)
8198     // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan)
8199     const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
8200     if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask &&
8201         (RHS.getOperand(0) == LHS.getOperand(0) &&
8202          LHS.getOperand(0) == LHS.getOperand(1))) {
8203       const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN;
8204       unsigned NewMask = LCC == ISD::SETO ?
8205         Mask->getZExtValue() & ~OrdMask :
8206         Mask->getZExtValue() & OrdMask;
8207 
8208       SDLoc DL(N);
8209       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0),
8210                          DAG.getConstant(NewMask, DL, MVT::i32));
8211     }
8212   }
8213 
8214   if (VT == MVT::i32 &&
8215       (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
8216     // and x, (sext cc from i1) => select cc, x, 0
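    // The sign extension of an i1 is either 0 or all ones, so the AND yields
    // either x or 0, which is exactly the select.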
8217     if (RHS.getOpcode() != ISD::SIGN_EXTEND)
8218       std::swap(LHS, RHS);
8219     if (isBoolSGPR(RHS.getOperand(0)))
8220       return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
8221                            LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
8222   }
8223 
8224   // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
8225   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8226   if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
8227       N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
8228     uint32_t LHSMask = getPermuteMask(DAG, LHS);
8229     uint32_t RHSMask = getPermuteMask(DAG, RHS);
8230     if (LHSMask != ~0u && RHSMask != ~0u) {
8231       // Canonicalize the expression in an attempt to have fewer unique masks
8232       // and therefore fewer registers used to hold the masks.
8233       if (LHSMask > RHSMask) {
8234         std::swap(LHSMask, RHSMask);
8235         std::swap(LHS, RHS);
8236       }
8237 
      // Select 0xc for each lane used from the source operand. Zero bytes have
      // the 0xc mask set, 0xff bytes have 0xff in the mask, and actual lanes
      // are in the 0-3 range.
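      // For example, a mask of 0x0c0c0100 selects source bytes into result
      // bytes 0 and 1 only, giving used lanes 0x00000c0c.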
8240       uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8241       uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8242 
      // Check if we need to combine values from two sources within a byte.
8244       if (!(LHSUsedLanes & RHSUsedLanes) &&
          // If we select the high and low words, keep it for SDWA.
8246           // TODO: teach SDWA to work with v_perm_b32 and remove the check.
8247           !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
        // Each byte in each mask is either a selector value 0-3, or has higher
        // bits set in either mask: 0xff for a 0xff byte, or 0x0c for a zero
        // byte. If 0x0c is present in either mask, the result shall be 0x0c.
        // Otherwise the mask which is not 0xff wins. ANDing both masks gives a
        // correct result, except that bytes which shall be 0x0c need to be
        // corrected to exactly 0x0c.
8253         uint32_t Mask = LHSMask & RHSMask;
8254         for (unsigned I = 0; I < 32; I += 8) {
8255           uint32_t ByteSel = 0xff << I;
8256           if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c)
8257             Mask &= (0x0c << I) & 0xffffffff;
8258         }
8259 
8260         // Add 4 to each active LHS lane. It will not affect any existing 0xff
8261         // or 0x0c.
8262         uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404);
8263         SDLoc DL(N);
8264 
8265         return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
8266                            LHS.getOperand(0), RHS.getOperand(0),
8267                            DAG.getConstant(Sel, DL, MVT::i32));
8268       }
8269     }
8270   }
8271 
8272   return SDValue();
8273 }
8274 
8275 SDValue SITargetLowering::performOrCombine(SDNode *N,
8276                                            DAGCombinerInfo &DCI) const {
8277   SelectionDAG &DAG = DCI.DAG;
8278   SDValue LHS = N->getOperand(0);
8279   SDValue RHS = N->getOperand(1);
8280 
8281   EVT VT = N->getValueType(0);
8282   if (VT == MVT::i1) {
8283     // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
8284     if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
8285         RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
8286       SDValue Src = LHS.getOperand(0);
8287       if (Src != RHS.getOperand(0))
8288         return SDValue();
8289 
8290       const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
8291       const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
8292       if (!CLHS || !CRHS)
8293         return SDValue();
8294 
8295       // Only 10 bits are used.
8296       static const uint32_t MaxMask = 0x3ff;
8297 
8298       uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
8299       SDLoc DL(N);
8300       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
8301                          Src, DAG.getConstant(NewMask, DL, MVT::i32));
8302     }
8303 
8304     return SDValue();
8305   }
8306 
8307   // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
8308   if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() &&
8309       LHS.getOpcode() == AMDGPUISD::PERM &&
8310       isa<ConstantSDNode>(LHS.getOperand(2))) {
8311     uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1));
8312     if (!Sel)
8313       return SDValue();
8314 
8315     Sel |= LHS.getConstantOperandVal(2);
8316     SDLoc DL(N);
8317     return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
8318                        LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
8319   }
8320 
8321   // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
8322   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8323   if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
8324       N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
8325     uint32_t LHSMask = getPermuteMask(DAG, LHS);
8326     uint32_t RHSMask = getPermuteMask(DAG, RHS);
8327     if (LHSMask != ~0u && RHSMask != ~0u) {
8328       // Canonicalize the expression in an attempt to have fewer unique masks
8329       // and therefore fewer registers used to hold the masks.
8330       if (LHSMask > RHSMask) {
8331         std::swap(LHSMask, RHSMask);
8332         std::swap(LHS, RHS);
8333       }
8334 
      // Select 0xc for each lane used from the source operand. Zero bytes have
      // the 0xc mask set, 0xff bytes have 0xff in the mask, and actual lanes
      // are in the 0-3 range.
8337       uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8338       uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8339 
      // Check if we need to combine values from two sources within a byte.
8341       if (!(LHSUsedLanes & RHSUsedLanes) &&
          // If we select the high and low words, keep it for SDWA.
8343           // TODO: teach SDWA to work with v_perm_b32 and remove the check.
8344           !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
8345         // Kill zero bytes selected by other mask. Zero value is 0xc.
8346         LHSMask &= ~RHSUsedLanes;
8347         RHSMask &= ~LHSUsedLanes;
8348         // Add 4 to each active LHS lane
8349         LHSMask |= LHSUsedLanes & 0x04040404;
8350         // Combine masks
8351         uint32_t Sel = LHSMask | RHSMask;
8352         SDLoc DL(N);
8353 
8354         return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
8355                            LHS.getOperand(0), RHS.getOperand(0),
8356                            DAG.getConstant(Sel, DL, MVT::i32));
8357       }
8358     }
8359   }
8360 
8361   if (VT != MVT::i64)
8362     return SDValue();
8363 
8364   // TODO: This could be a generic combine with a predicate for extracting the
8365   // high half of an integer being free.
8366 
8367   // (or i64:x, (zero_extend i32:y)) ->
8368   //   i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
8369   if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
8370       RHS.getOpcode() != ISD::ZERO_EXTEND)
8371     std::swap(LHS, RHS);
8372 
8373   if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
8374     SDValue ExtSrc = RHS.getOperand(0);
8375     EVT SrcVT = ExtSrc.getValueType();
8376     if (SrcVT == MVT::i32) {
8377       SDLoc SL(N);
8378       SDValue LowLHS, HiBits;
8379       std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
8380       SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
8381 
8382       DCI.AddToWorklist(LowOr.getNode());
8383       DCI.AddToWorklist(HiBits.getNode());
8384 
8385       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
8386                                 LowOr, HiBits);
8387       return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
8388     }
8389   }
8390 
8391   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
8392   if (CRHS) {
8393     if (SDValue Split
8394           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
8395       return Split;
8396   }
8397 
8398   return SDValue();
8399 }
8400 
8401 SDValue SITargetLowering::performXorCombine(SDNode *N,
8402                                             DAGCombinerInfo &DCI) const {
8403   EVT VT = N->getValueType(0);
8404   if (VT != MVT::i64)
8405     return SDValue();
8406 
8407   SDValue LHS = N->getOperand(0);
8408   SDValue RHS = N->getOperand(1);
8409 
8410   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
8411   if (CRHS) {
8412     if (SDValue Split
8413           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
8414       return Split;
8415   }
8416 
8417   return SDValue();
8418 }
8419 
8420 // Instructions that will be lowered with a final instruction that zeros the
8421 // high result bits.
8422 // XXX - probably only need to list legal operations.
8423 static bool fp16SrcZerosHighBits(unsigned Opc) {
8424   switch (Opc) {
8425   case ISD::FADD:
8426   case ISD::FSUB:
8427   case ISD::FMUL:
8428   case ISD::FDIV:
8429   case ISD::FREM:
8430   case ISD::FMA:
8431   case ISD::FMAD:
8432   case ISD::FCANONICALIZE:
8433   case ISD::FP_ROUND:
8434   case ISD::UINT_TO_FP:
8435   case ISD::SINT_TO_FP:
8436   case ISD::FABS:
8437     // Fabs is lowered to a bit operation, but it's an and which will clear the
8438     // high bits anyway.
8439   case ISD::FSQRT:
8440   case ISD::FSIN:
8441   case ISD::FCOS:
8442   case ISD::FPOWI:
8443   case ISD::FPOW:
8444   case ISD::FLOG:
8445   case ISD::FLOG2:
8446   case ISD::FLOG10:
8447   case ISD::FEXP:
8448   case ISD::FEXP2:
8449   case ISD::FCEIL:
8450   case ISD::FTRUNC:
8451   case ISD::FRINT:
8452   case ISD::FNEARBYINT:
8453   case ISD::FROUND:
8454   case ISD::FFLOOR:
8455   case ISD::FMINNUM:
8456   case ISD::FMAXNUM:
8457   case AMDGPUISD::FRACT:
8458   case AMDGPUISD::CLAMP:
8459   case AMDGPUISD::COS_HW:
8460   case AMDGPUISD::SIN_HW:
8461   case AMDGPUISD::FMIN3:
8462   case AMDGPUISD::FMAX3:
8463   case AMDGPUISD::FMED3:
8464   case AMDGPUISD::FMAD_FTZ:
8465   case AMDGPUISD::RCP:
8466   case AMDGPUISD::RSQ:
8467   case AMDGPUISD::RCP_IFLAG:
8468   case AMDGPUISD::LDEXP:
8469     return true;
8470   default:
8471     // fcopysign, select and others may be lowered to 32-bit bit operations
8472     // which don't zero the high bits.
8473     return false;
8474   }
8475 }
8476 
8477 SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
8478                                                    DAGCombinerInfo &DCI) const {
8479   if (!Subtarget->has16BitInsts() ||
8480       DCI.getDAGCombineLevel() < AfterLegalizeDAG)
8481     return SDValue();
8482 
8483   EVT VT = N->getValueType(0);
8484   if (VT != MVT::i32)
8485     return SDValue();
8486 
8487   SDValue Src = N->getOperand(0);
8488   if (Src.getValueType() != MVT::i16)
8489     return SDValue();
8490 
8491   // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
8492   // FIXME: It is not universally true that the high bits are zeroed on gfx9.
8493   if (Src.getOpcode() == ISD::BITCAST) {
8494     SDValue BCSrc = Src.getOperand(0);
8495     if (BCSrc.getValueType() == MVT::f16 &&
8496         fp16SrcZerosHighBits(BCSrc.getOpcode()))
8497       return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
8498   }
8499 
8500   return SDValue();
8501 }
8502 
8503 SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N,
8504                                                         DAGCombinerInfo &DCI)
8505                                                         const {
8506   SDValue Src = N->getOperand(0);
8507   auto *VTSign = cast<VTSDNode>(N->getOperand(1));
8508 
8509   if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE &&
8510       VTSign->getVT() == MVT::i8) ||
8511       (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT &&
8512       VTSign->getVT() == MVT::i16)) &&
8513       Src.hasOneUse()) {
8514     auto *M = cast<MemSDNode>(Src);
8515     SDValue Ops[] = {
8516       Src.getOperand(0), // Chain
8517       Src.getOperand(1), // rsrc
8518       Src.getOperand(2), // vindex
8519       Src.getOperand(3), // voffset
8520       Src.getOperand(4), // soffset
8521       Src.getOperand(5), // offset
8522       Src.getOperand(6),
8523       Src.getOperand(7)
8524     };
8525     // replace with BUFFER_LOAD_BYTE/SHORT
8526     SDVTList ResList = DCI.DAG.getVTList(MVT::i32,
8527                                          Src.getOperand(0).getValueType());
8528     unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ?
8529                    AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT;
8530     SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N),
8531                                                           ResList,
8532                                                           Ops, M->getMemoryVT(),
8533                                                           M->getMemOperand());
8534     return DCI.DAG.getMergeValues({BufferLoadSignExt,
8535                                   BufferLoadSignExt.getValue(1)}, SDLoc(N));
8536   }
8537   return SDValue();
8538 }
8539 
8540 SDValue SITargetLowering::performClassCombine(SDNode *N,
8541                                               DAGCombinerInfo &DCI) const {
8542   SelectionDAG &DAG = DCI.DAG;
8543   SDValue Mask = N->getOperand(1);
8544 
8545   // fp_class x, 0 -> false
8546   if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
8547     if (CMask->isNullValue())
8548       return DAG.getConstant(0, SDLoc(N), MVT::i1);
8549   }
8550 
8551   if (N->getOperand(0).isUndef())
8552     return DAG.getUNDEF(MVT::i1);
8553 
8554   return SDValue();
8555 }
8556 
8557 SDValue SITargetLowering::performRcpCombine(SDNode *N,
8558                                             DAGCombinerInfo &DCI) const {
8559   EVT VT = N->getValueType(0);
8560   SDValue N0 = N->getOperand(0);
8561 
8562   if (N0.isUndef())
8563     return N0;
8564 
8565   if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP ||
8566                          N0.getOpcode() == ISD::SINT_TO_FP)) {
8567     return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0,
8568                            N->getFlags());
8569   }
8570 
8571   return AMDGPUTargetLowering::performRcpCombine(N, DCI);
8572 }
8573 
8574 bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
8575                                        unsigned MaxDepth) const {
8576   unsigned Opcode = Op.getOpcode();
8577   if (Opcode == ISD::FCANONICALIZE)
8578     return true;
8579 
8580   if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
8581     auto F = CFP->getValueAPF();
8582     if (F.isNaN() && F.isSignaling())
8583       return false;
8584     return !F.isDenormal() || denormalsEnabledForType(Op.getValueType());
8585   }
8586 
8587   // If source is a result of another standard FP operation it is already in
8588   // canonical form.
8589   if (MaxDepth == 0)
8590     return false;
8591 
8592   switch (Opcode) {
8593   // These will flush denorms if required.
8594   case ISD::FADD:
8595   case ISD::FSUB:
8596   case ISD::FMUL:
8597   case ISD::FCEIL:
8598   case ISD::FFLOOR:
8599   case ISD::FMA:
8600   case ISD::FMAD:
8601   case ISD::FSQRT:
8602   case ISD::FDIV:
8603   case ISD::FREM:
8604   case ISD::FP_ROUND:
8605   case ISD::FP_EXTEND:
8606   case AMDGPUISD::FMUL_LEGACY:
8607   case AMDGPUISD::FMAD_FTZ:
8608   case AMDGPUISD::RCP:
8609   case AMDGPUISD::RSQ:
8610   case AMDGPUISD::RSQ_CLAMP:
8611   case AMDGPUISD::RCP_LEGACY:
8612   case AMDGPUISD::RSQ_LEGACY:
8613   case AMDGPUISD::RCP_IFLAG:
8614   case AMDGPUISD::TRIG_PREOP:
8615   case AMDGPUISD::DIV_SCALE:
8616   case AMDGPUISD::DIV_FMAS:
8617   case AMDGPUISD::DIV_FIXUP:
8618   case AMDGPUISD::FRACT:
8619   case AMDGPUISD::LDEXP:
8620   case AMDGPUISD::CVT_PKRTZ_F16_F32:
8621   case AMDGPUISD::CVT_F32_UBYTE0:
8622   case AMDGPUISD::CVT_F32_UBYTE1:
8623   case AMDGPUISD::CVT_F32_UBYTE2:
8624   case AMDGPUISD::CVT_F32_UBYTE3:
8625     return true;
8626 
  // These can/will be lowered or combined as bit operations, so we need to
  // check their inputs recursively.
8629   case ISD::FNEG:
8630   case ISD::FABS:
8631   case ISD::FCOPYSIGN:
8632     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
8633 
8634   case ISD::FSIN:
8635   case ISD::FCOS:
8636   case ISD::FSINCOS:
8637     return Op.getValueType().getScalarType() != MVT::f16;
8638 
8639   case ISD::FMINNUM:
8640   case ISD::FMAXNUM:
8641   case ISD::FMINNUM_IEEE:
8642   case ISD::FMAXNUM_IEEE:
8643   case AMDGPUISD::CLAMP:
8644   case AMDGPUISD::FMED3:
8645   case AMDGPUISD::FMAX3:
8646   case AMDGPUISD::FMIN3: {
    // FIXME: Shouldn't treat the generic operations differently based on
    // these. However, we aren't really required to flush the result from
    // minnum/maxnum.
8650 
8651     // snans will be quieted, so we only need to worry about denormals.
8652     if (Subtarget->supportsMinMaxDenormModes() ||
8653         denormalsEnabledForType(Op.getValueType()))
8654       return true;
8655 
    // Flushing may be required.
    // On pre-GFX9 targets V_MIN_F32 and others do not flush denorms. For such
    // targets we need to check the inputs recursively.
8659 
8660     // FIXME: Does this apply with clamp? It's implemented with max.
8661     for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) {
8662       if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1))
8663         return false;
8664     }
8665 
8666     return true;
8667   }
8668   case ISD::SELECT: {
8669     return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) &&
8670            isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1);
8671   }
8672   case ISD::BUILD_VECTOR: {
8673     for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
8674       SDValue SrcOp = Op.getOperand(i);
8675       if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1))
8676         return false;
8677     }
8678 
8679     return true;
8680   }
8681   case ISD::EXTRACT_VECTOR_ELT:
8682   case ISD::EXTRACT_SUBVECTOR: {
8683     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
8684   }
8685   case ISD::INSERT_VECTOR_ELT: {
8686     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) &&
8687            isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1);
8688   }
8689   case ISD::UNDEF:
8690     // Could be anything.
8691     return false;
8692 
8693   case ISD::BITCAST: {
    // Hack around the mess we make when legalizing extract_vector_elt
8695     SDValue Src = Op.getOperand(0);
8696     if (Src.getValueType() == MVT::i16 &&
8697         Src.getOpcode() == ISD::TRUNCATE) {
8698       SDValue TruncSrc = Src.getOperand(0);
8699       if (TruncSrc.getValueType() == MVT::i32 &&
8700           TruncSrc.getOpcode() == ISD::BITCAST &&
8701           TruncSrc.getOperand(0).getValueType() == MVT::v2f16) {
8702         return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1);
8703       }
8704     }
8705 
8706     return false;
8707   }
8708   case ISD::INTRINSIC_WO_CHAIN: {
8709     unsigned IntrinsicID
8710       = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8711     // TODO: Handle more intrinsics
8712     switch (IntrinsicID) {
8713     case Intrinsic::amdgcn_cvt_pkrtz:
8714     case Intrinsic::amdgcn_cubeid:
8715     case Intrinsic::amdgcn_frexp_mant:
8716     case Intrinsic::amdgcn_fdot2:
8717       return true;
8718     default:
8719       break;
8720     }
8721 
8722     LLVM_FALLTHROUGH;
8723   }
8724   default:
8725     return denormalsEnabledForType(Op.getValueType()) &&
8726            DAG.isKnownNeverSNaN(Op);
8727   }
8728 
8729   llvm_unreachable("invalid operation");
8730 }
8731 
8732 // Constant fold canonicalize.
8733 SDValue SITargetLowering::getCanonicalConstantFP(
8734   SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const {
8735   // Flush denormals to 0 if not enabled.
8736   if (C.isDenormal() && !denormalsEnabledForType(VT))
8737     return DAG.getConstantFP(0.0, SL, VT);
8738 
8739   if (C.isNaN()) {
8740     APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
8741     if (C.isSignaling()) {
8742       // Quiet a signaling NaN.
8743       // FIXME: Is this supposed to preserve payload bits?
8744       return DAG.getConstantFP(CanonicalQNaN, SL, VT);
8745     }
8746 
8747     // Make sure it is the canonical NaN bitpattern.
8748     //
8749     // TODO: Can we use -1 as the canonical NaN value since it's an inline
8750     // immediate?
8751     if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
8752       return DAG.getConstantFP(CanonicalQNaN, SL, VT);
8753   }
8754 
8755   // Already canonical.
8756   return DAG.getConstantFP(C, SL, VT);
8757 }
8758 
8759 static bool vectorEltWillFoldAway(SDValue Op) {
8760   return Op.isUndef() || isa<ConstantFPSDNode>(Op);
8761 }
8762 
8763 SDValue SITargetLowering::performFCanonicalizeCombine(
8764   SDNode *N,
8765   DAGCombinerInfo &DCI) const {
8766   SelectionDAG &DAG = DCI.DAG;
8767   SDValue N0 = N->getOperand(0);
8768   EVT VT = N->getValueType(0);
8769 
8770   // fcanonicalize undef -> qnan
8771   if (N0.isUndef()) {
8772     APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT));
8773     return DAG.getConstantFP(QNaN, SDLoc(N), VT);
8774   }
8775 
8776   if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) {
8777     EVT VT = N->getValueType(0);
8778     return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF());
8779   }
8780 
8781   // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x),
8782   //                                                   (fcanonicalize k)
8783   //
8784   // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0
8785 
8786   // TODO: This could be better with wider vectors that will be split to v2f16,
8787   // and to consider uses since there aren't that many packed operations.
8788   if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 &&
8789       isTypeLegal(MVT::v2f16)) {
8790     SDLoc SL(N);
8791     SDValue NewElts[2];
8792     SDValue Lo = N0.getOperand(0);
8793     SDValue Hi = N0.getOperand(1);
8794     EVT EltVT = Lo.getValueType();
8795 
8796     if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) {
8797       for (unsigned I = 0; I != 2; ++I) {
8798         SDValue Op = N0.getOperand(I);
8799         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
8800           NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT,
8801                                               CFP->getValueAPF());
8802         } else if (Op.isUndef()) {
8803           // Handled below based on what the other operand is.
8804           NewElts[I] = Op;
8805         } else {
8806           NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op);
8807         }
8808       }
8809 
      // If one half is undef, and one is constant, prefer a splat vector
      // rather than the normal qNaN. If it's a register, prefer 0.0 since
      // that's cheaper to use and may be free with a packed operation.
      if (NewElts[0].isUndef()) {
        NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ?
          NewElts[1] : DAG.getConstantFP(0.0f, SL, EltVT);
      }
8818 
8819       if (NewElts[1].isUndef()) {
8820         NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ?
8821           NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT);
8822       }
8823 
8824       return DAG.getBuildVector(VT, SL, NewElts);
8825     }
8826   }
8827 
8828   unsigned SrcOpc = N0.getOpcode();
8829 
8830   // If it's free to do so, push canonicalizes further up the source, which may
8831   // find a canonical source.
8832   //
  // TODO: More opcodes. Note this is unsafe for the _ieee minnum/maxnum for
  // sNaNs.
8835   if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) {
8836     auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
8837     if (CRHS && N0.hasOneUse()) {
8838       SDLoc SL(N);
8839       SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT,
8840                                    N0.getOperand(0));
8841       SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF());
8842       DCI.AddToWorklist(Canon0.getNode());
8843 
8844       return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1);
8845     }
8846   }
8847 
8848   return isCanonicalized(DAG, N0) ? N0 : SDValue();
8849 }
8850 
8851 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
8852   switch (Opc) {
8853   case ISD::FMAXNUM:
8854   case ISD::FMAXNUM_IEEE:
8855     return AMDGPUISD::FMAX3;
8856   case ISD::SMAX:
8857     return AMDGPUISD::SMAX3;
8858   case ISD::UMAX:
8859     return AMDGPUISD::UMAX3;
8860   case ISD::FMINNUM:
8861   case ISD::FMINNUM_IEEE:
8862     return AMDGPUISD::FMIN3;
8863   case ISD::SMIN:
8864     return AMDGPUISD::SMIN3;
8865   case ISD::UMIN:
8866     return AMDGPUISD::UMIN3;
8867   default:
8868     llvm_unreachable("Not a min/max opcode");
8869   }
8870 }
8871 
8872 SDValue SITargetLowering::performIntMed3ImmCombine(
8873   SelectionDAG &DAG, const SDLoc &SL,
8874   SDValue Op0, SDValue Op1, bool Signed) const {
8875   ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
8876   if (!K1)
8877     return SDValue();
8878 
8879   ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
8880   if (!K0)
8881     return SDValue();
8882 
8883   if (Signed) {
8884     if (K0->getAPIntValue().sge(K1->getAPIntValue()))
8885       return SDValue();
8886   } else {
8887     if (K0->getAPIntValue().uge(K1->getAPIntValue()))
8888       return SDValue();
8889   }
8890 
8891   EVT VT = K0->getValueType(0);
8892   unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
8893   if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
8894     return DAG.getNode(Med3Opc, SL, VT,
8895                        Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
8896   }
8897 
8898   // If there isn't a 16-bit med3 operation, convert to 32-bit.
8899   MVT NVT = MVT::i32;
8900   unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
8901 
8902   SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
8903   SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
8904   SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
8905 
8906   SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
8907   return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
8908 }
8909 
8910 static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
8911   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
8912     return C;
8913 
8914   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
8915     if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
8916       return C;
8917   }
8918 
8919   return nullptr;
8920 }
8921 
8922 SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
8923                                                   const SDLoc &SL,
8924                                                   SDValue Op0,
8925                                                   SDValue Op1) const {
8926   ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
8927   if (!K1)
8928     return SDValue();
8929 
8930   ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
8931   if (!K0)
8932     return SDValue();
8933 
8934   // Ordered >= (although NaN inputs should have folded away by now).
8935   APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
8936   if (Cmp == APFloat::cmpGreaterThan)
8937     return SDValue();
8938 
8939   const MachineFunction &MF = DAG.getMachineFunction();
8940   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
8941 
8942   // TODO: Check IEEE bit enabled?
8943   EVT VT = Op0.getValueType();
8944   if (Info->getMode().DX10Clamp) {
8945     // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
8946     // hardware fmed3 behavior converting to a min.
8947     // FIXME: Should this be allowing -0.0?
8948     if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
8949       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
8950   }
8951 
8952   // med3 for f16 is only available on gfx9+, and not available for v2f16.
8953   if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
8954     // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
8955     // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
8956     // then give the other result, which is different from med3 with a NaN
8957     // input.
8958     SDValue Var = Op0.getOperand(0);
8959     if (!DAG.isKnownNeverSNaN(Var))
8960       return SDValue();
8961 
8962     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8963 
8964     if ((!K0->hasOneUse() ||
8965          TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) &&
8966         (!K1->hasOneUse() ||
8967          TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) {
8968       return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
8969                          Var, SDValue(K0, 0), SDValue(K1, 0));
8970     }
8971   }
8972 
8973   return SDValue();
8974 }
8975 
8976 SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
8977                                                DAGCombinerInfo &DCI) const {
8978   SelectionDAG &DAG = DCI.DAG;
8979 
8980   EVT VT = N->getValueType(0);
8981   unsigned Opc = N->getOpcode();
8982   SDValue Op0 = N->getOperand(0);
8983   SDValue Op1 = N->getOperand(1);
8984 
  // Only do this if the inner op has one use since this will just increase
  // register pressure for no benefit.
8987 
8988   if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
8989       !VT.isVector() &&
8990       (VT == MVT::i32 || VT == MVT::f32 ||
8991        ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) {
8992     // max(max(a, b), c) -> max3(a, b, c)
8993     // min(min(a, b), c) -> min3(a, b, c)
8994     if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
8995       SDLoc DL(N);
8996       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
8997                          DL,
8998                          N->getValueType(0),
8999                          Op0.getOperand(0),
9000                          Op0.getOperand(1),
9001                          Op1);
9002     }
9003 
9004     // Try commuted.
9005     // max(a, max(b, c)) -> max3(a, b, c)
9006     // min(a, min(b, c)) -> min3(a, b, c)
9007     if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
9008       SDLoc DL(N);
9009       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
9010                          DL,
9011                          N->getValueType(0),
9012                          Op0,
9013                          Op1.getOperand(0),
9014                          Op1.getOperand(1));
9015     }
9016   }
9017 
9018   // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
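  // For example, a clamp to [0, 255] written as min(max(x, 0), 255) becomes a
  // single med3(x, 0, 255).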
9019   if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
9020     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
9021       return Med3;
9022   }
9023 
9024   if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
9025     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
9026       return Med3;
9027   }
9028 
9029   // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
9030   if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
9031        (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) ||
9032        (Opc == AMDGPUISD::FMIN_LEGACY &&
9033         Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
9034       (VT == MVT::f32 || VT == MVT::f64 ||
9035        (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
9036        (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
9037       Op0.hasOneUse()) {
9038     if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
9039       return Res;
9040   }
9041 
9042   return SDValue();
9043 }
9044 
9045 static bool isClampZeroToOne(SDValue A, SDValue B) {
9046   if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
9047     if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
9048       // FIXME: Should this be allowing -0.0?
9049       return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
9050              (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
9051     }
9052   }
9053 
9054   return false;
9055 }
9056 
// FIXME: Should only worry about sNaNs for the version with a chain.
9058 SDValue SITargetLowering::performFMed3Combine(SDNode *N,
9059                                               DAGCombinerInfo &DCI) const {
9060   EVT VT = N->getValueType(0);
9061   // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
9062   // NaNs. With a NaN input, the order of the operands may change the result.
9063 
9064   SelectionDAG &DAG = DCI.DAG;
9065   SDLoc SL(N);
9066 
9067   SDValue Src0 = N->getOperand(0);
9068   SDValue Src1 = N->getOperand(1);
9069   SDValue Src2 = N->getOperand(2);
9070 
9071   if (isClampZeroToOne(Src0, Src1)) {
9072     // const_a, const_b, x -> clamp is safe in all cases including signaling
9073     // nans.
9074     // FIXME: Should this be allowing -0.0?
9075     return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
9076   }
9077 
9078   const MachineFunction &MF = DAG.getMachineFunction();
9079   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
9080 
9081   // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
9082   // handling no dx10-clamp?
9083   if (Info->getMode().DX10Clamp) {
    // If NaNs are clamped to 0, we are free to reorder the inputs.
9085 
9086     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
9087       std::swap(Src0, Src1);
9088 
9089     if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
9090       std::swap(Src1, Src2);
9091 
9092     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
9093       std::swap(Src0, Src1);
9094 
9095     if (isClampZeroToOne(Src1, Src2))
9096       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
9097   }
9098 
9099   return SDValue();
9100 }
9101 
9102 SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
9103                                                  DAGCombinerInfo &DCI) const {
9104   SDValue Src0 = N->getOperand(0);
9105   SDValue Src1 = N->getOperand(1);
9106   if (Src0.isUndef() && Src1.isUndef())
9107     return DCI.DAG.getUNDEF(N->getValueType(0));
9108   return SDValue();
9109 }
9110 
9111 SDValue SITargetLowering::performExtractVectorEltCombine(
9112   SDNode *N, DAGCombinerInfo &DCI) const {
9113   SDValue Vec = N->getOperand(0);
9114   SelectionDAG &DAG = DCI.DAG;
9115 
9116   EVT VecVT = Vec.getValueType();
9117   EVT EltVT = VecVT.getVectorElementType();
9118 
9119   if ((Vec.getOpcode() == ISD::FNEG ||
9120        Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
9121     SDLoc SL(N);
9122     EVT EltVT = N->getValueType(0);
9123     SDValue Idx = N->getOperand(1);
9124     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
9125                               Vec.getOperand(0), Idx);
9126     return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
9127   }
9128 
9129   // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
9130   //    =>
9131   // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
9132   // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
9133   // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
9134   if (Vec.hasOneUse() && DCI.isBeforeLegalize()) {
9135     SDLoc SL(N);
9136     EVT EltVT = N->getValueType(0);
9137     SDValue Idx = N->getOperand(1);
9138     unsigned Opc = Vec.getOpcode();
9139 
9140     switch(Opc) {
9141     default:
9142       break;
9143       // TODO: Support other binary operations.
9144     case ISD::FADD:
9145     case ISD::FSUB:
9146     case ISD::FMUL:
9147     case ISD::ADD:
9148     case ISD::UMIN:
9149     case ISD::UMAX:
9150     case ISD::SMIN:
9151     case ISD::SMAX:
9152     case ISD::FMAXNUM:
9153     case ISD::FMINNUM:
9154     case ISD::FMAXNUM_IEEE:
9155     case ISD::FMINNUM_IEEE: {
9156       SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
9157                                  Vec.getOperand(0), Idx);
9158       SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
9159                                  Vec.getOperand(1), Idx);
9160 
9161       DCI.AddToWorklist(Elt0.getNode());
9162       DCI.AddToWorklist(Elt1.getNode());
9163       return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags());
9164     }
9165     }
9166   }
9167 
9168   unsigned VecSize = VecVT.getSizeInBits();
9169   unsigned EltSize = EltVT.getSizeInBits();
9170 
  // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx)
  // This eliminates the non-constant index and the subsequent movrel or
  // scratch access.
  // Sub-dword vectors of 2 dwords or less have a better implementation.
  // Vectors bigger than 8 dwords would yield too many v_cndmask_b32
  // instructions.
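  // For example, extracting from a v4i32 with a variable index becomes four
  // constant-index extracts combined with three compare/select (v_cndmask_b32)
  // pairs.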
9176   if (VecSize <= 256 && (VecSize > 64 || EltSize >= 32) &&
9177       !isa<ConstantSDNode>(N->getOperand(1))) {
9178     SDLoc SL(N);
9179     SDValue Idx = N->getOperand(1);
9180     EVT IdxVT = Idx.getValueType();
9181     SDValue V;
9182     for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
9183       SDValue IC = DAG.getConstant(I, SL, IdxVT);
9184       SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
9185       if (I == 0)
9186         V = Elt;
9187       else
9188         V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ);
9189     }
9190     return V;
9191   }
9192 
9193   if (!DCI.isBeforeLegalize())
9194     return SDValue();
9195 
9196   // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit
9197   // elements. This exposes more load reduction opportunities by replacing
9198   // multiple small extract_vector_elements with a single 32-bit extract.
9199   auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
9200   if (isa<MemSDNode>(Vec) &&
9201       EltSize <= 16 &&
9202       EltVT.isByteSized() &&
9203       VecSize > 32 &&
9204       VecSize % 32 == 0 &&
9205       Idx) {
9206     EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
9207 
9208     unsigned BitIndex = Idx->getZExtValue() * EltSize;
9209     unsigned EltIdx = BitIndex / 32;
9210     unsigned LeftoverBitIdx = BitIndex % 32;
9211     SDLoc SL(N);
9212 
9213     SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
9214     DCI.AddToWorklist(Cast.getNode());
9215 
9216     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
9217                               DAG.getConstant(EltIdx, SL, MVT::i32));
9218     DCI.AddToWorklist(Elt.getNode());
9219     SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
9220                               DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
9221     DCI.AddToWorklist(Srl.getNode());
9222 
9223     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl);
9224     DCI.AddToWorklist(Trunc.getNode());
9225     return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc);
9226   }
9227 
9228   return SDValue();
9229 }
9230 
9231 SDValue
9232 SITargetLowering::performInsertVectorEltCombine(SDNode *N,
9233                                                 DAGCombinerInfo &DCI) const {
9234   SDValue Vec = N->getOperand(0);
9235   SDValue Idx = N->getOperand(2);
9236   EVT VecVT = Vec.getValueType();
9237   EVT EltVT = VecVT.getVectorElementType();
9238   unsigned VecSize = VecVT.getSizeInBits();
9239   unsigned EltSize = EltVT.getSizeInBits();
9240 
  // INSERT_VECTOR_ELT (<n x e>, var-idx)
  // => BUILD_VECTOR n x select (e, const-idx)
  // This eliminates the non-constant index and the subsequent movrel or
  // scratch access.
  // Sub-dword vectors of 2 dwords or less have a better implementation.
  // Vectors bigger than 8 dwords would yield too many v_cndmask_b32
  // instructions.
9247   if (isa<ConstantSDNode>(Idx) ||
9248       VecSize > 256 || (VecSize <= 64 && EltSize < 32))
9249     return SDValue();
9250 
9251   SelectionDAG &DAG = DCI.DAG;
9252   SDLoc SL(N);
9253   SDValue Ins = N->getOperand(1);
9254   EVT IdxVT = Idx.getValueType();
9255 
9256   SmallVector<SDValue, 16> Ops;
9257   for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
9258     SDValue IC = DAG.getConstant(I, SL, IdxVT);
9259     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
9260     SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ);
9261     Ops.push_back(V);
9262   }
9263 
9264   return DAG.getBuildVector(VecVT, SL, Ops);
9265 }
9266 
9267 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
9268                                           const SDNode *N0,
9269                                           const SDNode *N1) const {
9270   EVT VT = N0->getValueType(0);
9271 
9272   // Only do this if we are not trying to support denormals. v_mad_f32 does not
9273   // support denormals ever.
9274   if (((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
9275        (VT == MVT::f16 && !Subtarget->hasFP16Denormals() &&
9276         getSubtarget()->hasMadF16())) &&
9277        isOperationLegal(ISD::FMAD, VT))
9278     return ISD::FMAD;
9279 
9280   const TargetOptions &Options = DAG.getTarget().Options;
9281   if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
9282        (N0->getFlags().hasAllowContract() &&
9283         N1->getFlags().hasAllowContract())) &&
9284       isFMAFasterThanFMulAndFAdd(VT)) {
9285     return ISD::FMA;
9286   }
9287 
9288   return 0;
9289 }
9290 
9291 // For a reassociatable opcode perform:
9292 // op x, (op y, z) -> op (op x, z), y, if x and z are uniform
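// Keeping the two uniform operands together lets the inner op be selected to
// a scalar (SALU) instruction, so only the outer op has to operate on the
// divergent value.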
9293 SDValue SITargetLowering::reassociateScalarOps(SDNode *N,
9294                                                SelectionDAG &DAG) const {
9295   EVT VT = N->getValueType(0);
9296   if (VT != MVT::i32 && VT != MVT::i64)
9297     return SDValue();
9298 
9299   unsigned Opc = N->getOpcode();
9300   SDValue Op0 = N->getOperand(0);
9301   SDValue Op1 = N->getOperand(1);
9302 
9303   if (!(Op0->isDivergent() ^ Op1->isDivergent()))
9304     return SDValue();
9305 
9306   if (Op0->isDivergent())
9307     std::swap(Op0, Op1);
9308 
9309   if (Op1.getOpcode() != Opc || !Op1.hasOneUse())
9310     return SDValue();
9311 
9312   SDValue Op2 = Op1.getOperand(1);
9313   Op1 = Op1.getOperand(0);
9314   if (!(Op1->isDivergent() ^ Op2->isDivergent()))
9315     return SDValue();
9316 
9317   if (Op1->isDivergent())
9318     std::swap(Op1, Op2);
9319 
9320   // If either operand is constant this will conflict with
9321   // DAGCombiner::ReassociateOps().
9322   if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) ||
9323       DAG.isConstantIntBuildVectorOrConstantInt(Op1))
9324     return SDValue();
9325 
9326   SDLoc SL(N);
9327   SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1);
9328   return DAG.getNode(Opc, SL, VT, Add1, Op2);
9329 }
9330 
9331 static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
9332                            EVT VT,
9333                            SDValue N0, SDValue N1, SDValue N2,
9334                            bool Signed) {
9335   unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
9336   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
9337   SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
9338   return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
9339 }
9340 
9341 SDValue SITargetLowering::performAddCombine(SDNode *N,
9342                                             DAGCombinerInfo &DCI) const {
9343   SelectionDAG &DAG = DCI.DAG;
9344   EVT VT = N->getValueType(0);
9345   SDLoc SL(N);
9346   SDValue LHS = N->getOperand(0);
9347   SDValue RHS = N->getOperand(1);
9348 
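  // On subtargets with v_mad_u64_u32/v_mad_i64_i32, fold (add (mul x, y), z)
  // into a 64-bit mad when the multiply operands are known to fit in 32 bits.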
9349   if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
9350       && Subtarget->hasMad64_32() &&
9351       !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
9352       VT.getScalarSizeInBits() <= 64) {
9353     if (LHS.getOpcode() != ISD::MUL)
9354       std::swap(LHS, RHS);
9355 
9356     SDValue MulLHS = LHS.getOperand(0);
9357     SDValue MulRHS = LHS.getOperand(1);
9358     SDValue AddRHS = RHS;
9359 
9360     // TODO: Maybe restrict if SGPR inputs.
9361     if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
9362         numBitsUnsigned(MulRHS, DAG) <= 32) {
9363       MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
9364       MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
9365       AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
9366       return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
9367     }
9368 
9369     if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
9370       MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
9371       MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
9372       AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
9373       return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
9374     }
9375 
9376     return SDValue();
9377   }
9378 
9379   if (SDValue V = reassociateScalarOps(N, DAG)) {
9380     return V;
9381   }
9382 
9383   if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
9384     return SDValue();
9385 
9386   // add x, zext (setcc) => addcarry x, 0, setcc
9387   // add x, sext (setcc) => subcarry x, 0, setcc
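  // A true i1 sign-extends to -1, so adding the extended value is the same as
  // subtracting the carry-in produced by the setcc.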
9388   unsigned Opc = LHS.getOpcode();
9389   if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
9390       Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
9391     std::swap(RHS, LHS);
9392 
9393   Opc = RHS.getOpcode();
9394   switch (Opc) {
9395   default: break;
9396   case ISD::ZERO_EXTEND:
9397   case ISD::SIGN_EXTEND:
9398   case ISD::ANY_EXTEND: {
9399     auto Cond = RHS.getOperand(0);
9400     if (!isBoolSGPR(Cond))
9401       break;
9402     SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
9403     SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
9404     Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
9405     return DAG.getNode(Opc, SL, VTList, Args);
9406   }
9407   case ISD::ADDCARRY: {
9408     // add x, (addcarry y, 0, cc) => addcarry x, y, cc
9409     auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
9410     if (!C || C->getZExtValue() != 0) break;
9411     SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
9412     return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
9413   }
9414   }
9415   return SDValue();
9416 }
9417 
9418 SDValue SITargetLowering::performSubCombine(SDNode *N,
9419                                             DAGCombinerInfo &DCI) const {
9420   SelectionDAG &DAG = DCI.DAG;
9421   EVT VT = N->getValueType(0);
9422 
9423   if (VT != MVT::i32)
9424     return SDValue();
9425 
9426   SDLoc SL(N);
9427   SDValue LHS = N->getOperand(0);
9428   SDValue RHS = N->getOperand(1);
9429 
9430   if (LHS.getOpcode() == ISD::SUBCARRY) {
9431     // sub (subcarry x, 0, cc), y => subcarry x, y, cc
9432     auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
9433     if (!C || !C->isNullValue())
9434       return SDValue();
9435     SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
9436     return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
9437   }
9438   return SDValue();
9439 }
9440 
9441 SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
9442   DAGCombinerInfo &DCI) const {
9443 
9444   if (N->getValueType(0) != MVT::i32)
9445     return SDValue();
9446 
9447   auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
9448   if (!C || C->getZExtValue() != 0)
9449     return SDValue();
9450 
9451   SelectionDAG &DAG = DCI.DAG;
9452   SDValue LHS = N->getOperand(0);
9453 
9454   // addcarry (add x, y), 0, cc => addcarry x, y, cc
9455   // subcarry (sub x, y), 0, cc => subcarry x, y, cc
9456   unsigned LHSOpc = LHS.getOpcode();
9457   unsigned Opc = N->getOpcode();
9458   if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
9459       (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
9460     SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
9461     return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
9462   }
9463   return SDValue();
9464 }
9465 
9466 SDValue SITargetLowering::performFAddCombine(SDNode *N,
9467                                              DAGCombinerInfo &DCI) const {
9468   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
9469     return SDValue();
9470 
9471   SelectionDAG &DAG = DCI.DAG;
9472   EVT VT = N->getValueType(0);
9473 
9474   SDLoc SL(N);
9475   SDValue LHS = N->getOperand(0);
9476   SDValue RHS = N->getOperand(1);
9477 
  // These should really be instruction patterns, but writing patterns with
  // source modifiers is a pain.
9480 
9481   // fadd (fadd (a, a), b) -> mad 2.0, a, b
9482   if (LHS.getOpcode() == ISD::FADD) {
9483     SDValue A = LHS.getOperand(0);
9484     if (A == LHS.getOperand(1)) {
9485       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
9486       if (FusedOp != 0) {
9487         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9488         return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
9489       }
9490     }
9491   }
9492 
9493   // fadd (b, fadd (a, a)) -> mad 2.0, a, b
9494   if (RHS.getOpcode() == ISD::FADD) {
9495     SDValue A = RHS.getOperand(0);
9496     if (A == RHS.getOperand(1)) {
9497       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
9498       if (FusedOp != 0) {
9499         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9500         return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
9501       }
9502     }
9503   }
9504 
9505   return SDValue();
9506 }
9507 
9508 SDValue SITargetLowering::performFSubCombine(SDNode *N,
9509                                              DAGCombinerInfo &DCI) const {
9510   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
9511     return SDValue();
9512 
9513   SelectionDAG &DAG = DCI.DAG;
9514   SDLoc SL(N);
9515   EVT VT = N->getValueType(0);
9516   assert(!VT.isVector());
9517 
9518   // Try to get the fneg to fold into the source modifier. This undoes generic
9519   // DAG combines and folds them into the mad.
9520   //
9521   // Only do this if we are not trying to support denormals. v_mad_f32 does
9522   // not support denormals ever.
9523   SDValue LHS = N->getOperand(0);
9524   SDValue RHS = N->getOperand(1);
9525   if (LHS.getOpcode() == ISD::FADD) {
9526     // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
9527     SDValue A = LHS.getOperand(0);
9528     if (A == LHS.getOperand(1)) {
9529       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
      if (FusedOp != 0) {
9531         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9532         SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
9533 
9534         return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
9535       }
9536     }
9537   }
9538 
9539   if (RHS.getOpcode() == ISD::FADD) {
9540     // (fsub c, (fadd a, a)) -> mad -2.0, a, c
9541 
9542     SDValue A = RHS.getOperand(0);
9543     if (A == RHS.getOperand(1)) {
9544       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
      if (FusedOp != 0) {
9546         const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
9547         return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
9548       }
9549     }
9550   }
9551 
9552   return SDValue();
9553 }
9554 
9555 SDValue SITargetLowering::performFMACombine(SDNode *N,
9556                                             DAGCombinerInfo &DCI) const {
9557   SelectionDAG &DAG = DCI.DAG;
9558   EVT VT = N->getValueType(0);
9559   SDLoc SL(N);
9560 
9561   if (!Subtarget->hasDot2Insts() || VT != MVT::f32)
9562     return SDValue();
9563 
  // FMA((F32)S0.x, (F32)S1.x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
  //   FDOT2((V2F16)S0, (V2F16)S1, (F32)z)
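  //
  // e.g. with a and b of type v2f16 and distinct lanes x and y:
  //   fma(fpext(a.x), fpext(b.x), fma(fpext(a.y), fpext(b.y), z))
  //     -> fdot2(a, b, z)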
9566   SDValue Op1 = N->getOperand(0);
9567   SDValue Op2 = N->getOperand(1);
9568   SDValue FMA = N->getOperand(2);
9569 
9570   if (FMA.getOpcode() != ISD::FMA ||
9571       Op1.getOpcode() != ISD::FP_EXTEND ||
9572       Op2.getOpcode() != ISD::FP_EXTEND)
9573     return SDValue();
9574 
  // fdot2_f32_f16 always flushes fp32 denormal operands and the output to
  // zero, regardless of the denorm mode setting. Therefore,
  // unsafe-fp-math/fp-contract is sufficient to allow generating fdot2.
9578   const TargetOptions &Options = DAG.getTarget().Options;
9579   if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
9580       (N->getFlags().hasAllowContract() &&
9581        FMA->getFlags().hasAllowContract())) {
9582     Op1 = Op1.getOperand(0);
9583     Op2 = Op2.getOperand(0);
9584     if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9585         Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9586       return SDValue();
9587 
9588     SDValue Vec1 = Op1.getOperand(0);
9589     SDValue Idx1 = Op1.getOperand(1);
9590     SDValue Vec2 = Op2.getOperand(0);
9591 
9592     SDValue FMAOp1 = FMA.getOperand(0);
9593     SDValue FMAOp2 = FMA.getOperand(1);
9594     SDValue FMAAcc = FMA.getOperand(2);
9595 
9596     if (FMAOp1.getOpcode() != ISD::FP_EXTEND ||
9597         FMAOp2.getOpcode() != ISD::FP_EXTEND)
9598       return SDValue();
9599 
9600     FMAOp1 = FMAOp1.getOperand(0);
9601     FMAOp2 = FMAOp2.getOperand(0);
9602     if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9603         FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9604       return SDValue();
9605 
9606     SDValue Vec3 = FMAOp1.getOperand(0);
9607     SDValue Vec4 = FMAOp2.getOperand(0);
9608     SDValue Idx2 = FMAOp1.getOperand(1);
9609 
9610     if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) ||
9611         // Idx1 and Idx2 cannot be the same.
9612         Idx1 == Idx2)
9613       return SDValue();
9614 
9615     if (Vec1 == Vec2 || Vec3 == Vec4)
9616       return SDValue();
9617 
9618     if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16)
9619       return SDValue();
9620 
9621     if ((Vec1 == Vec3 && Vec2 == Vec4) ||
9622         (Vec1 == Vec4 && Vec2 == Vec3)) {
9623       return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc,
9624                          DAG.getTargetConstant(0, SL, MVT::i1));
9625     }
9626   }
9627   return SDValue();
9628 }
9629 
9630 SDValue SITargetLowering::performSetCCCombine(SDNode *N,
9631                                               DAGCombinerInfo &DCI) const {
9632   SelectionDAG &DAG = DCI.DAG;
9633   SDLoc SL(N);
9634 
9635   SDValue LHS = N->getOperand(0);
9636   SDValue RHS = N->getOperand(1);
9637   EVT VT = LHS.getValueType();
9638   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
9639 
9640   auto CRHS = dyn_cast<ConstantSDNode>(RHS);
9641   if (!CRHS) {
9642     CRHS = dyn_cast<ConstantSDNode>(LHS);
9643     if (CRHS) {
9644       std::swap(LHS, RHS);
9645       CC = getSetCCSwappedOperands(CC);
9646     }
9647   }
9648 
9649   if (CRHS) {
9650     if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
9651         isBoolSGPR(LHS.getOperand(0))) {
9652       // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
9653       // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
9654       // setcc (sext from i1 cc),  0, eq|sge|ule) => not cc => xor cc, -1
9655       // setcc (sext from i1 cc),  0, ne|ugt|slt) => cc
9656       if ((CRHS->isAllOnesValue() &&
9657            (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
9658           (CRHS->isNullValue() &&
9659            (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
9660         return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
9661                            DAG.getConstant(-1, SL, MVT::i1));
9662       if ((CRHS->isAllOnesValue() &&
9663            (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
9664           (CRHS->isNullValue() &&
9665            (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
9666         return LHS.getOperand(0);
9667     }
9668 
9669     uint64_t CRHSVal = CRHS->getZExtValue();
9670     if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
9671         LHS.getOpcode() == ISD::SELECT &&
9672         isa<ConstantSDNode>(LHS.getOperand(1)) &&
9673         isa<ConstantSDNode>(LHS.getOperand(2)) &&
9674         LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) &&
9675         isBoolSGPR(LHS.getOperand(0))) {
9676       // Given CT != FT:
9677       // setcc (select cc, CT, CF), CF, eq => xor cc, -1
9678       // setcc (select cc, CT, CF), CF, ne => cc
9679       // setcc (select cc, CT, CF), CT, ne => xor cc, -1
9680       // setcc (select cc, CT, CF), CT, eq => cc
9681       uint64_t CT = LHS.getConstantOperandVal(1);
9682       uint64_t CF = LHS.getConstantOperandVal(2);
9683 
9684       if ((CF == CRHSVal && CC == ISD::SETEQ) ||
9685           (CT == CRHSVal && CC == ISD::SETNE))
9686         return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
9687                            DAG.getConstant(-1, SL, MVT::i1));
9688       if ((CF == CRHSVal && CC == ISD::SETNE) ||
9689           (CT == CRHSVal && CC == ISD::SETEQ))
9690         return LHS.getOperand(0);
9691     }
9692   }
9693 
9694   if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
9695                                            VT != MVT::f16))
9696     return SDValue();
9697 
9698   // Match isinf/isfinite pattern
9699   // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
  // (fcmp one (fabs x), inf) -> (fp_class x,
  //   (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero))
9702   if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) {
9703     const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
9704     if (!CRHS)
9705       return SDValue();
9706 
9707     const APFloat &APF = CRHS->getValueAPF();
9708     if (APF.isInfinity() && !APF.isNegative()) {
9709       const unsigned IsInfMask = SIInstrFlags::P_INFINITY |
9710                                  SIInstrFlags::N_INFINITY;
9711       const unsigned IsFiniteMask = SIInstrFlags::N_ZERO |
9712                                     SIInstrFlags::P_ZERO |
9713                                     SIInstrFlags::N_NORMAL |
9714                                     SIInstrFlags::P_NORMAL |
9715                                     SIInstrFlags::N_SUBNORMAL |
9716                                     SIInstrFlags::P_SUBNORMAL;
9717       unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask;
9718       return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
9719                          DAG.getConstant(Mask, SL, MVT::i32));
9720     }
9721   }
9722 
9723   return SDValue();
9724 }
9725 
9726 SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
9727                                                      DAGCombinerInfo &DCI) const {
9728   SelectionDAG &DAG = DCI.DAG;
9729   SDLoc SL(N);
9730   unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
9731 
9732   SDValue Src = N->getOperand(0);
9733   SDValue Srl = N->getOperand(0);
9734   if (Srl.getOpcode() == ISD::ZERO_EXTEND)
9735     Srl = Srl.getOperand(0);
9736 
9737   // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
9738   if (Srl.getOpcode() == ISD::SRL) {
9739     // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
9740     // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
9741     // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
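    //
    // In general, cvt_f32_ubyteN (srl x, 8*M) -> cvt_f32_ubyte(N+M) x, as long
    // as N+M still selects a byte within the low 32 bits (N+M <= 3).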
9742 
9743     if (const ConstantSDNode *C =
9744         dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
9745       Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
9746                                EVT(MVT::i32));
9747 
9748       unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
9749       if (SrcOffset < 32 && SrcOffset % 8 == 0) {
9750         return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
9751                            MVT::f32, Srl);
9752       }
9753     }
9754   }
9755 
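  // Only the byte selected by this opcode is demanded from the source; e.g.
  // CVT_F32_UBYTE2 only demands bits [23:16].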
9756   APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
9757 
9758   KnownBits Known;
9759   TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
9760                                         !DCI.isBeforeLegalizeOps());
9761   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9762   if (TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
9763     DCI.CommitTargetLoweringOpt(TLO);
9764   }
9765 
9766   return SDValue();
9767 }
9768 
9769 SDValue SITargetLowering::performClampCombine(SDNode *N,
9770                                               DAGCombinerInfo &DCI) const {
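  // Constant-fold a clamp of a constant source: values below 0.0 (or NaN when
  // DX10 clamp is enabled) fold to 0.0, values above 1.0 fold to 1.0, and
  // anything else is left unchanged.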
9771   ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
9772   if (!CSrc)
9773     return SDValue();
9774 
9775   const MachineFunction &MF = DCI.DAG.getMachineFunction();
9776   const APFloat &F = CSrc->getValueAPF();
9777   APFloat Zero = APFloat::getZero(F.getSemantics());
9778   APFloat::cmpResult Cmp0 = F.compare(Zero);
9779   if (Cmp0 == APFloat::cmpLessThan ||
9780       (Cmp0 == APFloat::cmpUnordered &&
9781        MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) {
9782     return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
9783   }
9784 
9785   APFloat One(F.getSemantics(), "1.0");
9786   APFloat::cmpResult Cmp1 = F.compare(One);
9787   if (Cmp1 == APFloat::cmpGreaterThan)
9788     return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));
9789 
9790   return SDValue(CSrc, 0);
9791 }
9792 
9793 
9794 SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
9795                                             DAGCombinerInfo &DCI) const {
9796   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
9797     return SDValue();
9798   switch (N->getOpcode()) {
9799   default:
9800     return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
9801   case ISD::ADD:
9802     return performAddCombine(N, DCI);
9803   case ISD::SUB:
9804     return performSubCombine(N, DCI);
9805   case ISD::ADDCARRY:
9806   case ISD::SUBCARRY:
9807     return performAddCarrySubCarryCombine(N, DCI);
9808   case ISD::FADD:
9809     return performFAddCombine(N, DCI);
9810   case ISD::FSUB:
9811     return performFSubCombine(N, DCI);
9812   case ISD::SETCC:
9813     return performSetCCCombine(N, DCI);
9814   case ISD::FMAXNUM:
9815   case ISD::FMINNUM:
9816   case ISD::FMAXNUM_IEEE:
9817   case ISD::FMINNUM_IEEE:
9818   case ISD::SMAX:
9819   case ISD::SMIN:
9820   case ISD::UMAX:
9821   case ISD::UMIN:
9822   case AMDGPUISD::FMIN_LEGACY:
9823   case AMDGPUISD::FMAX_LEGACY:
9824     return performMinMaxCombine(N, DCI);
9825   case ISD::FMA:
9826     return performFMACombine(N, DCI);
9827   case ISD::LOAD: {
    if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
      return Widened;
9830     LLVM_FALLTHROUGH;
9831   }
9832   case ISD::STORE:
9833   case ISD::ATOMIC_LOAD:
9834   case ISD::ATOMIC_STORE:
9835   case ISD::ATOMIC_CMP_SWAP:
9836   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
9837   case ISD::ATOMIC_SWAP:
9838   case ISD::ATOMIC_LOAD_ADD:
9839   case ISD::ATOMIC_LOAD_SUB:
9840   case ISD::ATOMIC_LOAD_AND:
9841   case ISD::ATOMIC_LOAD_OR:
9842   case ISD::ATOMIC_LOAD_XOR:
9843   case ISD::ATOMIC_LOAD_NAND:
9844   case ISD::ATOMIC_LOAD_MIN:
9845   case ISD::ATOMIC_LOAD_MAX:
9846   case ISD::ATOMIC_LOAD_UMIN:
9847   case ISD::ATOMIC_LOAD_UMAX:
9848   case ISD::ATOMIC_LOAD_FADD:
9849   case AMDGPUISD::ATOMIC_INC:
9850   case AMDGPUISD::ATOMIC_DEC:
9851   case AMDGPUISD::ATOMIC_LOAD_FMIN:
9852   case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
9853     if (DCI.isBeforeLegalize())
9854       break;
9855     return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
9856   case ISD::AND:
9857     return performAndCombine(N, DCI);
9858   case ISD::OR:
9859     return performOrCombine(N, DCI);
9860   case ISD::XOR:
9861     return performXorCombine(N, DCI);
9862   case ISD::ZERO_EXTEND:
9863     return performZeroExtendCombine(N, DCI);
9864   case ISD::SIGN_EXTEND_INREG:
9865     return performSignExtendInRegCombine(N , DCI);
9866   case AMDGPUISD::FP_CLASS:
9867     return performClassCombine(N, DCI);
9868   case ISD::FCANONICALIZE:
9869     return performFCanonicalizeCombine(N, DCI);
9870   case AMDGPUISD::RCP:
9871     return performRcpCombine(N, DCI);
9872   case AMDGPUISD::FRACT:
9873   case AMDGPUISD::RSQ:
9874   case AMDGPUISD::RCP_LEGACY:
9875   case AMDGPUISD::RSQ_LEGACY:
9876   case AMDGPUISD::RCP_IFLAG:
9877   case AMDGPUISD::RSQ_CLAMP:
9878   case AMDGPUISD::LDEXP: {
9879     SDValue Src = N->getOperand(0);
9880     if (Src.isUndef())
9881       return Src;
9882     break;
9883   }
9884   case ISD::SINT_TO_FP:
9885   case ISD::UINT_TO_FP:
9886     return performUCharToFloatCombine(N, DCI);
9887   case AMDGPUISD::CVT_F32_UBYTE0:
9888   case AMDGPUISD::CVT_F32_UBYTE1:
9889   case AMDGPUISD::CVT_F32_UBYTE2:
9890   case AMDGPUISD::CVT_F32_UBYTE3:
9891     return performCvtF32UByteNCombine(N, DCI);
9892   case AMDGPUISD::FMED3:
9893     return performFMed3Combine(N, DCI);
9894   case AMDGPUISD::CVT_PKRTZ_F16_F32:
9895     return performCvtPkRTZCombine(N, DCI);
9896   case AMDGPUISD::CLAMP:
9897     return performClampCombine(N, DCI);
9898   case ISD::SCALAR_TO_VECTOR: {
9899     SelectionDAG &DAG = DCI.DAG;
9900     EVT VT = N->getValueType(0);
9901 
9902     // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
9903     if (VT == MVT::v2i16 || VT == MVT::v2f16) {
9904       SDLoc SL(N);
9905       SDValue Src = N->getOperand(0);
9906       EVT EltVT = Src.getValueType();
9907       if (EltVT == MVT::f16)
9908         Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
9909 
9910       SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
9911       return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
9912     }
9913 
9914     break;
9915   }
9916   case ISD::EXTRACT_VECTOR_ELT:
9917     return performExtractVectorEltCombine(N, DCI);
9918   case ISD::INSERT_VECTOR_ELT:
9919     return performInsertVectorEltCombine(N, DCI);
9920   }
9921   return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
9922 }
9923 
9924 /// Helper function for adjustWritemask
9925 static unsigned SubIdx2Lane(unsigned Idx) {
9926   switch (Idx) {
9927   default: return 0;
9928   case AMDGPU::sub0: return 0;
9929   case AMDGPU::sub1: return 1;
9930   case AMDGPU::sub2: return 2;
9931   case AMDGPU::sub3: return 3;
9932   case AMDGPU::sub4: return 4; // Possible with TFE/LWE
9933   }
9934 }
9935 
9936 /// Adjust the writemask of MIMG instructions
9937 SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
9938                                           SelectionDAG &DAG) const {
9939   unsigned Opcode = Node->getMachineOpcode();
9940 
9941   // Subtract 1 because the vdata output is not a MachineSDNode operand.
9942   int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1;
9943   if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx))
9944     return Node; // not implemented for D16
9945 
9946   SDNode *Users[5] = { nullptr };
9947   unsigned Lane = 0;
9948   unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1;
9949   unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
9950   unsigned NewDmask = 0;
9951   unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1;
9952   unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1;
  bool UsesTFC = Node->getConstantOperandVal(TFEIdx) ||
                 Node->getConstantOperandVal(LWEIdx);
9955   unsigned TFCLane = 0;
9956   bool HasChain = Node->getNumValues() > 1;
9957 
9958   if (OldDmask == 0) {
9959     // These are folded out, but on the chance it happens don't assert.
9960     return Node;
9961   }
9962 
9963   unsigned OldBitsSet = countPopulation(OldDmask);
9964   // Work out which is the TFE/LWE lane if that is enabled.
9965   if (UsesTFC) {
9966     TFCLane = OldBitsSet;
9967   }
9968 
9969   // Try to figure out the used register components
9970   for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
9971        I != E; ++I) {
9972 
9973     // Don't look at users of the chain.
9974     if (I.getUse().getResNo() != 0)
9975       continue;
9976 
9977     // Abort if we can't understand the usage
9978     if (!I->isMachineOpcode() ||
9979         I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
9980       return Node;
9981 
9982     // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
9983     // Note that subregs are packed, i.e. Lane==0 is the first bit set
9984     // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
9985     // set, etc.
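    // For example, if OldDmask is 0b1010, Lane 0 maps to component 1 (Y) and
    // Lane 1 maps to component 3 (W).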
9986     Lane = SubIdx2Lane(I->getConstantOperandVal(1));
9987 
9988     // Check if the use is for the TFE/LWE generated result at VGPRn+1.
9989     if (UsesTFC && Lane == TFCLane) {
9990       Users[Lane] = *I;
9991     } else {
9992       // Set which texture component corresponds to the lane.
9993       unsigned Comp;
9994       for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) {
9995         Comp = countTrailingZeros(Dmask);
9996         Dmask &= ~(1 << Comp);
9997       }
9998 
9999       // Abort if we have more than one user per component.
10000       if (Users[Lane])
10001         return Node;
10002 
10003       Users[Lane] = *I;
10004       NewDmask |= 1 << Comp;
10005     }
10006   }
10007 
10008   // Don't allow 0 dmask, as hardware assumes one channel enabled.
10009   bool NoChannels = !NewDmask;
10010   if (NoChannels) {
10011     if (!UsesTFC) {
10012       // No uses of the result and not using TFC. Then do nothing.
10013       return Node;
10014     }
10015     // If the original dmask has one channel - then nothing to do
10016     if (OldBitsSet == 1)
10017       return Node;
10018     // Use an arbitrary dmask - required for the instruction to work
10019     NewDmask = 1;
10020   }
10021   // Abort if there's no change
10022   if (NewDmask == OldDmask)
10023     return Node;
10024 
10025   unsigned BitsSet = countPopulation(NewDmask);
10026 
10027   // Check for TFE or LWE - increase the number of channels by one to account
10028   // for the extra return value
10029   // This will need adjustment for D16 if this is also included in
  // adjustWriteMask (this function), but at present D16 is excluded.
10031   unsigned NewChannels = BitsSet + UsesTFC;
10032 
10033   int NewOpcode =
10034       AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels);
10035   assert(NewOpcode != -1 &&
10036          NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
10037          "failed to find equivalent MIMG op");
10038 
10039   // Adjust the writemask in the node
10040   SmallVector<SDValue, 12> Ops;
10041   Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
10042   Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
10043   Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
10044 
10045   MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
10046 
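  // Round the result type up to a selectable size: 3 channels use a 4-element
  // vector and 5 channels (4 data + TFE/LWE) use an 8-element one.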
10047   MVT ResultVT = NewChannels == 1 ?
10048     SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 :
10049                            NewChannels == 5 ? 8 : NewChannels);
10050   SDVTList NewVTList = HasChain ?
10051     DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);
10052 
10053 
10054   MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
10055                                               NewVTList, Ops);
10056 
10057   if (HasChain) {
10058     // Update chain.
10059     DAG.setNodeMemRefs(NewNode, Node->memoperands());
10060     DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
10061   }
10062 
10063   if (NewChannels == 1) {
10064     assert(Node->hasNUsesOfValue(1, 0));
10065     SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
10066                                       SDLoc(Node), Users[Lane]->getValueType(0),
10067                                       SDValue(NewNode, 0));
10068     DAG.ReplaceAllUsesWith(Users[Lane], Copy);
10069     return nullptr;
10070   }
10071 
10072   // Update the users of the node with the new indices
10073   for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) {
10074     SDNode *User = Users[i];
10075     if (!User) {
10076       // Handle the special case of NoChannels. We set NewDmask to 1 above, but
10077       // Users[0] is still nullptr because channel 0 doesn't really have a use.
10078       if (i || !NoChannels)
10079         continue;
10080     } else {
10081       SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
10082       DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
10083     }
10084 
10085     switch (Idx) {
10086     default: break;
10087     case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
10088     case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
10089     case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
10090     case AMDGPU::sub3: Idx = AMDGPU::sub4; break;
10091     }
10092   }
10093 
10094   DAG.RemoveDeadNode(Node);
10095   return nullptr;
10096 }
10097 
10098 static bool isFrameIndexOp(SDValue Op) {
10099   if (Op.getOpcode() == ISD::AssertZext)
10100     Op = Op.getOperand(0);
10101 
10102   return isa<FrameIndexSDNode>(Op);
10103 }
10104 
10105 /// Legalize target independent instructions (e.g. INSERT_SUBREG)
10106 /// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers.
10108 SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
10109                                                         SelectionDAG &DAG) const {
10110   if (Node->getOpcode() == ISD::CopyToReg) {
10111     RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
10112     SDValue SrcVal = Node->getOperand(2);
10113 
10114     // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
10115     // to try understanding copies to physical registers.
10116     if (SrcVal.getValueType() == MVT::i1 &&
10117         TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
10118       SDLoc SL(Node);
10119       MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
10120       SDValue VReg = DAG.getRegister(
10121         MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
10122 
10123       SDNode *Glued = Node->getGluedNode();
10124       SDValue ToVReg
10125         = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
10126                          SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
10127       SDValue ToResultReg
10128         = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
10129                            VReg, ToVReg.getValue(1));
10130       DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
10131       DAG.RemoveDeadNode(Node);
10132       return ToResultReg.getNode();
10133     }
10134   }
10135 
10136   SmallVector<SDValue, 8> Ops;
10137   for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
10138     if (!isFrameIndexOp(Node->getOperand(i))) {
10139       Ops.push_back(Node->getOperand(i));
10140       continue;
10141     }
10142 
10143     SDLoc DL(Node);
10144     Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
10145                                      Node->getOperand(i).getValueType(),
10146                                      Node->getOperand(i)), 0));
10147   }
10148 
10149   return DAG.UpdateNodeOperands(Node, Ops);
10150 }
10151 
10152 /// Fold the instructions after selecting them.
10153 /// Returns null if users were already updated.
10154 SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
10155                                           SelectionDAG &DAG) const {
10156   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10157   unsigned Opcode = Node->getMachineOpcode();
10158 
10159   if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
10160       !TII->isGather4(Opcode)) {
10161     return adjustWritemask(Node, DAG);
10162   }
10163 
10164   if (Opcode == AMDGPU::INSERT_SUBREG ||
10165       Opcode == AMDGPU::REG_SEQUENCE) {
10166     legalizeTargetIndependentNode(Node, DAG);
10167     return Node;
10168   }
10169 
10170   switch (Opcode) {
10171   case AMDGPU::V_DIV_SCALE_F32:
10172   case AMDGPU::V_DIV_SCALE_F64: {
10173     // Satisfy the operand register constraint when one of the inputs is
10174     // undefined. Ordinarily each undef value will have its own implicit_def of
10175     // a vreg, so force these to use a single register.
10176     SDValue Src0 = Node->getOperand(0);
10177     SDValue Src1 = Node->getOperand(1);
10178     SDValue Src2 = Node->getOperand(2);
10179 
10180     if ((Src0.isMachineOpcode() &&
10181          Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
10182         (Src0 == Src1 || Src0 == Src2))
10183       break;
10184 
10185     MVT VT = Src0.getValueType().getSimpleVT();
10186     const TargetRegisterClass *RC =
10187         getRegClassFor(VT, Src0.getNode()->isDivergent());
10188 
10189     MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
10190     SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
10191 
10192     SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
10193                                       UndefReg, Src0, SDValue());
10194 
10195     // src0 must be the same register as src1 or src2, even if the value is
10196     // undefined, so make sure we don't violate this constraint.
10197     if (Src0.isMachineOpcode() &&
10198         Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
10199       if (Src1.isMachineOpcode() &&
10200           Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
10201         Src0 = Src1;
10202       else if (Src2.isMachineOpcode() &&
10203                Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
10204         Src0 = Src2;
10205       else {
10206         assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
10207         Src0 = UndefReg;
10208         Src1 = UndefReg;
10209       }
10210     } else
10211       break;
10212 
10213     SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
10214     for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
10215       Ops.push_back(Node->getOperand(I));
10216 
10217     Ops.push_back(ImpDef.getValue(1));
10218     return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
10219   }
10220   case AMDGPU::V_PERMLANE16_B32:
10221   case AMDGPU::V_PERMLANEX16_B32: {
10222     ConstantSDNode *FI = cast<ConstantSDNode>(Node->getOperand(0));
10223     ConstantSDNode *BC = cast<ConstantSDNode>(Node->getOperand(2));
10224     if (!FI->getZExtValue() && !BC->getZExtValue())
10225       break;
10226     SDValue VDstIn = Node->getOperand(6);
10227     if (VDstIn.isMachineOpcode()
10228         && VDstIn.getMachineOpcode() == AMDGPU::IMPLICIT_DEF)
10229       break;
10230     MachineSDNode *ImpDef = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
10231                                                SDLoc(Node), MVT::i32);
10232     SmallVector<SDValue, 8> Ops = { SDValue(FI, 0), Node->getOperand(1),
10233                                     SDValue(BC, 0), Node->getOperand(3),
10234                                     Node->getOperand(4), Node->getOperand(5),
10235                                     SDValue(ImpDef, 0), Node->getOperand(7) };
10236     return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
10237   }
10238   default:
10239     break;
10240   }
10241 
10242   return Node;
10243 }
10244 
10245 /// Assign the register class depending on the number of
10246 /// bits set in the writemask
10247 void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
10248                                                      SDNode *Node) const {
10249   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10250 
10251   MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
10252 
10253   if (TII->isVOP3(MI.getOpcode())) {
10254     // Make sure constant bus requirements are respected.
10255     TII->legalizeOperandsVOP3(MRI, MI);
10256 
10257     // Prefer VGPRs over AGPRs in mAI instructions where possible.
    // This saves a chain-copy of registers and better balances register
    // use between VGPRs and AGPRs, as AGPR tuples tend to be big.
10260     if (const MCOperandInfo *OpInfo = MI.getDesc().OpInfo) {
10261       unsigned Opc = MI.getOpcode();
10262       const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
10263       for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
10264                       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) {
10265         if (I == -1)
10266           break;
10267         MachineOperand &Op = MI.getOperand(I);
10268         if ((OpInfo[I].RegClass != llvm::AMDGPU::AV_64RegClassID &&
10269              OpInfo[I].RegClass != llvm::AMDGPU::AV_32RegClassID) ||
10270             !TargetRegisterInfo::isVirtualRegister(Op.getReg()) ||
10271             !TRI->isAGPR(MRI, Op.getReg()))
10272           continue;
10273         auto *Src = MRI.getUniqueVRegDef(Op.getReg());
10274         if (!Src || !Src->isCopy() ||
10275             !TRI->isSGPRReg(MRI, Src->getOperand(1).getReg()))
10276           continue;
10277         auto *RC = TRI->getRegClassForReg(MRI, Op.getReg());
10278         auto *NewRC = TRI->getEquivalentVGPRClass(RC);
10279         // All uses of agpr64 and agpr32 can also accept vgpr except for
10280         // v_accvgpr_read, but we do not produce agpr reads during selection,
10281         // so no use checks are needed.
10282         MRI.setRegClass(Op.getReg(), NewRC);
10283       }
10284     }
10285 
10286     return;
10287   }
10288 
10289   // Replace unused atomics with the no return version.
10290   int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
10291   if (NoRetAtomicOp != -1) {
10292     if (!Node->hasAnyUseOfValue(0)) {
10293       MI.setDesc(TII->get(NoRetAtomicOp));
10294       MI.RemoveOperand(0);
10295       return;
10296     }
10297 
10298     // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
10299     // instruction, because the return type of these instructions is a vec2 of
    // the memory type, which can be tied to the input operand.
10301     // This means these instructions always have a use, so we need to add a
10302     // special case to check if the atomic has only one extract_subreg use,
10303     // which itself has no uses.
10304     if ((Node->hasNUsesOfValue(1, 0) &&
10305          Node->use_begin()->isMachineOpcode() &&
10306          Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
10307          !Node->use_begin()->hasAnyUseOfValue(0))) {
10308       unsigned Def = MI.getOperand(0).getReg();
10309 
10310       // Change this into a noret atomic.
10311       MI.setDesc(TII->get(NoRetAtomicOp));
10312       MI.RemoveOperand(0);
10313 
10314       // If we only remove the def operand from the atomic instruction, the
10315       // extract_subreg will be left with a use of a vreg without a def.
10316       // So we need to insert an implicit_def to avoid machine verifier
10317       // errors.
10318       BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
10319               TII->get(AMDGPU::IMPLICIT_DEF), Def);
10320     }
10321     return;
10322   }
10323 }
10324 
10325 static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
10326                               uint64_t Val) {
10327   SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
10328   return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
10329 }
10330 
10331 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
10332                                                 const SDLoc &DL,
10333                                                 SDValue Ptr) const {
10334   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10335 
10336   // Build the half of the subregister with the constants before building the
10337   // full 128-bit register. If we are building multiple resource descriptors,
10338   // this will allow CSEing of the 2-component register.
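  //
  // The completed descriptor ends up as { Ptr (sub0-sub1), 0 (sub2),
  // getDefaultRsrcDataFormat() >> 32 (sub3) }.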
10339   const SDValue Ops0[] = {
10340     DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
10341     buildSMovImm32(DAG, DL, 0),
10342     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
10343     buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
10344     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
10345   };
10346 
10347   SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
10348                                                 MVT::v2i32, Ops0), 0);
10349 
10350   // Combine the constants and the pointer.
10351   const SDValue Ops1[] = {
10352     DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
10353     Ptr,
10354     DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
10355     SubRegHi,
10356     DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
10357   };
10358 
10359   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
10360 }
10361 
/// Return a resource descriptor with the 'Add TID' bit enabled.
/// The TID (Thread ID) is multiplied by the stride value (bits [61:48] of the
/// resource descriptor) to create an offset, which is added to the resource
/// pointer.
10366 MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
10367                                            SDValue Ptr, uint32_t RsrcDword1,
10368                                            uint64_t RsrcDword2And3) const {
10369   SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
10370   SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
10371   if (RsrcDword1) {
10372     PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
10373                                      DAG.getConstant(RsrcDword1, DL, MVT::i32)),
10374                     0);
10375   }
10376 
10377   SDValue DataLo = buildSMovImm32(DAG, DL,
10378                                   RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
10379   SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
10380 
10381   const SDValue Ops[] = {
10382     DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
10383     PtrLo,
10384     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
10385     PtrHi,
10386     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
10387     DataLo,
10388     DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
10389     DataHi,
10390     DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
10391   };
10392 
10393   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
10394 }
10395 
10396 //===----------------------------------------------------------------------===//
10397 //                         SI Inline Assembly Support
10398 //===----------------------------------------------------------------------===//
10399 
10400 std::pair<unsigned, const TargetRegisterClass *>
10401 SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
10402                                                StringRef Constraint,
10403                                                MVT VT) const {
10404   const TargetRegisterClass *RC = nullptr;
10405   if (Constraint.size() == 1) {
10406     switch (Constraint[0]) {
10407     default:
10408       return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10409     case 's':
10410     case 'r':
10411       switch (VT.getSizeInBits()) {
10412       default:
10413         return std::make_pair(0U, nullptr);
10414       case 32:
10415       case 16:
10416         RC = &AMDGPU::SReg_32_XM0RegClass;
10417         break;
10418       case 64:
10419         RC = &AMDGPU::SGPR_64RegClass;
10420         break;
10421       case 96:
10422         RC = &AMDGPU::SReg_96RegClass;
10423         break;
10424       case 128:
10425         RC = &AMDGPU::SReg_128RegClass;
10426         break;
10427       case 160:
10428         RC = &AMDGPU::SReg_160RegClass;
10429         break;
10430       case 256:
10431         RC = &AMDGPU::SReg_256RegClass;
10432         break;
10433       case 512:
10434         RC = &AMDGPU::SReg_512RegClass;
10435         break;
10436       }
10437       break;
10438     case 'v':
10439       switch (VT.getSizeInBits()) {
10440       default:
10441         return std::make_pair(0U, nullptr);
10442       case 32:
10443       case 16:
10444         RC = &AMDGPU::VGPR_32RegClass;
10445         break;
10446       case 64:
10447         RC = &AMDGPU::VReg_64RegClass;
10448         break;
10449       case 96:
10450         RC = &AMDGPU::VReg_96RegClass;
10451         break;
10452       case 128:
10453         RC = &AMDGPU::VReg_128RegClass;
10454         break;
10455       case 160:
10456         RC = &AMDGPU::VReg_160RegClass;
10457         break;
10458       case 256:
10459         RC = &AMDGPU::VReg_256RegClass;
10460         break;
10461       case 512:
10462         RC = &AMDGPU::VReg_512RegClass;
10463         break;
10464       }
10465       break;
10466     case 'a':
10467       switch (VT.getSizeInBits()) {
10468       default:
10469         return std::make_pair(0U, nullptr);
10470       case 32:
10471       case 16:
10472         RC = &AMDGPU::AGPR_32RegClass;
10473         break;
10474       case 64:
10475         RC = &AMDGPU::AReg_64RegClass;
10476         break;
10477       case 128:
10478         RC = &AMDGPU::AReg_128RegClass;
10479         break;
10480       case 512:
10481         RC = &AMDGPU::AReg_512RegClass;
10482         break;
10483       case 1024:
10484         RC = &AMDGPU::AReg_1024RegClass;
10485         // v32 types are not legal but we support them here.
10486         return std::make_pair(0U, RC);
10487       }
10488       break;
10489     }
10490     // We actually support i128, i16 and f16 as inline parameters
10491     // even if they are not reported as legal
10492     if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
10493                VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
10494       return std::make_pair(0U, RC);
10495   }
10496 
10497   if (Constraint.size() > 1) {
10498     if (Constraint[1] == 'v') {
10499       RC = &AMDGPU::VGPR_32RegClass;
10500     } else if (Constraint[1] == 's') {
10501       RC = &AMDGPU::SGPR_32RegClass;
10502     } else if (Constraint[1] == 'a') {
10503       RC = &AMDGPU::AGPR_32RegClass;
10504     }
10505 
10506     if (RC) {
10507       uint32_t Idx;
10508       bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
10509       if (!Failed && Idx < RC->getNumRegs())
10510         return std::make_pair(RC->getRegister(Idx), RC);
10511     }
10512   }
10513   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10514 }
10515 
10516 SITargetLowering::ConstraintType
10517 SITargetLowering::getConstraintType(StringRef Constraint) const {
10518   if (Constraint.size() == 1) {
10519     switch (Constraint[0]) {
10520     default: break;
10521     case 's':
10522     case 'v':
10523     case 'a':
10524       return C_RegisterClass;
10525     }
10526   }
10527   return TargetLowering::getConstraintType(Constraint);
10528 }
10529 
10530 // Figure out which registers should be reserved for stack access. Only after
10531 // the function is legalized do we know all of the non-spill stack objects or if
10532 // calls are present.
10533 void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
10534   MachineRegisterInfo &MRI = MF.getRegInfo();
10535   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
10536   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
10537   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
10538 
10539   if (Info->isEntryFunction()) {
    // Callable functions have fixed registers used for stack access, so only
    // entry functions need to reserve the private memory registers here.
10541     reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
10542   }
10543 
10544   assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
10545                              Info->getStackPtrOffsetReg()));
10546   if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG)
10547     MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
10548 
10549   // We need to worry about replacing the default register with itself in case
10550   // of MIR testcases missing the MFI.
10551   if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG)
10552     MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
10553 
10554   if (Info->getFrameOffsetReg() != AMDGPU::FP_REG)
10555     MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
10556 
10557   if (Info->getScratchWaveOffsetReg() != AMDGPU::SCRATCH_WAVE_OFFSET_REG) {
10558     MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
10559                        Info->getScratchWaveOffsetReg());
10560   }
10561 
10562   Info->limitOccupancy(MF);
10563 
10564   if (ST.isWave32() && !MF.empty()) {
    // Add a VCC_HI def because many instructions are marked as implicitly
    // using VCC, but in wave32 mode we may only define VCC_LO. If nothing
    // defines VCC_HI we may end up having a use of undef.
10568 
10569     const SIInstrInfo *TII = ST.getInstrInfo();
10570     DebugLoc DL;
10571 
10572     MachineBasicBlock &MBB = MF.front();
10573     MachineBasicBlock::iterator I = MBB.getFirstNonDebugInstr();
10574     BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), AMDGPU::VCC_HI);
10575 
10576     for (auto &MBB : MF) {
10577       for (auto &MI : MBB) {
10578         TII->fixImplicitOperands(MI);
10579       }
10580     }
10581   }
10582 
10583   TargetLoweringBase::finalizeLowering(MF);
10584 }
10585 
10586 void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
10587                                                      KnownBits &Known,
10588                                                      const APInt &DemandedElts,
10589                                                      const SelectionDAG &DAG,
10590                                                      unsigned Depth) const {
10591   TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
10592                                                 DAG, Depth);
10593 
10594   // Set the high bits to zero based on the maximum allowed scratch size per
10595   // wave. We can't use vaddr in MUBUF instructions if we don't know the address
10596   // calculation won't overflow, so assume the sign bit is never set.
10597   Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
10598 }
10599 
10600 unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
10601   const unsigned PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
10602   const unsigned CacheLineAlign = 6; // log2(64)
10603 
  // Pre-GFX10 targets did not benefit from loop alignment.
10605   if (!ML || DisableLoopAlignment ||
10606       (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
10607       getSubtarget()->hasInstFwdPrefetchBug())
10608     return PrefAlign;
10609 
  // On GFX10 the I$ consists of 4 x 64 byte cache lines.
  // By default the prefetcher keeps one cache line behind and reads two ahead.
  // We can modify it with S_INST_PREFETCH so that larger loops have two lines
  // behind and one ahead.
  // Therefore we can benefit from aligning loop headers if the loop fits in
  // 192 bytes.
  // If the loop fits in 64 bytes it always spans no more than two cache lines
  // and does not need alignment.
  // Else, if the loop is at most 128 bytes, we do not need to modify the
  // prefetch settings.
  // Else, if the loop is at most 192 bytes, we need two lines behind.
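  //
  // In short: loops of at most 64 bytes keep the default alignment, loops of
  // at most 128 bytes get 64-byte alignment with the default prefetch, loops
  // of at most 192 bytes additionally get S_INST_PREFETCH adjusted around
  // them, and anything larger keeps the default alignment.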
10619 
10620   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10621   const MachineBasicBlock *Header = ML->getHeader();
10622   if (Header->getAlignment() != PrefAlign)
10623     return Header->getAlignment(); // Already processed.
10624 
10625   unsigned LoopSize = 0;
10626   for (const MachineBasicBlock *MBB : ML->blocks()) {
    // If an inner loop block is aligned, assume on average half of the
    // alignment size is added as nops.
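    // For example, a block aligned to 2^4 = 16 bytes is assumed to contribute
    // 8 bytes of padding.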
10629     if (MBB != Header)
10630       LoopSize += (1 << MBB->getAlignment()) / 2;
10631 
10632     for (const MachineInstr &MI : *MBB) {
10633       LoopSize += TII->getInstSizeInBytes(MI);
10634       if (LoopSize > 192)
10635         return PrefAlign;
10636     }
10637   }
10638 
10639   if (LoopSize <= 64)
10640     return PrefAlign;
10641 
10642   if (LoopSize <= 128)
10643     return CacheLineAlign;
10644 
  // If any of the parent loops is surrounded by prefetch instructions, do not
  // insert new ones for the inner loop, as that would reset the parent's
  // settings.
10647   for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) {
10648     if (MachineBasicBlock *Exit = P->getExitBlock()) {
10649       auto I = Exit->getFirstNonDebugInstr();
10650       if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
10651         return CacheLineAlign;
10652     }
10653   }
10654 
10655   MachineBasicBlock *Pre = ML->getLoopPreheader();
10656   MachineBasicBlock *Exit = ML->getExitBlock();
10657 
10658   if (Pre && Exit) {
10659     BuildMI(*Pre, Pre->getFirstTerminator(), DebugLoc(),
10660             TII->get(AMDGPU::S_INST_PREFETCH))
10661       .addImm(1); // prefetch 2 lines behind PC
10662 
10663     BuildMI(*Exit, Exit->getFirstNonDebugInstr(), DebugLoc(),
10664             TII->get(AMDGPU::S_INST_PREFETCH))
10665       .addImm(2); // prefetch 1 line behind PC
10666   }
10667 
10668   return CacheLineAlign;
10669 }
10670 
10671 LLVM_ATTRIBUTE_UNUSED
10672 static bool isCopyFromRegOfInlineAsm(const SDNode *N) {
10673   assert(N->getOpcode() == ISD::CopyFromReg);
10674   do {
10675     // Follow the chain until we find an INLINEASM node.
10676     N = N->getOperand(0).getNode();
10677     if (N->getOpcode() == ISD::INLINEASM ||
10678         N->getOpcode() == ISD::INLINEASM_BR)
10679       return true;
10680   } while (N->getOpcode() == ISD::CopyFromReg);
10681   return false;
10682 }
10683 
bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode *N,
  FunctionLoweringInfo *FLI, LegacyDivergenceAnalysis *KDA) const {
10687   switch (N->getOpcode()) {
10688     case ISD::CopyFromReg:
10689     {
10690       const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1));
10691       const MachineFunction * MF = FLI->MF;
10692       const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
10693       const MachineRegisterInfo &MRI = MF->getRegInfo();
10694       const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
10695       unsigned Reg = R->getReg();
10696       if (TRI.isPhysicalRegister(Reg))
10697         return !TRI.isSGPRReg(MRI, Reg);
10698 
10699       if (MRI.isLiveIn(Reg)) {
        // Live-in VGPRs, such as workitem.id.x/y/z, and any other VGPR formal
        // argument are considered divergent.
10702         if (!TRI.isSGPRReg(MRI, Reg))
10703           return true;
10704         // Formal arguments of non-entry functions
10705         // are conservatively considered divergent
10706         else if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
10707           return true;
10708         return false;
10709       }
10710       const Value *V = FLI->getValueFromVirtualReg(Reg);
10711       if (V)
10712         return KDA->isDivergent(V);
10713       assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
10714       return !TRI.isSGPRReg(MRI, Reg);
    }
10717     case ISD::LOAD: {
10718       const LoadSDNode *L = cast<LoadSDNode>(N);
10719       unsigned AS = L->getAddressSpace();
10720       // A flat load may access private memory.
10721       return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
    }
    case ISD::CALLSEQ_END:
      return true;
    case ISD::INTRINSIC_WO_CHAIN:
      return AMDGPU::isIntrinsicSourceOfDivergence(
          cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
    case ISD::INTRINSIC_W_CHAIN:
      return AMDGPU::isIntrinsicSourceOfDivergence(
          cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
    // In some cases intrinsics that are a source of divergence have been
    // lowered to AMDGPUISD nodes, so we also need to check those.
10737     case AMDGPUISD::INTERP_MOV:
10738     case AMDGPUISD::INTERP_P1:
10739     case AMDGPUISD::INTERP_P2:
10740       return true;
10741   }
10742   return false;
10743 }
10744 
10745 bool SITargetLowering::denormalsEnabledForType(EVT VT) const {
10746   switch (VT.getScalarType().getSimpleVT().SimpleTy) {
10747   case MVT::f32:
10748     return Subtarget->hasFP32Denormals();
10749   case MVT::f64:
10750     return Subtarget->hasFP64Denormals();
10751   case MVT::f16:
10752     return Subtarget->hasFP16Denormals();
10753   default:
10754     return false;
10755   }
10756 }
10757 
10758 bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
10759                                                     const SelectionDAG &DAG,
10760                                                     bool SNaN,
10761                                                     unsigned Depth) const {
10762   if (Op.getOpcode() == AMDGPUISD::CLAMP) {
10763     const MachineFunction &MF = DAG.getMachineFunction();
10764     const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
10765 
10766     if (Info->getMode().DX10Clamp)
10767       return true; // Clamped to 0.
10768     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
10769   }
10770 
10771   return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG,
10772                                                             SNaN, Depth);
10773 }
10774 
10775 TargetLowering::AtomicExpansionKind
10776 SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
10777   switch (RMW->getOperation()) {
10778   case AtomicRMWInst::FAdd: {
10779     Type *Ty = RMW->getType();
10780 
10781     // We don't have a way to support 16-bit atomics now, so just leave them
10782     // as-is.
10783     if (Ty->isHalfTy())
10784       return AtomicExpansionKind::None;
10785 
10786     if (!Ty->isFloatTy())
10787       return AtomicExpansionKind::CmpXChg;
10788 
10789     // TODO: Do have these for flat. Older targets also had them for buffers.
10790     unsigned AS = RMW->getPointerAddressSpace();
10791     return (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) ?
10792       AtomicExpansionKind::None : AtomicExpansionKind::CmpXChg;
10793   }
10794   default:
10795     break;
10796   }
10797 
10798   return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
10799 }
10800