//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#if defined(_MSC_VER) || defined(__MINGW32__)
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "si-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

static cl::opt<bool> DisableLoopAlignment(
  "amdgpu-disable-loop-alignment",
  cl::desc("Do not align and prefetch loops"),
  cl::init(false));

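// Scan the 32-bit scalar registers in allocation order and return the first
// one the calling-convention state has not yet assigned; used when reserving
// SGPRs for special inputs. Running out of SGPRs here is a fatal error.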
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const GCNSubtarget &STI)
    : AMDGPUTargetLowering(TM, STI),
      Subtarget(&STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
  addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
  addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);

    // Unless there are also VOP3P operations, these are not really legal.
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
    addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
  }

  if (Subtarget->hasMAIInsts()) {
    addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass);
    addRegisterClass(MVT::v32f32, &AMDGPU::VReg_1024RegClass);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // We need to custom lower vector stores from local memory
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v3i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v5i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);
  setOperationAction(ISD::LOAD, MVT::v32i32, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v3i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v5i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::v32i32, Custom);

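  // There are no narrowing vector store instructions for these combinations,
  // so the legalizer must expand each truncating store.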
  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

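  // There is no i1 setcc instruction, so promote i1 compares to i32 (see also
  // isTypeDesirableForOp below, which keeps SimplifySetCC from forming them).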
  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);

#if 0
  setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
#endif

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : { MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                  MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16,
                  MVT::v32i32, MVT::v32f32 }) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
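  // For example, a v2i64 build_vector is rebuilt as a v4i32 build_vector of
  // the 32-bit halves, with the bitcasts inserted by the promotion.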
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
  setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  // Deal with vec3 vector operations when widened to vec4.
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Custom);

  // Deal with vec5 vector operations when widened to vec8.
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
  // and output demarshalling
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value,
  // let LLVM add the comparison
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (Subtarget->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // This is s_memtime on SI, and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::FLOG, MVT::f16, Custom);
    setOperationAction(ISD::FEXP, MVT::f16, Custom);
    setOperationAction(ISD::FLOG10, MVT::f16, Custom);
  }

  // v_mad_f32 does not support denormals according to some sources.
  if (!Subtarget->hasFP32Denormals())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);

  // We only really have 32-bit BFE instructions (and 16-bit on VI).
  //
  // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
  // effort to match them now. We want this to be false for i64 cases when the
  // extraction isn't restricted to the upper or lower half. Ideally we would
  // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
  // span the midpoint are probably relatively rare, so don't worry about them
  // for now.
  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);

  setOperationAction(ISD::FMINNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMINNUM, MVT::f64, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Custom);

  // These are really only legal for ieee_mode functions. We should be avoiding
  // them for functions that don't have ieee_mode enabled, so just say they are
  // legal.
  setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);

  if (Subtarget->haveRoundOpsF64()) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTPOP, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);

    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals() && STI.hasMadF16())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);

    for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::UNDEF, MVT::v2i16, Legal);
    setOperationAction(ISD::UNDEF, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

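    // Bitwise ops on a packed v2i16 are just 32-bit ops on the containing
    // register, so promote them to i32.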
    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::STORE, MVT::v4i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::STORE, MVT::v4f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);

    setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);

    if (!Subtarget->hasVOP3PInsts()) {
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
    }

    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns)
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::f16, Custom);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal);

    setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand);
  }

  if (Subtarget->hasVOP3PInsts()) {
    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);

    setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal);

    setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);

    setOperationAction(ISD::SHL, MVT::v4i16, Custom);
    setOperationAction(ISD::SRA, MVT::v4i16, Custom);
    setOperationAction(ISD::SRL, MVT::v4i16, Custom);
    setOperationAction(ISD::ADD, MVT::v4i16, Custom);
    setOperationAction(ISD::SUB, MVT::v4i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i16, Custom);

    setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
    setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::UMAX, MVT::v4i16, Custom);

    setOperationAction(ISD::FADD, MVT::v4f16, Custom);
    setOperationAction(ISD::FMUL, MVT::v4f16, Custom);
    setOperationAction(ISD::FMA, MVT::v4f16, Custom);

    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom);

    setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f16, Custom);
  }

  setOperationAction(ISD::FNEG, MVT::v4f16, Custom);
  setOperationAction(ISD::FABS, MVT::v4f16, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
  } else {
    // Legalization hack.
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);

    setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
    setOperationAction(ISD::FABS, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::FMINNUM_IEEE);
  setTargetDAGCombine(ISD::FMAXNUM_IEEE);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD);

  setSchedulingPreference(Sched::RegPressure);
}

const GCNSubtarget *SITargetLowering::getSubtarget() const {
  return Subtarget;
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

// v_mad_mix* support a conversion from f16 to f32.
//
// There is only one special case, which we do not currently handle, where
// this is OK to use when denormals are enabled.
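//
// For example, (f32 (fma (fp_extend f16:$x), (fp_extend f16:$y), f32:$z)) can
// then be selected to a single v_mad_mix_f32 / v_fma_mix_f32 with both
// extensions folded into the source operands.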
bool SITargetLowering::isFPExtFoldable(unsigned Opcode,
                                       EVT DestVT, EVT SrcVT) const {
  return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
          (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
         DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() &&
         SrcVT.getScalarType() == MVT::f16;
}

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

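// For non-kernel calling conventions, vectors are passed piecewise: 32-bit
// elements take one register each, wider elements are split into i32 pieces,
// and 16-bit elements are packed in pairs when 16-bit instructions exist.
// E.g. a v3f16 argument travels as two v2f16 registers.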
MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                    CallingConv::ID CC,
                                                    EVT VT) const {
  if (CC == CallingConv::AMDGPU_KERNEL)
    return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);

  if (VT.isVector()) {
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32)
      return ScalarVT.getSimpleVT();

    if (Size > 32)
      return MVT::i32;

    if (Size == 16 && Subtarget->has16BitInsts())
      return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
  } else if (VT.getSizeInBits() > 32)
    return MVT::i32;

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {
  if (CC == CallingConv::AMDGPU_KERNEL)
    return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);

  if (VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();

    if (Size == 32)
      return NumElts;

    if (Size > 32)
      return NumElts * ((Size + 31) / 32);

    if (Size == 16 && Subtarget->has16BitInsts())
      return (NumElts + 1) / 2;
  } else if (VT.getSizeInBits() > 32)
    return (VT.getSizeInBits() + 31) / 32;

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
  LLVMContext &Context, CallingConv::ID CC,
  EVT VT, EVT &IntermediateVT,
  unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32) {
      RegisterVT = ScalarVT.getSimpleVT();
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts;
      return NumIntermediates;
    }

    if (Size > 32) {
      RegisterVT = MVT::i32;
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts * ((Size + 31) / 32);
      return NumIntermediates;
    }

    // FIXME: We should fix the ABI to be the same on targets without 16-bit
    // support, but unless we can properly handle 3-vectors, it will still be
    // inconsistent.
    if (Size == 16 && Subtarget->has16BitInsts()) {
      RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
      IntermediateVT = RegisterVT;
      NumIntermediates = (NumElts + 1) / 2;
      return NumIntermediates;
    }
  }

  return TargetLowering::getVectorTypeBreakdownForCallingConv(
    Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}

static MVT memVTFromAggregate(Type *Ty) {
  // Only limited forms of aggregate type currently expected.
  assert(Ty->isStructTy() && "Expected struct type");

  Type *ElementType = nullptr;
  unsigned NumElts;
  if (Ty->getContainedType(0)->isVectorTy()) {
    VectorType *VecComponent = cast<VectorType>(Ty->getContainedType(0));
    ElementType = VecComponent->getElementType();
    NumElts = VecComponent->getNumElements();
  } else {
    ElementType = Ty->getContainedType(0);
    NumElts = 1;
  }

  assert((Ty->getContainedType(1) &&
          Ty->getContainedType(1)->isIntegerTy(32)) && "Expected int32 type");

  // Calculate the size of the memVT type from the aggregate
  unsigned Pow2Elts = 0;
  unsigned ElementSize;
  switch (ElementType->getTypeID()) {
    default:
      llvm_unreachable("Unknown type!");
    case Type::IntegerTyID:
      ElementSize = cast<IntegerType>(ElementType)->getBitWidth();
      break;
    case Type::HalfTyID:
      ElementSize = 16;
      break;
    case Type::FloatTyID:
      ElementSize = 32;
      break;
  }
  unsigned AdditionalElts = ElementSize == 16 ? 2 : 1;
  Pow2Elts = 1 << Log2_32_Ceil(NumElts + AdditionalElts);
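
  // Example: { <4 x half>, i32 } gives NumElts = 4 and AdditionalElts = 2
  // (two halves cover the trailing i32), so Pow2Elts = 8 and the resulting
  // memVT is v8f16.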

  return MVT::getVectorVT(MVT::getVT(ElementType, false),
                          Pow2Elts);
}

bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          MachineFunction &MF,
                                          unsigned IntrID) const {
  if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
          AMDGPU::lookupRsrcIntrinsic(IntrID)) {
    AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
                                                  (Intrinsic::ID)IntrID);
    if (Attr.hasFnAttribute(Attribute::ReadNone))
      return false;

    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (RsrcIntr->IsImage) {
      Info.ptrVal = MFI->getImagePSV(
        *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
      Info.align = 0;
    } else {
      Info.ptrVal = MFI->getBufferPSV(
        *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
    }

    Info.flags = MachineMemOperand::MODereferenceable;
    if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType(), true);
      if (Info.memVT == MVT::Other) {
        // Some intrinsics return an aggregate type - special case to work out
        // the correct memVT
        Info.memVT = memVTFromAggregate(CI.getType());
      }
      Info.flags |= MachineMemOperand::MOLoad;
    } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
      Info.opc = ISD::INTRINSIC_VOID;
      Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
      Info.flags |= MachineMemOperand::MOStore;
    } else {
      // Atomic
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType());
      Info.flags = MachineMemOperand::MOLoad |
                   MachineMemOperand::MOStore |
                   MachineMemOperand::MODereferenceable;

      // XXX - Should this be volatile without known ordering?
      Info.flags |= MachineMemOperand::MOVolatile;
    }
    return true;
  }

  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4));
    if (!Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }
  case Intrinsic::amdgcn_buffer_atomic_fadd: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::getVT(CI.getOperand(0)->getType());
    Info.ptrVal = MFI->getBufferPSV(
      *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
      CI.getArgOperand(1));
    Info.align = 0;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
    if (!Vol || !Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }
  case Intrinsic::amdgcn_global_atomic_fadd: {
    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::getVT(CI.getOperand(0)->getType()
                            ->getPointerElementType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    return true;
  }
  case Intrinsic::amdgcn_ds_append:
  case Intrinsic::amdgcn_ds_consume: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1));
    if (!Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }
  case Intrinsic::amdgcn_ds_gws_init:
  case Intrinsic::amdgcn_ds_gws_barrier:
  case Intrinsic::amdgcn_ds_gws_sema_v:
  case Intrinsic::amdgcn_ds_gws_sema_br:
  case Intrinsic::amdgcn_ds_gws_sema_p:
  case Intrinsic::amdgcn_ds_gws_sema_release_all: {
    Info.opc = ISD::INTRINSIC_VOID;

    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.ptrVal =
        MFI->getGWSPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());

    // This is an abstract access, but we need to specify a type and size.
    Info.memVT = MVT::i32;
    Info.size = 4;
    Info.align = 4;

    Info.flags = MachineMemOperand::MOStore;
    if (IntrID == Intrinsic::amdgcn_ds_gws_barrier)
      Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and is treated as a 12-bit unsigned offset.

  // GFX10 shrank the signed offset to 12 bits. When using regular flat
  // instructions, the sign bit is likewise ignored and the offset is treated
  // as an 11-bit unsigned offset.

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
    return isUInt<11>(AM.BaseOffs) && AM.Scale == 0;

  // Just r + i
  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
}

bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
  if (Subtarget->hasFlatGlobalInsts())
    return isInt<13>(AM.BaseOffs) && AM.Scale == 0;

  if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
      // Assume that we will use FLAT for all global memory accesses
      // on VI.
      // FIXME: This assumption is currently wrong.  On VI we still use
      // MUBUF instructions for the r + i addressing mode.  As currently
      // implemented, the MUBUF instructions only work on buffer < 4GB.
      // It may be possible to support > 4GB buffers with MUBUF instructions,
      // by setting the stride value in the resource descriptor which would
      // increase the size limit to (stride * 4GB).  However, this is risky,
      // because it has never been validated.
    return isLegalFlatAddressingMode(AM);
  }

  return isLegalMUBUFAddressingMode(AM);
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so slightly
  // different than the normal addr64.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r
    // Or  2 * r + i is allowed as r + r + i.
    return true;
  default: // Don't allow n * r
    return false;
  }
}

bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS, Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  if (AS == AMDGPUAS::GLOBAL_ADDRESS)
    return isLegalGlobalAddressingMode(AM);

  if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
      AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
      AS == AMDGPUAS::BUFFER_FAT_POINTER) {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
      return isLegalGlobalAddressingMode(AM);

    if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8-bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is 20-bit in bytes.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");
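    // Example: a constant-address access at base + 0xFFFFC is accepted on VI
    // (it fits the 20-bit SMEM byte offset), while SI tops out at base + 1020
    // (255 dwords).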

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;

  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
             AS == AMDGPUAS::REGION_ADDRESS) {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUAS::FLAT_ADDRESS ||
             AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) {
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);
  } else {
    llvm_unreachable("unhandled address space");
  }
}

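// Cap merged store width by address space: 128 bits for global / flat, the
// subtarget's maximum private element size for scratch, and 64 bits for LDS
// and GDS.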
bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
                                        const SelectionDAG &DAG) const {
  if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
    return (MemVT.getSizeInBits() <= 4 * 32);
  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
    unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
    return (MemVT.getSizeInBits() <= MaxPrivateBits);
  } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
    return (MemVT.getSizeInBits() <= 2 * 32);
  }
  return true;
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
    bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch.  If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
    bool AlignedBy4 = Align >= 4;
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have a uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
                 AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
        (Align % 4 == 0) : true;
    }

    return true;
  }

  // Values smaller than a dword must be aligned.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

EVT SITargetLowering::getOptimalMemOpType(
    uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
    bool ZeroMemset, bool MemcpyStrSrc,
    const AttributeList &FuncAttributes) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type to
  // use. Make sure we switch these to 64-bit accesses.

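  // In effect, a memcpy / memset of 16+ bytes with a dword-aligned destination
  // is widened to 128-bit accesses, and one of 8+ bytes to 64-bit accesses.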
  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
    return MVT::v4i32;

  if (Size >= 8 && DstAlign >= 4)
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}

static bool isFlatGlobalAddrSpace(unsigned AS) {
  return AS == AMDGPUAS::GLOBAL_ADDRESS ||
         AS == AMDGPUAS::FLAT_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS ||
         AS > AMDGPUAS::MAX_AMDGPU_ADDRESS;
}

bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
}

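// Returns true if the underlying IR load carries "amdgpu.noclobber" metadata,
// i.e. an earlier IR pass concluded that no store can clobber the loaded
// address, so the access may be treated like a constant load.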
1322 bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
1323   const MemSDNode *MemNode = cast<MemSDNode>(N);
1324   const Value *Ptr = MemNode->getMemOperand()->getValue();
1325   const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
1326   return I && I->getMetadata("amdgpu.noclobber");
1327 }
1328 
1329 bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS,
1330                                            unsigned DestAS) const {
1331   // Flat -> private/local is a simple truncate.
1332   // Flat -> global is no-op
1333   if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
1334     return true;
1335 
1336   return isNoopAddrSpaceCast(SrcAS, DestAS);
1337 }
1338 
1339 bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
1340   const MemSDNode *MemNode = cast<MemSDNode>(N);
1341 
1342   return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
1343 }
1344 
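// Split vectors of small (<= 16-bit) element types so the pieces can be
// handled as scalar or packed 16-bit operations. Legal packed types such as
// v2i16 are unaffected, since this hook is only consulted for types that
// still need legalization.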
1345 TargetLoweringBase::LegalizeTypeAction
1346 SITargetLowering::getPreferredVectorAction(MVT VT) const {
1347   if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
1348     return TypeSplitVector;
1349 
1350   return TargetLoweringBase::getPreferredVectorAction(VT);
1351 }
1352 
1353 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
1354                                                          Type *Ty) const {
1355   // FIXME: Could be smarter if called for vector constants.
1356   return true;
1357 }
1358 
1359 bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
1360   if (Subtarget->has16BitInsts() && VT == MVT::i16) {
1361     switch (Op) {
1362     case ISD::LOAD:
1363     case ISD::STORE:
1364 
1365     // These operations are done with 32-bit instructions anyway.
1366     case ISD::AND:
1367     case ISD::OR:
1368     case ISD::XOR:
1369     case ISD::SELECT:
1370       // TODO: Extensions?
1371       return true;
1372     default:
1373       return false;
1374     }
1375   }
1376 
1377   // SimplifySetCC uses this function to determine whether or not it should
1378   // create setcc with i1 operands.  We don't have instructions for i1 setcc.
1379   if (VT == MVT::i1 && Op == ISD::SETCC)
1380     return false;
1381 
1382   return TargetLowering::isTypeDesirableForOp(Op, VT);
1383 }
1384 
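// Build a pointer into the kernarg segment: copy the preloaded
// KERNARG_SEGMENT_PTR SGPR pair into a virtual register and add the byte
// offset of the requested argument.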
1385 SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1386                                                    const SDLoc &SL,
1387                                                    SDValue Chain,
1388                                                    uint64_t Offset) const {
1389   const DataLayout &DL = DAG.getDataLayout();
1390   MachineFunction &MF = DAG.getMachineFunction();
1391   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1392 
1393   const ArgDescriptor *InputPtrReg;
1394   const TargetRegisterClass *RC;
1395 
1396   std::tie(InputPtrReg, RC)
1397     = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
1398 
1399   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
1400   MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
1401   SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
1402     MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1403 
1404   return DAG.getObjectPtrOffset(SL, BasePtr, Offset);
1405 }
1406 
1407 SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1408                                             const SDLoc &SL) const {
1409   uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
1410                                                FIRST_IMPLICIT);
1411   return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1412 }
1413 
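// Convert a loaded argument to the type the IR expects: narrow a widened
// vector back to its original element count, honor sext/zext argument
// attributes, and extend or truncate to the final VT.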
1414 SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1415                                          const SDLoc &SL, SDValue Val,
1416                                          bool Signed,
1417                                          const ISD::InputArg *Arg) const {
1418   // First, if it is a widened vector, narrow it.
1419   if (VT.isVector() &&
1420       VT.getVectorNumElements() != MemVT.getVectorNumElements()) {
1421     EVT NarrowedVT =
1422         EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(),
1423                          VT.getVectorNumElements());
1424     Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val,
1425                       DAG.getConstant(0, SL, MVT::i32));
1426   }
1427 
1428   // Then convert the vector elements or scalar value.
1429   if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1430       VT.bitsLT(MemVT)) {
1431     unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1432     Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1433   }
1434 
1435   if (MemVT.isFloatingPoint())
1436     Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
1437   else if (Signed)
1438     Val = DAG.getSExtOrTrunc(Val, SL, VT);
1439   else
1440     Val = DAG.getZExtOrTrunc(Val, SL, VT);
1441 
1442   return Val;
1443 }
1444 
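// Load a kernel argument from the kernarg segment at the given offset. Small
// (sub-dword) underaligned values are instead loaded as part of an aligned
// dword and shifted out, to avoid an extending load.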
1445 SDValue SITargetLowering::lowerKernargMemParameter(
1446   SelectionDAG &DAG, EVT VT, EVT MemVT,
1447   const SDLoc &SL, SDValue Chain,
1448   uint64_t Offset, unsigned Align, bool Signed,
1449   const ISD::InputArg *Arg) const {
1450   Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
1451   PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
1452   MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
1453 
  // Try to avoid using an extload by loading earlier than the argument address,
  // and extracting the relevant bits. The load should hopefully be merged with
  // the load of the previous argument.
1457   if (MemVT.getStoreSize() < 4 && Align < 4) {
1458     // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
1459     int64_t AlignDownOffset = alignDown(Offset, 4);
1460     int64_t OffsetDiff = Offset - AlignDownOffset;
1461 
1462     EVT IntVT = MemVT.changeTypeToInteger();
1463 
1464     // TODO: If we passed in the base kernel offset we could have a better
1465     // alignment than 4, but we don't really need it.
1466     SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
1467     SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4,
1468                                MachineMemOperand::MODereferenceable |
1469                                MachineMemOperand::MOInvariant);
1470 
1471     SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
1472     SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
1473 
1474     SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
1475     ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
    ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);

    return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
1480   }
1481 
1482   SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1483   SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
1484                              MachineMemOperand::MODereferenceable |
1485                              MachineMemOperand::MOInvariant);
1486 
1487   SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
1488   return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
1489 }
1490 
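// Lower an argument that was assigned to the stack: create a fixed frame
// object at its offset and load it, applying any extension the calling
// convention recorded. Byval arguments just yield the frame address.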
1491 SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1492                                               const SDLoc &SL, SDValue Chain,
1493                                               const ISD::InputArg &Arg) const {
1494   MachineFunction &MF = DAG.getMachineFunction();
1495   MachineFrameInfo &MFI = MF.getFrameInfo();
1496 
1497   if (Arg.Flags.isByVal()) {
1498     unsigned Size = Arg.Flags.getByValSize();
1499     int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1500     return DAG.getFrameIndex(FrameIdx, MVT::i32);
1501   }
1502 
1503   unsigned ArgOffset = VA.getLocMemOffset();
1504   unsigned ArgSize = VA.getValVT().getStoreSize();
1505 
1506   int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1507 
1508   // Create load nodes to retrieve arguments from the stack.
1509   SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1510   SDValue ArgValue;
1511 
  // For NON_EXTLOAD, the generic code in getLoad asserts that ValVT == MemVT.
1513   ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
1514   MVT MemVT = VA.getValVT();
1515 
1516   switch (VA.getLocInfo()) {
1517   default:
1518     break;
1519   case CCValAssign::BCvt:
1520     MemVT = VA.getLocVT();
1521     break;
1522   case CCValAssign::SExt:
1523     ExtType = ISD::SEXTLOAD;
1524     break;
1525   case CCValAssign::ZExt:
1526     ExtType = ISD::ZEXTLOAD;
1527     break;
1528   case CCValAssign::AExt:
1529     ExtType = ISD::EXTLOAD;
1530     break;
1531   }
1532 
1533   ArgValue = DAG.getExtLoad(
1534     ExtType, SL, VA.getLocVT(), Chain, FIN,
1535     MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
1536     MemVT);
1537   return ArgValue;
1538 }
1539 
1540 SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1541   const SIMachineFunctionInfo &MFI,
1542   EVT VT,
1543   AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
1544   const ArgDescriptor *Reg;
1545   const TargetRegisterClass *RC;
1546 
1547   std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1548   return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1549 }
1550 
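// Collect the actual arguments for a shader, skipping unused pixel shader
// inputs and recording which PS input registers must be allocated/enabled.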
1551 static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
1552                                    CallingConv::ID CallConv,
1553                                    ArrayRef<ISD::InputArg> Ins,
1554                                    BitVector &Skipped,
1555                                    FunctionType *FType,
1556                                    SIMachineFunctionInfo *Info) {
1557   for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
1558     const ISD::InputArg *Arg = &Ins[I];
1559 
1560     assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
1561            "vector type argument should have been split");
1562 
1563     // First check if it's a PS input addr.
1564     if (CallConv == CallingConv::AMDGPU_PS &&
1565         !Arg->Flags.isInReg() && PSInputNum <= 15) {
1566       bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);
1567 
1568       // Inconveniently only the first part of the split is marked as isSplit,
1569       // so skip to the end. We only want to increment PSInputNum once for the
1570       // entire split argument.
1571       if (Arg->Flags.isSplit()) {
1572         while (!Arg->Flags.isSplitEnd()) {
1573           assert((!Arg->VT.isVector() ||
1574                   Arg->VT.getScalarSizeInBits() == 16) &&
1575                  "unexpected vector split in ps argument type");
1576           if (!SkipArg)
1577             Splits.push_back(*Arg);
1578           Arg = &Ins[++I];
1579         }
1580       }
1581 
1582       if (SkipArg) {
1583         // We can safely skip PS inputs.
1584         Skipped.set(Arg->getOrigArgIndex());
1585         ++PSInputNum;
1586         continue;
1587       }
1588 
1589       Info->markPSInputAllocated(PSInputNum);
1590       if (Arg->Used)
1591         Info->markPSInputEnabled(PSInputNum);
1592 
1593       ++PSInputNum;
1594     }
1595 
1596     Splits.push_back(*Arg);
1597   }
1598 }
1599 
1600 // Allocate special inputs passed in VGPRs.
1601 void SITargetLowering::allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1602                                                       MachineFunction &MF,
1603                                                       const SIRegisterInfo &TRI,
1604                                                       SIMachineFunctionInfo &Info) const {
1605   const LLT S32 = LLT::scalar(32);
1606   MachineRegisterInfo &MRI = MF.getRegInfo();
1607 
1608   if (Info.hasWorkItemIDX()) {
1609     Register Reg = AMDGPU::VGPR0;
1610     MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1611 
1612     CCInfo.AllocateReg(Reg);
1613     Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
1614   }
1615 
1616   if (Info.hasWorkItemIDY()) {
1617     Register Reg = AMDGPU::VGPR1;
1618     MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1619 
1620     CCInfo.AllocateReg(Reg);
1621     Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
1622   }
1623 
1624   if (Info.hasWorkItemIDZ()) {
1625     Register Reg = AMDGPU::VGPR2;
1626     MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1627 
1628     CCInfo.AllocateReg(Reg);
1629     Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
1630   }
1631 }
1632 
// Try to allocate a VGPR at the end of the argument list, or if no argument
// VGPRs are left, allocate a stack slot instead.
// If \p Mask is given, it indicates the bitfield position in the register.
// If \p Arg is given, reuse it with the new \p Mask instead of allocating a
// new register.
1637 static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u,
1638                                          ArgDescriptor Arg = ArgDescriptor()) {
1639   if (Arg.isSet())
1640     return ArgDescriptor::createArg(Arg, Mask);
1641 
1642   ArrayRef<MCPhysReg> ArgVGPRs
1643     = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1644   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1645   if (RegIdx == ArgVGPRs.size()) {
1646     // Spill to stack required.
1647     int64_t Offset = CCInfo.AllocateStack(4, 4);
1648 
1649     return ArgDescriptor::createStack(Offset, Mask);
1650   }
1651 
1652   unsigned Reg = ArgVGPRs[RegIdx];
1653   Reg = CCInfo.AllocateReg(Reg);
1654   assert(Reg != AMDGPU::NoRegister);
1655 
1656   MachineFunction &MF = CCInfo.getMachineFunction();
1657   MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1658   return ArgDescriptor::createRegister(Reg, Mask);
1659 }
1660 
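// Allocate the first unallocated SGPR from \p RC for a special input, adding
// it as a function live-in. Unlike VGPR inputs, there is no stack fallback.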
1661 static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
1662                                              const TargetRegisterClass *RC,
1663                                              unsigned NumArgRegs) {
  ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), NumArgRegs);
1665   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1666   if (RegIdx == ArgSGPRs.size())
1667     report_fatal_error("ran out of SGPRs for arguments");
1668 
1669   unsigned Reg = ArgSGPRs[RegIdx];
1670   Reg = CCInfo.AllocateReg(Reg);
1671   assert(Reg != AMDGPU::NoRegister);
1672 
1673   MachineFunction &MF = CCInfo.getMachineFunction();
1674   MF.addLiveIn(Reg, RC);
1675   return ArgDescriptor::createRegister(Reg);
1676 }
1677 
1678 static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
1679   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1680 }
1681 
1682 static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
1683   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1684 }
1685 
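// For callable functions, the workitem IDs are packed into bitfields of a
// single VGPR where possible: X in bits 0-9, Y in bits 10-19, Z in bits
// 20-29.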
1686 void SITargetLowering::allocateSpecialInputVGPRs(CCState &CCInfo,
1687                                                  MachineFunction &MF,
1688                                                  const SIRegisterInfo &TRI,
1689                                                  SIMachineFunctionInfo &Info) const {
1690   const unsigned Mask = 0x3ff;
1691   ArgDescriptor Arg;
1692 
1693   if (Info.hasWorkItemIDX()) {
1694     Arg = allocateVGPR32Input(CCInfo, Mask);
1695     Info.setWorkItemIDX(Arg);
1696   }
1697 
1698   if (Info.hasWorkItemIDY()) {
1699     Arg = allocateVGPR32Input(CCInfo, Mask << 10, Arg);
1700     Info.setWorkItemIDY(Arg);
1701   }
1702 
1703   if (Info.hasWorkItemIDZ())
1704     Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo, Mask << 20, Arg));
1705 }
1706 
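// Allocate SGPRs for the special (implicit) inputs a callable function may
// use, such as the dispatch/queue pointers and workgroup IDs.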
1707 void SITargetLowering::allocateSpecialInputSGPRs(
1708   CCState &CCInfo,
1709   MachineFunction &MF,
1710   const SIRegisterInfo &TRI,
1711   SIMachineFunctionInfo &Info) const {
1712   auto &ArgInfo = Info.getArgInfo();
1713 
1714   // TODO: Unify handling with private memory pointers.
1715 
1716   if (Info.hasDispatchPtr())
1717     ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1718 
1719   if (Info.hasQueuePtr())
1720     ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1721 
1722   if (Info.hasKernargSegmentPtr())
1723     ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);
1724 
1725   if (Info.hasDispatchID())
1726     ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1727 
1728   // flat_scratch_init is not applicable for non-kernel functions.
1729 
1730   if (Info.hasWorkGroupIDX())
1731     ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1732 
1733   if (Info.hasWorkGroupIDY())
1734     ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1735 
1736   if (Info.hasWorkGroupIDZ())
1737     ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
1738 
1739   if (Info.hasImplicitArgPtr())
1740     ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
1741 }
1742 
1743 // Allocate special inputs passed in user SGPRs.
1744 void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo,
1745                                             MachineFunction &MF,
1746                                             const SIRegisterInfo &TRI,
1747                                             SIMachineFunctionInfo &Info) const {
1748   if (Info.hasImplicitBufferPtr()) {
1749     unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1750     MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1751     CCInfo.AllocateReg(ImplicitBufferPtrReg);
1752   }
1753 
1754   // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1755   if (Info.hasPrivateSegmentBuffer()) {
1756     unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1757     MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1758     CCInfo.AllocateReg(PrivateSegmentBufferReg);
1759   }
1760 
1761   if (Info.hasDispatchPtr()) {
1762     unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1763     MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1764     CCInfo.AllocateReg(DispatchPtrReg);
1765   }
1766 
1767   if (Info.hasQueuePtr()) {
1768     unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1769     MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1770     CCInfo.AllocateReg(QueuePtrReg);
1771   }
1772 
1773   if (Info.hasKernargSegmentPtr()) {
1774     MachineRegisterInfo &MRI = MF.getRegInfo();
1775     Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
1776     CCInfo.AllocateReg(InputPtrReg);
1777 
1778     Register VReg = MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1779     MRI.setType(VReg, LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
1780   }
1781 
1782   if (Info.hasDispatchID()) {
1783     unsigned DispatchIDReg = Info.addDispatchID(TRI);
1784     MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1785     CCInfo.AllocateReg(DispatchIDReg);
1786   }
1787 
1788   if (Info.hasFlatScratchInit()) {
1789     unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1790     MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1791     CCInfo.AllocateReg(FlatScratchInitReg);
1792   }
1793 
1794   // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1795   // these from the dispatch pointer.
1796 }
1797 
1798 // Allocate special input registers that are initialized per-wave.
1799 void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo,
1800                                            MachineFunction &MF,
1801                                            SIMachineFunctionInfo &Info,
1802                                            CallingConv::ID CallConv,
1803                                            bool IsShader) const {
1804   if (Info.hasWorkGroupIDX()) {
1805     unsigned Reg = Info.addWorkGroupIDX();
1806     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1807     CCInfo.AllocateReg(Reg);
1808   }
1809 
1810   if (Info.hasWorkGroupIDY()) {
1811     unsigned Reg = Info.addWorkGroupIDY();
1812     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1813     CCInfo.AllocateReg(Reg);
1814   }
1815 
1816   if (Info.hasWorkGroupIDZ()) {
1817     unsigned Reg = Info.addWorkGroupIDZ();
1818     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1819     CCInfo.AllocateReg(Reg);
1820   }
1821 
1822   if (Info.hasWorkGroupInfo()) {
1823     unsigned Reg = Info.addWorkGroupInfo();
1824     MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1825     CCInfo.AllocateReg(Reg);
1826   }
1827 
1828   if (Info.hasPrivateSegmentWaveByteOffset()) {
1829     // Scratch wave offset passed in system SGPR.
1830     unsigned PrivateSegmentWaveByteOffsetReg;
1831 
1832     if (IsShader) {
1833       PrivateSegmentWaveByteOffsetReg =
1834         Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
1835 
1836       // This is true if the scratch wave byte offset doesn't have a fixed
1837       // location.
1838       if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1839         PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1840         Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1841       }
1842     } else
1843       PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1844 
1845     MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1846     CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1847   }
1848 }
1849 
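// Decide which physical registers will hold the scratch resource descriptor,
// stack pointer, and frame/scratch wave offset, depending on whether the
// function actually needs stack access.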
1850 static void reservePrivateMemoryRegs(const TargetMachine &TM,
1851                                      MachineFunction &MF,
1852                                      const SIRegisterInfo &TRI,
1853                                      SIMachineFunctionInfo &Info) {
  // Now that we've figured out where the scratch register inputs are, see if we
  // should reserve the arguments and use them directly.
1856   MachineFrameInfo &MFI = MF.getFrameInfo();
1857   bool HasStackObjects = MFI.hasStackObjects();
1858   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1859 
1860   // Record that we know we have non-spill stack objects so we don't need to
1861   // check all stack objects later.
1862   if (HasStackObjects)
1863     Info.setHasNonSpillStackObjects(true);
1864 
1865   // Everything live out of a block is spilled with fast regalloc, so it's
1866   // almost certain that spilling will be required.
1867   if (TM.getOptLevel() == CodeGenOpt::None)
1868     HasStackObjects = true;
1869 
1870   // For now assume stack access is needed in any callee functions, so we need
1871   // the scratch registers to pass in.
1872   bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1873 
1874   if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) {
1875     // If we have stack objects, we unquestionably need the private buffer
1876     // resource. For the Code Object V2 ABI, this will be the first 4 user
1877     // SGPR inputs. We can reserve those and use them directly.
1878 
1879     unsigned PrivateSegmentBufferReg =
1880         Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
1881     Info.setScratchRSrcReg(PrivateSegmentBufferReg);
1882   } else {
1883     unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
    // We tentatively reserve the last available registers (skipping those that
    // may contain VCC, FLAT_SCR, and XNACK). After register allocation, we'll
    // replace these with the ones immediately after those which were really
    // allocated. In the prologue, copies will be inserted from the argument
    // registers to these reserved registers.
1889 
1890     // Without HSA, relocations are used for the scratch pointer and the
1891     // buffer resource setup is always inserted in the prologue. Scratch wave
1892     // offset is still in an input SGPR.
1893     Info.setScratchRSrcReg(ReservedBufferReg);
1894   }
1895 
1896   // hasFP should be accurate for kernels even before the frame is finalized.
1897   if (ST.getFrameLowering()->hasFP(MF)) {
1898     MachineRegisterInfo &MRI = MF.getRegInfo();
1899 
    // Try to use s32 as the SP, but move it if it would interfere with input
    // arguments. This won't work with calls, though.
1902     //
1903     // FIXME: Move SP to avoid any possible inputs, or find a way to spill input
1904     // registers.
1905     if (!MRI.isLiveIn(AMDGPU::SGPR32)) {
1906       Info.setStackPtrOffsetReg(AMDGPU::SGPR32);
1907     } else {
1908       assert(AMDGPU::isShader(MF.getFunction().getCallingConv()));
1909 
1910       if (MFI.hasCalls())
1911         report_fatal_error("call in graphics shader with too many input SGPRs");
1912 
1913       for (unsigned Reg : AMDGPU::SGPR_32RegClass) {
1914         if (!MRI.isLiveIn(Reg)) {
1915           Info.setStackPtrOffsetReg(Reg);
1916           break;
1917         }
1918       }
1919 
1920       if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG)
1921         report_fatal_error("failed to find register for SP");
1922     }
1923 
1924     if (MFI.hasCalls()) {
1925       Info.setScratchWaveOffsetReg(AMDGPU::SGPR33);
1926       Info.setFrameOffsetReg(AMDGPU::SGPR33);
1927     } else {
1928       unsigned ReservedOffsetReg =
1929         TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1930       Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1931       Info.setFrameOffsetReg(ReservedOffsetReg);
1932     }
1933   } else if (RequiresStackAccess) {
1934     assert(!MFI.hasCalls());
1935     // We know there are accesses and they will be done relative to SP, so just
1936     // pin it to the input.
1937     //
1938     // FIXME: Should not do this if inline asm is reading/writing these
1939     // registers.
1940     unsigned PreloadedSP = Info.getPreloadedReg(
1941         AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
1942 
1943     Info.setStackPtrOffsetReg(PreloadedSP);
1944     Info.setScratchWaveOffsetReg(PreloadedSP);
1945     Info.setFrameOffsetReg(PreloadedSP);
1946   } else {
1947     assert(!MFI.hasCalls());
1948 
    // There may not be stack access at all. There may still be spills, or
    // access of a constant pointer (in which case an extra copy will be
    // emitted in the prolog).
1952     unsigned ReservedOffsetReg
1953       = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1954     Info.setStackPtrOffsetReg(ReservedOffsetReg);
1955     Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1956     Info.setFrameOffsetReg(ReservedOffsetReg);
1957   }
1958 }
1959 
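// Split CSR handling only makes sense for callable functions; entry functions
// have no callers whose registers would need preserving.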
1960 bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
1961   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1962   return !Info->isEntryFunction();
1963 }
1964 
1965 void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
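  // Nothing to do here; the copies out of the split CSRs are created later by
  // insertCopiesSplitCSR.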
1967 }
1968 
1969 void SITargetLowering::insertCopiesSplitCSR(
1970   MachineBasicBlock *Entry,
1971   const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1972   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1973 
1974   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1975   if (!IStart)
1976     return;
1977 
1978   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1979   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1980   MachineBasicBlock::iterator MBBI = Entry->begin();
1981   for (const MCPhysReg *I = IStart; *I; ++I) {
1982     const TargetRegisterClass *RC = nullptr;
1983     if (AMDGPU::SReg_64RegClass.contains(*I))
1984       RC = &AMDGPU::SGPR_64RegClass;
1985     else if (AMDGPU::SReg_32RegClass.contains(*I))
1986       RC = &AMDGPU::SGPR_32RegClass;
1987     else
1988       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
1989 
1990     unsigned NewVR = MRI->createVirtualRegister(RC);
1991     // Create copy from CSR to a virtual register.
1992     Entry->addLiveIn(*I);
1993     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
1994       .addReg(*I);
1995 
1996     // Insert the copy-back instructions right before the terminator.
1997     for (auto *Exit : Exits)
1998       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
1999               TII->get(TargetOpcode::COPY), *I)
2000         .addReg(NewVR);
2001   }
2002 }
2003 
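// Lower the incoming arguments: kernel arguments are loaded from the kernarg
// segment, shader inputs arrive in pre-assigned VGPRs/SGPRs, and callable
// functions use the normal calling convention. This also allocates the
// special input registers each kind of function requires.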
2004 SDValue SITargetLowering::LowerFormalArguments(
2005     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
2006     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2007     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2008   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2009 
2010   MachineFunction &MF = DAG.getMachineFunction();
2011   const Function &Fn = MF.getFunction();
2012   FunctionType *FType = MF.getFunction().getFunctionType();
2013   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2014 
2015   if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
2016     DiagnosticInfoUnsupported NoGraphicsHSA(
2017         Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
2018     DAG.getContext()->diagnose(NoGraphicsHSA);
2019     return DAG.getEntryNode();
2020   }
2021 
2022   SmallVector<ISD::InputArg, 16> Splits;
2023   SmallVector<CCValAssign, 16> ArgLocs;
2024   BitVector Skipped(Ins.size());
2025   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
2026                  *DAG.getContext());
2027 
2028   bool IsShader = AMDGPU::isShader(CallConv);
2029   bool IsKernel = AMDGPU::isKernel(CallConv);
2030   bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
2031 
2032   if (IsShader) {
2033     processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
2034 
2035     // At least one interpolation mode must be enabled or else the GPU will
2036     // hang.
2037     //
2038     // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
2039     // set PSInputAddr, the user wants to enable some bits after the compilation
    // based on run-time states. Since we can't know what the final PSInputEna
    // will look like, we shouldn't do anything here, and the user should take
    // responsibility for the correct programming.
2043     //
2044     // Otherwise, the following restrictions apply:
2045     // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
2046     // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
2047     //   enabled too.
2048     if (CallConv == CallingConv::AMDGPU_PS) {
2049       if ((Info->getPSInputAddr() & 0x7F) == 0 ||
2050            ((Info->getPSInputAddr() & 0xF) == 0 &&
2051             Info->isPSInputAllocated(11))) {
2052         CCInfo.AllocateReg(AMDGPU::VGPR0);
2053         CCInfo.AllocateReg(AMDGPU::VGPR1);
2054         Info->markPSInputAllocated(0);
2055         Info->markPSInputEnabled(0);
2056       }
2057       if (Subtarget->isAmdPalOS()) {
2058         // For isAmdPalOS, the user does not enable some bits after compilation
2059         // based on run-time states; the register values being generated here are
2060         // the final ones set in hardware. Therefore we need to apply the
2061         // workaround to PSInputAddr and PSInputEnable together.  (The case where
2062         // a bit is set in PSInputAddr but not PSInputEnable is where the
2063         // frontend set up an input arg for a particular interpolation mode, but
2064         // nothing uses that input arg. Really we should have an earlier pass
2065         // that removes such an arg.)
2066         unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
2067         if ((PsInputBits & 0x7F) == 0 ||
2068             ((PsInputBits & 0xF) == 0 &&
2069              (PsInputBits >> 11 & 1)))
2070           Info->markPSInputEnabled(
2071               countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
2072       }
2073     }
2074 
2075     assert(!Info->hasDispatchPtr() &&
2076            !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
2077            !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
2078            !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
2079            !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
2080            !Info->hasWorkItemIDZ());
2081   } else if (IsKernel) {
2082     assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
2083   } else {
2084     Splits.append(Ins.begin(), Ins.end());
2085   }
2086 
2087   if (IsEntryFunc) {
2088     allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
2089     allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
2090   }
2091 
2092   if (IsKernel) {
2093     analyzeFormalArgumentsCompute(CCInfo, Ins);
2094   } else {
2095     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
2096     CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
2097   }
2098 
2099   SmallVector<SDValue, 16> Chains;
2100 
2101   // FIXME: This is the minimum kernel argument alignment. We should improve
2102   // this to the maximum alignment of the arguments.
2103   //
  // FIXME: Alignment of explicit arguments is totally broken with a non-0
  // explicit kern arg offset.
2106   const unsigned KernelArgBaseAlign = 16;
2107 
  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
2109     const ISD::InputArg &Arg = Ins[i];
2110     if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
2111       InVals.push_back(DAG.getUNDEF(Arg.VT));
2112       continue;
2113     }
2114 
2115     CCValAssign &VA = ArgLocs[ArgIdx++];
2116     MVT VT = VA.getLocVT();
2117 
2118     if (IsEntryFunc && VA.isMemLoc()) {
2119       VT = Ins[i].VT;
2120       EVT MemVT = VA.getLocVT();
2121 
2122       const uint64_t Offset = VA.getLocMemOffset();
2123       unsigned Align = MinAlign(KernelArgBaseAlign, Offset);
2124 
2125       SDValue Arg = lowerKernargMemParameter(
2126         DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]);
2127       Chains.push_back(Arg.getValue(1));
2128 
2129       auto *ParamTy =
2130         dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
2131       if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
2132           ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
2133                       ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) {
        // On SI, local pointers are just offsets into LDS, so they are always
        // less than 16 bits. On CI and newer they could potentially be real
        // pointers, so we can't guarantee their size.
2137         Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
2138                           DAG.getValueType(MVT::i16));
2139       }
2140 
2141       InVals.push_back(Arg);
2142       continue;
2143     } else if (!IsEntryFunc && VA.isMemLoc()) {
2144       SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
2145       InVals.push_back(Val);
2146       if (!Arg.Flags.isByVal())
2147         Chains.push_back(Val.getValue(1));
2148       continue;
2149     }
2150 
2151     assert(VA.isRegLoc() && "Parameter must be in a register!");
2152 
2153     unsigned Reg = VA.getLocReg();
2154     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2155     EVT ValVT = VA.getValVT();
2156 
2157     Reg = MF.addLiveIn(Reg, RC);
2158     SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
2159 
2160     if (Arg.Flags.isSRet()) {
2161       // The return object should be reasonably addressable.
2162 
      // FIXME: This helps when the return is a real sret. If it is an
      // automatically inserted sret (i.e. CanLowerReturn returns false), an
      // extra copy is inserted in SelectionDAGBuilder which obscures this.
2166       unsigned NumBits
2167         = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex();
2168       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2169         DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
2170     }
2171 
    // If this is an 8- or 16-bit value, it is really passed promoted
    // to 32 bits. Insert an assert[sz]ext to capture this, then
    // truncate to the right size.
2175     switch (VA.getLocInfo()) {
2176     case CCValAssign::Full:
2177       break;
2178     case CCValAssign::BCvt:
2179       Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
2180       break;
2181     case CCValAssign::SExt:
2182       Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
2183                         DAG.getValueType(ValVT));
2184       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2185       break;
2186     case CCValAssign::ZExt:
2187       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2188                         DAG.getValueType(ValVT));
2189       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2190       break;
2191     case CCValAssign::AExt:
2192       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2193       break;
2194     default:
2195       llvm_unreachable("Unknown loc info!");
2196     }
2197 
2198     InVals.push_back(Val);
2199   }
2200 
2201   if (!IsEntryFunc) {
2202     // Special inputs come after user arguments.
2203     allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
2204   }
2205 
2206   // Start adding system SGPRs.
2207   if (IsEntryFunc) {
2208     allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
2209   } else {
2210     CCInfo.AllocateReg(Info->getScratchRSrcReg());
2211     CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
2212     CCInfo.AllocateReg(Info->getFrameOffsetReg());
2213     allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
2214   }
2215 
2216   auto &ArgUsageInfo =
2217     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2218   ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
2219 
2220   unsigned StackArgSize = CCInfo.getNextStackOffset();
2221   Info->setBytesInStackArgArea(StackArgSize);
2222 
2223   return Chains.empty() ? Chain :
2224     DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
2225 }
2226 
2227 // TODO: If return values can't fit in registers, we should return as many as
2228 // possible in registers before passing on stack.
2229 bool SITargetLowering::CanLowerReturn(
2230   CallingConv::ID CallConv,
2231   MachineFunction &MF, bool IsVarArg,
2232   const SmallVectorImpl<ISD::OutputArg> &Outs,
2233   LLVMContext &Context) const {
2234   // Replacing returns with sret/stack usage doesn't make sense for shaders.
2235   // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
2236   // for shaders. Vector types should be explicitly handled by CC.
2237   if (AMDGPU::isEntryFunctionCC(CallConv))
2238     return true;
2239 
2240   SmallVector<CCValAssign, 16> RVLocs;
2241   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2242   return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
2243 }
2244 
2245 SDValue
2246 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2247                               bool isVarArg,
2248                               const SmallVectorImpl<ISD::OutputArg> &Outs,
2249                               const SmallVectorImpl<SDValue> &OutVals,
2250                               const SDLoc &DL, SelectionDAG &DAG) const {
2251   MachineFunction &MF = DAG.getMachineFunction();
2252   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2253 
2254   if (AMDGPU::isKernel(CallConv)) {
2255     return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
2256                                              OutVals, DL, DAG);
2257   }
2258 
2259   bool IsShader = AMDGPU::isShader(CallConv);
2260 
2261   Info->setIfReturnsVoid(Outs.empty());
2262   bool IsWaveEnd = Info->returnsVoid() && IsShader;
2263 
  // CCValAssign - represents the assignment of the return value to a location.
2265   SmallVector<CCValAssign, 48> RVLocs;
2266   SmallVector<ISD::OutputArg, 48> Splits;
2267 
2268   // CCState - Info about the registers and stack slots.
2269   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2270                  *DAG.getContext());
2271 
2272   // Analyze outgoing return values.
2273   CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2274 
2275   SDValue Flag;
2276   SmallVector<SDValue, 48> RetOps;
2277   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2278 
2279   // Add return address for callable functions.
2280   if (!Info->isEntryFunction()) {
2281     const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2282     SDValue ReturnAddrReg = CreateLiveInRegister(
2283       DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2284 
2285     SDValue ReturnAddrVirtualReg = DAG.getRegister(
2286         MF.getRegInfo().createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass),
2287         MVT::i64);
2288     Chain =
2289         DAG.getCopyToReg(Chain, DL, ReturnAddrVirtualReg, ReturnAddrReg, Flag);
2290     Flag = Chain.getValue(1);
2291     RetOps.push_back(ReturnAddrVirtualReg);
2292   }
2293 
2294   // Copy the result values into the output registers.
2295   for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
2296        ++I, ++RealRVLocIdx) {
2297     CCValAssign &VA = RVLocs[I];
2298     assert(VA.isRegLoc() && "Can only return in registers!");
2299     // TODO: Partially return in registers if return values don't fit.
2300     SDValue Arg = OutVals[RealRVLocIdx];
2301 
2302     // Copied from other backends.
2303     switch (VA.getLocInfo()) {
2304     case CCValAssign::Full:
2305       break;
2306     case CCValAssign::BCvt:
2307       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2308       break;
2309     case CCValAssign::SExt:
2310       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2311       break;
2312     case CCValAssign::ZExt:
2313       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2314       break;
2315     case CCValAssign::AExt:
2316       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2317       break;
2318     default:
2319       llvm_unreachable("Unknown loc info!");
2320     }
2321 
2322     Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2323     Flag = Chain.getValue(1);
2324     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2325   }
2326 
2327   // FIXME: Does sret work properly?
2328   if (!Info->isEntryFunction()) {
2329     const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2330     const MCPhysReg *I =
2331       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2332     if (I) {
2333       for (; *I; ++I) {
2334         if (AMDGPU::SReg_64RegClass.contains(*I))
2335           RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2336         else if (AMDGPU::SReg_32RegClass.contains(*I))
2337           RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2338         else
2339           llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2340       }
2341     }
2342   }
2343 
2344   // Update chain and glue.
2345   RetOps[0] = Chain;
2346   if (Flag.getNode())
2347     RetOps.push_back(Flag);
2348 
2349   unsigned Opc = AMDGPUISD::ENDPGM;
2350   if (!IsWaveEnd)
2351     Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
2352   return DAG.getNode(Opc, DL, MVT::Other, RetOps);
2353 }
2354 
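// Copy the return values of a call out of their assigned physical registers,
// applying any bitcast or extension the calling convention recorded.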
2355 SDValue SITargetLowering::LowerCallResult(
2356     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2357     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2358     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2359     SDValue ThisVal) const {
2360   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2361 
2362   // Assign locations to each value returned by this call.
2363   SmallVector<CCValAssign, 16> RVLocs;
2364   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2365                  *DAG.getContext());
2366   CCInfo.AnalyzeCallResult(Ins, RetCC);
2367 
2368   // Copy all of the result registers out of their specified physreg.
2369   for (unsigned i = 0; i != RVLocs.size(); ++i) {
2370     CCValAssign VA = RVLocs[i];
2371     SDValue Val;
2372 
2373     if (VA.isRegLoc()) {
2374       Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2375       Chain = Val.getValue(1);
2376       InFlag = Val.getValue(2);
2377     } else if (VA.isMemLoc()) {
2378       report_fatal_error("TODO: return values in memory");
2379     } else
2380       llvm_unreachable("unknown argument location type");
2381 
2382     switch (VA.getLocInfo()) {
2383     case CCValAssign::Full:
2384       break;
2385     case CCValAssign::BCvt:
2386       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2387       break;
2388     case CCValAssign::ZExt:
2389       Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2390                         DAG.getValueType(VA.getValVT()));
2391       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2392       break;
2393     case CCValAssign::SExt:
2394       Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2395                         DAG.getValueType(VA.getValVT()));
2396       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2397       break;
2398     case CCValAssign::AExt:
2399       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2400       break;
2401     default:
2402       llvm_unreachable("Unknown loc info!");
2403     }
2404 
2405     InVals.push_back(Val);
2406   }
2407 
2408   return Chain;
2409 }
2410 
// Add code to pass the special inputs required by used features, separately
// from the explicit user arguments present in the IR.
2413 void SITargetLowering::passSpecialInputs(
2414     CallLoweringInfo &CLI,
2415     CCState &CCInfo,
2416     const SIMachineFunctionInfo &Info,
2417     SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2418     SmallVectorImpl<SDValue> &MemOpChains,
2419     SDValue Chain) const {
2420   // If we don't have a call site, this was a call inserted by
2421   // legalization. These can never use special inputs.
2422   if (!CLI.CS)
2423     return;
2424 
2425   const Function *CalleeFunc = CLI.CS.getCalledFunction();
2426   assert(CalleeFunc);
2427 
2428   SelectionDAG &DAG = CLI.DAG;
2429   const SDLoc &DL = CLI.DL;
2430 
2431   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2432 
2433   auto &ArgUsageInfo =
2434     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2435   const AMDGPUFunctionArgInfo &CalleeArgInfo
2436     = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2437 
2438   const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2439 
2440   // TODO: Unify with private memory register handling. This is complicated by
2441   // the fact that at least in kernels, the input argument is not necessarily
2442   // in the same location as the input.
2443   AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
2444     AMDGPUFunctionArgInfo::DISPATCH_PTR,
2445     AMDGPUFunctionArgInfo::QUEUE_PTR,
2446     AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
2447     AMDGPUFunctionArgInfo::DISPATCH_ID,
2448     AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
2449     AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
2450     AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
2451     AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
2452   };
2453 
2454   for (auto InputID : InputRegs) {
2455     const ArgDescriptor *OutgoingArg;
2456     const TargetRegisterClass *ArgRC;
2457 
2458     std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2459     if (!OutgoingArg)
2460       continue;
2461 
2462     const ArgDescriptor *IncomingArg;
2463     const TargetRegisterClass *IncomingArgRC;
2464     std::tie(IncomingArg, IncomingArgRC)
2465       = CallerArgInfo.getPreloadedValue(InputID);
2466     assert(IncomingArgRC == ArgRC);
2467 
2468     // All special arguments are ints for now.
2469     EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
2470     SDValue InputReg;
2471 
2472     if (IncomingArg) {
2473       InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2474     } else {
2475       // The implicit arg ptr is special because it doesn't have a corresponding
2476       // input for kernels, and is computed from the kernarg segment pointer.
2477       assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2478       InputReg = getImplicitArgPtr(DAG, DL);
2479     }
2480 
2481     if (OutgoingArg->isRegister()) {
2482       RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2483     } else {
2484       unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4);
2485       SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2486                                               SpecialArgOffset);
2487       MemOpChains.push_back(ArgStore);
2488     }
2489   }
2490 
  // Pack workitem IDs into a single register, or pass them as-is if they are
  // already packed.
2493   const ArgDescriptor *OutgoingArg;
2494   const TargetRegisterClass *ArgRC;
2495 
2496   std::tie(OutgoingArg, ArgRC) =
2497     CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
2498   if (!OutgoingArg)
2499     std::tie(OutgoingArg, ArgRC) =
2500       CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
2501   if (!OutgoingArg)
2502     std::tie(OutgoingArg, ArgRC) =
2503       CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
2504   if (!OutgoingArg)
2505     return;
2506 
2507   const ArgDescriptor *IncomingArgX
2508     = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X).first;
2509   const ArgDescriptor *IncomingArgY
2510     = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y).first;
2511   const ArgDescriptor *IncomingArgZ
2512     = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z).first;
2513 
2514   SDValue InputReg;
2515   SDLoc SL;
2516 
  // If the incoming IDs are not packed, we need to pack them.
2518   if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo.WorkItemIDX)
2519     InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX);
2520 
2521   if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo.WorkItemIDY) {
2522     SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY);
2523     Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y,
2524                     DAG.getShiftAmountConstant(10, MVT::i32, SL));
2525     InputReg = InputReg.getNode() ?
2526                  DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Y) : Y;
2527   }
2528 
2529   if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo.WorkItemIDZ) {
2530     SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ);
2531     Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z,
2532                     DAG.getShiftAmountConstant(20, MVT::i32, SL));
2533     InputReg = InputReg.getNode() ?
2534                  DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Z) : Z;
2535   }
2536 
2537   if (!InputReg.getNode()) {
    // The workitem IDs are already packed; any of the present incoming
    // arguments will carry all of the required fields.
2540     ArgDescriptor IncomingArg = ArgDescriptor::createArg(
2541       IncomingArgX ? *IncomingArgX :
2542       IncomingArgY ? *IncomingArgY :
2543                      *IncomingArgZ, ~0u);
2544     InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg);
2545   }
2546 
2547   if (OutgoingArg->isRegister()) {
2548     RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2549   } else {
2550     unsigned SpecialArgOffset = CCInfo.AllocateStack(4, 4);
2551     SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2552                                             SpecialArgOffset);
2553     MemOpChains.push_back(ArgStore);
2554   }
2555 }
2556 
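// Calling conventions for which tail call optimization can be guaranteed when
// requested via GuaranteedTailCallOpt.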
2557 static bool canGuaranteeTCO(CallingConv::ID CC) {
2558   return CC == CallingConv::Fast;
2559 }
2560 
2561 /// Return true if we might ever do TCO for calls with this calling convention.
2562 static bool mayTailCallThisCC(CallingConv::ID CC) {
2563   switch (CC) {
2564   case CallingConv::C:
2565     return true;
2566   default:
2567     return canGuaranteeTCO(CC);
2568   }
2569 }
2570 
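// Check whether a call site can be lowered as a tail call: the calling
// conventions must be compatible, there must be no byval or varargs
// arguments, the callee must preserve all of the caller's preserved
// registers, and the callee's stack arguments must fit in our own incoming
// argument area.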
2571 bool SITargetLowering::isEligibleForTailCallOptimization(
2572     SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2573     const SmallVectorImpl<ISD::OutputArg> &Outs,
2574     const SmallVectorImpl<SDValue> &OutVals,
2575     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2576   if (!mayTailCallThisCC(CalleeCC))
2577     return false;
2578 
2579   MachineFunction &MF = DAG.getMachineFunction();
2580   const Function &CallerF = MF.getFunction();
2581   CallingConv::ID CallerCC = CallerF.getCallingConv();
2582   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2583   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2584 
  // Kernels aren't callable, and don't have a live-in return address, so it
  // doesn't make sense to do a tail call with entry functions.
2587   if (!CallerPreserved)
2588     return false;
2589 
2590   bool CCMatch = CallerCC == CalleeCC;
2591 
2592   if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
2593     if (canGuaranteeTCO(CalleeCC) && CCMatch)
2594       return true;
2595     return false;
2596   }
2597 
2598   // TODO: Can we handle var args?
2599   if (IsVarArg)
2600     return false;
2601 
2602   for (const Argument &Arg : CallerF.args()) {
2603     if (Arg.hasByValAttr())
2604       return false;
2605   }
2606 
2607   LLVMContext &Ctx = *DAG.getContext();
2608 
2609   // Check that the call results are passed in the same way.
2610   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2611                                   CCAssignFnForCall(CalleeCC, IsVarArg),
2612                                   CCAssignFnForCall(CallerCC, IsVarArg)))
2613     return false;
2614 
2615   // The callee has to preserve all registers the caller needs to preserve.
2616   if (!CCMatch) {
2617     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2618     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2619       return false;
2620   }
2621 
2622   // Nothing more to check if the callee is taking no arguments.
2623   if (Outs.empty())
2624     return true;
2625 
2626   SmallVector<CCValAssign, 16> ArgLocs;
2627   CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2628 
2629   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2630 
2631   const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  // If the stack arguments for this call do not fit into our own save area,
  // then the call cannot be made a tail call.
  // TODO: Is this really necessary?
2635   if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2636     return false;
2637 
2638   const MachineRegisterInfo &MRI = MF.getRegInfo();
2639   return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2640 }
2641 
2642 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2643   if (!CI->isTailCall())
2644     return false;
2645 
2646   const Function *ParentFn = CI->getParent()->getParent();
2647   if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2648     return false;
2649 
2650   auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
2651   return (Attr.getValueAsString() != "true");
2652 }
2653 
2654 // The wave scratch offset register is used as the global base pointer.
2655 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2656                                     SmallVectorImpl<SDValue> &InVals) const {
2657   SelectionDAG &DAG = CLI.DAG;
2658   const SDLoc &DL = CLI.DL;
2659   SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2660   SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2661   SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2662   SDValue Chain = CLI.Chain;
2663   SDValue Callee = CLI.Callee;
2664   bool &IsTailCall = CLI.IsTailCall;
2665   CallingConv::ID CallConv = CLI.CallConv;
2666   bool IsVarArg = CLI.IsVarArg;
2667   bool IsSibCall = false;
2668   bool IsThisReturn = false;
2669   MachineFunction &MF = DAG.getMachineFunction();
2670 
2671   if (IsVarArg) {
2672     return lowerUnhandledCall(CLI, InVals,
2673                               "unsupported call to variadic function ");
2674   }
2675 
2676   if (!CLI.CS.getInstruction())
2677     report_fatal_error("unsupported libcall legalization");
2678 
2679   if (!CLI.CS.getCalledFunction()) {
2680     return lowerUnhandledCall(CLI, InVals,
2681                               "unsupported indirect call to function ");
2682   }
2683 
2684   if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2685     return lowerUnhandledCall(CLI, InVals,
2686                               "unsupported required tail call to function ");
2687   }
2688 
2689   if (AMDGPU::isShader(MF.getFunction().getCallingConv())) {
2690     // Note the issue is with the CC of the calling function, not of the call
2691     // itself.
2692     return lowerUnhandledCall(CLI, InVals,
2693                           "unsupported call from graphics shader of function ");
2694   }
2695 
2696   if (IsTailCall) {
2697     IsTailCall = isEligibleForTailCallOptimization(
2698       Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2699     if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2700       report_fatal_error("failed to perform tail call elimination on a call "
2701                          "site marked musttail");
2702     }
2703 
2704     bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2705 
2706     // A sibling call is one where we're under the usual C ABI and not planning
2707     // to change that but can still do a tail call.
2708     if (!TailCallOpt && IsTailCall)
2709       IsSibCall = true;
2710 
2711     if (IsTailCall)
2712       ++NumTailCalls;
2713   }
2714 
2715   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2716 
2717   // Analyze operands of the call, assigning locations to each operand.
2718   SmallVector<CCValAssign, 16> ArgLocs;
2719   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2720   CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
2721 
2722   CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2723 
2724   // Get a count of how many bytes are to be pushed on the stack.
2725   unsigned NumBytes = CCInfo.getNextStackOffset();
2726 
2727   if (IsSibCall) {
2728     // Since we're not changing the ABI to make this a tail call, the memory
2729     // operands are already available in the caller's incoming argument space.
2730     NumBytes = 0;
2731   }
2732 
2733   // FPDiff is the byte offset of the call's argument area from the callee's.
2734   // Stores to callee stack arguments will be placed in FixedStackSlots offset
2735   // by this amount for a tail call. In a sibling call it must be 0 because the
2736   // caller will deallocate the entire stack and the callee still expects its
2737   // arguments to begin at SP+0. Completely unused for non-tail calls.
2738   int32_t FPDiff = 0;
2739   MachineFrameInfo &MFI = MF.getFrameInfo();
2740   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2741 
2742   // Adjust the stack pointer for the new arguments...
2743   // These operations are automatically eliminated by the prolog/epilog pass
2744   if (!IsSibCall) {
2745     Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2746 
2747     SmallVector<SDValue, 4> CopyFromChains;
2748 
2749     // In the HSA case, this should be an identity copy.
2750     SDValue ScratchRSrcReg
2751       = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2752     RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
2753     CopyFromChains.push_back(ScratchRSrcReg.getValue(1));
2754     Chain = DAG.getTokenFactor(DL, CopyFromChains);
2755   }
2756 
2757   SmallVector<SDValue, 8> MemOpChains;
2758   MVT PtrVT = MVT::i32;
2759 
2760   // Walk the register/memloc assignments, inserting copies/loads.
2761   for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2762        ++i, ++realArgIdx) {
2763     CCValAssign &VA = ArgLocs[i];
2764     SDValue Arg = OutVals[realArgIdx];
2765 
2766     // Promote the value if needed.
2767     switch (VA.getLocInfo()) {
2768     case CCValAssign::Full:
2769       break;
2770     case CCValAssign::BCvt:
2771       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2772       break;
2773     case CCValAssign::ZExt:
2774       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2775       break;
2776     case CCValAssign::SExt:
2777       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2778       break;
2779     case CCValAssign::AExt:
2780       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2781       break;
2782     case CCValAssign::FPExt:
2783       Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2784       break;
2785     default:
2786       llvm_unreachable("Unknown loc info!");
2787     }
2788 
2789     if (VA.isRegLoc()) {
2790       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2791     } else {
2792       assert(VA.isMemLoc());
2793 
2794       SDValue DstAddr;
2795       MachinePointerInfo DstInfo;
2796 
2797       unsigned LocMemOffset = VA.getLocMemOffset();
2798       int32_t Offset = LocMemOffset;
2799 
2800       SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT);
2801       unsigned Align = 0;
2802 
2803       if (IsTailCall) {
2804         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2805         unsigned OpSize = Flags.isByVal() ?
2806           Flags.getByValSize() : VA.getValVT().getStoreSize();
2807 
2808         // FIXME: We can have better than the minimum byval required alignment.
2809         Align = Flags.isByVal() ? Flags.getByValAlign() :
2810           MinAlign(Subtarget->getStackAlignment(), Offset);
2811 
2812         Offset = Offset + FPDiff;
2813         int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2814 
2815         DstAddr = DAG.getFrameIndex(FI, PtrVT);
2816         DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2817 
2818         // Make sure any stack arguments overlapping with where we're storing
2819         // are loaded before this eventual operation. Otherwise they'll be
2820         // clobbered.
2821 
2822         // FIXME: Why is this really necessary? This seems to just result in a
2823         // lot of code to copy the stack arguments and write them back to the
2824         // same locations, which are supposed to be immutable?
2825         Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2826       } else {
2827         DstAddr = PtrOff;
2828         DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
2829         Align = MinAlign(Subtarget->getStackAlignment(), LocMemOffset);
2830       }
2831 
2832       if (Outs[i].Flags.isByVal()) {
2833         SDValue SizeNode =
2834             DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2835         SDValue Cpy = DAG.getMemcpy(
2836             Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2837             /*isVol = */ false, /*AlwaysInline = */ true,
2838             /*isTailCall = */ false, DstInfo,
2839             MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
2840                 *DAG.getContext(), AMDGPUAS::PRIVATE_ADDRESS))));
2841 
2842         MemOpChains.push_back(Cpy);
2843       } else {
2844         SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Align);
2845         MemOpChains.push_back(Store);
2846       }
2847     }
2848   }
2849 
2850   // Copy special input registers after user input arguments.
2851   passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain);
2852 
2853   if (!MemOpChains.empty())
2854     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2855 
2856   // Build a sequence of copy-to-reg nodes chained together with token chain
2857   // and flag operands which copy the outgoing args into the appropriate regs.
2858   SDValue InFlag;
2859   for (auto &RegToPass : RegsToPass) {
2860     Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2861                              RegToPass.second, InFlag);
2862     InFlag = Chain.getValue(1);
2863   }
2864 
2866   SDValue PhysReturnAddrReg;
2867   if (IsTailCall) {
2868     // Since the return is being combined with the call, we need to pass on the
2869     // return address.
2870 
2871     const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2872     SDValue ReturnAddrReg = CreateLiveInRegister(
2873       DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2874 
2875     PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2876                                         MVT::i64);
2877     Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2878     InFlag = Chain.getValue(1);
2879   }
2880 
2881   // We don't usually want to end the call-sequence here because we would tidy
2882   // the frame up *after* the call. However, in the ABI-changing tail-call case
2883   // we've carefully laid out the parameters so that when sp is reset they'll be
2884   // in the correct location.
2885   if (IsTailCall && !IsSibCall) {
2886     Chain = DAG.getCALLSEQ_END(Chain,
2887                                DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2888                                DAG.getTargetConstant(0, DL, MVT::i32),
2889                                InFlag, DL);
2890     InFlag = Chain.getValue(1);
2891   }
2892 
2893   std::vector<SDValue> Ops;
2894   Ops.push_back(Chain);
2895   Ops.push_back(Callee);
2896   // Add a redundant copy of the callee global which will not be legalized, as
2897   // we need direct access to the callee later.
2898   GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Callee);
2899   const GlobalValue *GV = GSD->getGlobal();
2900   Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64));
2901 
2902   if (IsTailCall) {
2903     // Each tail call may have to adjust the stack by a different amount, so
2904     // this information must travel along with the operation for eventual
2905     // consumption by emitEpilogue.
2906     Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
2907 
2908     Ops.push_back(PhysReturnAddrReg);
2909   }
2910 
2911   // Add argument registers to the end of the list so that they are known live
2912   // into the call.
2913   for (auto &RegToPass : RegsToPass) {
2914     Ops.push_back(DAG.getRegister(RegToPass.first,
2915                                   RegToPass.second.getValueType()));
2916   }
2917 
2918   // Add a register mask operand representing the call-preserved registers.
2919 
2920   auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
2921   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2922   assert(Mask && "Missing call preserved mask for calling convention");
2923   Ops.push_back(DAG.getRegisterMask(Mask));
2924 
2925   if (InFlag.getNode())
2926     Ops.push_back(InFlag);
2927 
2928   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2929 
2930   // If we're doing a tail call, use a TC_RETURN here rather than an
2931   // actual call instruction.
2932   if (IsTailCall) {
2933     MFI.setHasTailCall();
2934     return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
2935   }
2936 
2937   // Returns a chain and a flag for retval copy to use.
2938   SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2939   Chain = Call.getValue(0);
2940   InFlag = Call.getValue(1);
2941 
2942   uint64_t CalleePopBytes = NumBytes;
2943   Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
2944                              DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2945                              InFlag, DL);
2946   if (!Ins.empty())
2947     InFlag = Chain.getValue(1);
2948 
2949   // Handle result values, copying them out of physregs into vregs that we
2950   // return.
2951   return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2952                          InVals, IsThisReturn,
2953                          IsThisReturn ? OutVals[0] : SDValue());
2954 }
2955 
2956 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2957                                              SelectionDAG &DAG) const {
2958   unsigned Reg = StringSwitch<unsigned>(RegName)
2959     .Case("m0", AMDGPU::M0)
2960     .Case("exec", AMDGPU::EXEC)
2961     .Case("exec_lo", AMDGPU::EXEC_LO)
2962     .Case("exec_hi", AMDGPU::EXEC_HI)
2963     .Case("flat_scratch", AMDGPU::FLAT_SCR)
2964     .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2965     .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2966     .Default(AMDGPU::NoRegister);
2967 
2968   if (Reg == AMDGPU::NoRegister) {
2969     report_fatal_error(Twine("invalid register name \""
2970                              + StringRef(RegName) + "\"."));
2972   }
2973 
2974   if (!Subtarget->hasFlatScrRegister() &&
2975       Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
2976     report_fatal_error(Twine("invalid register \""
2977                              + StringRef(RegName) + "\" for subtarget."));
2978   }
2979 
2980   switch (Reg) {
2981   case AMDGPU::M0:
2982   case AMDGPU::EXEC_LO:
2983   case AMDGPU::EXEC_HI:
2984   case AMDGPU::FLAT_SCR_LO:
2985   case AMDGPU::FLAT_SCR_HI:
2986     if (VT.getSizeInBits() == 32)
2987       return Reg;
2988     break;
2989   case AMDGPU::EXEC:
2990   case AMDGPU::FLAT_SCR:
2991     if (VT.getSizeInBits() == 64)
2992       return Reg;
2993     break;
2994   default:
2995     llvm_unreachable("missing register type checking");
2996   }
2997 
2998   report_fatal_error(Twine("invalid type for register \""
2999                            + StringRef(RegName) + "\"."));
3000 }
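
// For example (illustrative IR), this supports named-register reads such as
//
//   %exec = call i64 @llvm.read_register.i64(metadata !"exec")
//
// Requesting "exec" with a 32-bit type, or a flat_scratch register on a
// subtarget without one, is diagnosed with a fatal error above.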
3001 
3002 // If kill is not the last instruction, split the block so kill is always a
3003 // proper terminator.
3004 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
3005                                                     MachineBasicBlock *BB) const {
3006   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3007 
3008   MachineBasicBlock::iterator SplitPoint(&MI);
3009   ++SplitPoint;
3010 
3011   if (SplitPoint == BB->end()) {
3012     // Don't bother with a new block.
3013     MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
3014     return BB;
3015   }
3016 
3017   MachineFunction *MF = BB->getParent();
3018   MachineBasicBlock *SplitBB
3019     = MF->CreateMachineBasicBlock(BB->getBasicBlock());
3020 
3021   MF->insert(++MachineFunction::iterator(BB), SplitBB);
3022   SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
3023 
3024   SplitBB->transferSuccessorsAndUpdatePHIs(BB);
3025   BB->addSuccessor(SplitBB);
3026 
3027   MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
3028   return SplitBB;
3029 }
3030 
3031 /// Split block \p MBB at \p MI, so as to insert a loop. If \p InstInLoop is
3032 /// true, \p MI will be the only instruction in the loop body block. Otherwise,
3033 /// it will be the first instruction in the remainder block.
3034 ///
3035 /// \returns { LoopBody, Remainder }
3036 static std::pair<MachineBasicBlock *, MachineBasicBlock *>
3037 splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) {
3038   MachineFunction *MF = MBB.getParent();
3039   MachineBasicBlock::iterator I(&MI);
3040 
3041   // To insert the loop we need to split the block. Move everything after this
3042   // point to a new block, and insert a new empty block between the two.
3043   MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
3044   MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
3045   MachineFunction::iterator MBBI(MBB);
3046   ++MBBI;
3047 
3048   MF->insert(MBBI, LoopBB);
3049   MF->insert(MBBI, RemainderBB);
3050 
3051   LoopBB->addSuccessor(LoopBB);
3052   LoopBB->addSuccessor(RemainderBB);
3053 
3054   // Move the rest of the block into a new block.
3055   RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
3056 
3057   if (InstInLoop) {
3058     auto Next = std::next(I);
3059 
3060     // Move instruction to loop body.
3061     LoopBB->splice(LoopBB->begin(), &MBB, I, Next);
3062 
3063     // Move the rest of the block.
3064     RemainderBB->splice(RemainderBB->begin(), &MBB, Next, MBB.end());
3065   } else {
3066     RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
3067   }
3068 
3069   MBB.addSuccessor(LoopBB);
3070 
3071   return std::make_pair(LoopBB, RemainderBB);
3072 }
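
// Illustrative CFG after the split with InstInLoop == true (a sketch; block
// names are placeholders):
//
//   MBB:                      ; falls through to LoopBB
//     <instructions before MI>
//   LoopBB:                   ; successors: LoopBB, RemainderBB
//     MI
//   RemainderBB:              ; inherits MBB's original successors
//     <instructions after MI>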
3073 
3074 /// Insert \p MI into a BUNDLE with an S_WAITCNT 0 immediately following it.
3075 void SITargetLowering::bundleInstWithWaitcnt(MachineInstr &MI) const {
3076   MachineBasicBlock *MBB = MI.getParent();
3077   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3078   auto I = MI.getIterator();
3079   auto E = std::next(I);
3080 
3081   BuildMI(*MBB, E, MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
3082     .addImm(0);
3083 
3084   MIBundleBuilder Bundler(*MBB, I, E);
3085   finalizeBundle(*MBB, Bundler.begin());
3086 }
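
// The result is a bundle of the shape (illustrative):
//
//   BUNDLE {
//     <MI>
//     S_WAITCNT 0
//   }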
3087 
3088 MachineBasicBlock *
3089 SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI,
3090                                          MachineBasicBlock *BB) const {
3091   const DebugLoc &DL = MI.getDebugLoc();
3092 
3093   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3094 
3095   MachineBasicBlock *LoopBB;
3096   MachineBasicBlock *RemainderBB;
3097   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3098 
3099   // Apparently kill flags are only valid if the def is in the same block?
3100   if (MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0))
3101     Src->setIsKill(false);
3102 
3103   std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, *BB, true);
3104 
3105   MachineBasicBlock::iterator I = LoopBB->end();
3106 
3107   const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg(
3108     AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1);
3109 
3110   // Clear TRAP_STS.MEM_VIOL
3111   BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::S_SETREG_IMM32_B32))
3112     .addImm(0)
3113     .addImm(EncodedReg);
3114 
3115   bundleInstWithWaitcnt(MI);
3116 
3117   unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3118 
3119   // Load and check TRAP_STS.MEM_VIOL
3120   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg)
3121     .addImm(EncodedReg);
3122 
3123   // FIXME: Do we need to use an isel pseudo that may clobber scc?
3124   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32))
3125     .addReg(Reg, RegState::Kill)
3126     .addImm(0);
3127   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3128     .addMBB(LoopBB);
3129 
3130   return RemainderBB;
3131 }
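
// The emitted loop has roughly the following shape (a sketch; the GWS
// operation is the bundled \p MI and the register name is a placeholder):
//
//   LoopBB:
//     s_setreg_imm32_b32 <TRAPSTS.MEM_VIOL>, 0 ; clear MEM_VIOL
//     <GWS op bundled with s_waitcnt 0>
//     s_getreg_b32 s0, <TRAPSTS.MEM_VIOL>
//     s_cmp_lg_u32 s0, 0
//     s_cbranch_scc1 LoopBB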
3132 
3133 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
3134 // wavefront. If the value is uniform and just happens to be in a VGPR, this
3135 // will only do one iteration. In the worst case, this will loop 64 times.
3136 //
3137 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
3138 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
3139   const SIInstrInfo *TII,
3140   MachineRegisterInfo &MRI,
3141   MachineBasicBlock &OrigBB,
3142   MachineBasicBlock &LoopBB,
3143   const DebugLoc &DL,
3144   const MachineOperand &IdxReg,
3145   unsigned InitReg,
3146   unsigned ResultReg,
3147   unsigned PhiReg,
3148   unsigned InitSaveExecReg,
3149   int Offset,
3150   bool UseGPRIdxMode,
3151   bool IsIndirectSrc) {
3152   MachineFunction *MF = OrigBB.getParent();
3153   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3154   const SIRegisterInfo *TRI = ST.getRegisterInfo();
3155   MachineBasicBlock::iterator I = LoopBB.begin();
3156 
3157   const TargetRegisterClass *BoolRC = TRI->getBoolRC();
3158   unsigned PhiExec = MRI.createVirtualRegister(BoolRC);
3159   unsigned NewExec = MRI.createVirtualRegister(BoolRC);
3160   unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3161   unsigned CondReg = MRI.createVirtualRegister(BoolRC);
3162 
3163   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
3164     .addReg(InitReg)
3165     .addMBB(&OrigBB)
3166     .addReg(ResultReg)
3167     .addMBB(&LoopBB);
3168 
3169   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
3170     .addReg(InitSaveExecReg)
3171     .addMBB(&OrigBB)
3172     .addReg(NewExec)
3173     .addMBB(&LoopBB);
3174 
3175   // Read the next variant <- also loop target.
3176   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
3177     .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
3178 
3179   // Compare the just read M0 value to all possible Idx values.
3180   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
3181     .addReg(CurrentIdxReg)
3182     .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
3183 
3184   // Update EXEC, save the original EXEC value to VCC.
3185   BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32
3186                                                 : AMDGPU::S_AND_SAVEEXEC_B64),
3187           NewExec)
3188     .addReg(CondReg, RegState::Kill);
3189 
3190   MRI.setSimpleHint(NewExec, CondReg);
3191 
3192   if (UseGPRIdxMode) {
3193     unsigned IdxReg;
3194     if (Offset == 0) {
3195       IdxReg = CurrentIdxReg;
3196     } else {
3197       IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3198       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
3199         .addReg(CurrentIdxReg, RegState::Kill)
3200         .addImm(Offset);
3201     }
3202     unsigned IdxMode = IsIndirectSrc ?
3203       AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
3204     MachineInstr *SetOn =
3205       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3206       .addReg(IdxReg, RegState::Kill)
3207       .addImm(IdxMode);
3208     SetOn->getOperand(3).setIsUndef();
3209   } else {
3210     // Move index from VCC into M0
3211     if (Offset == 0) {
3212       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3213         .addReg(CurrentIdxReg, RegState::Kill);
3214     } else {
3215       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
3216         .addReg(CurrentIdxReg, RegState::Kill)
3217         .addImm(Offset);
3218     }
3219   }
3220 
3221   // Update EXEC, switch all done bits to 0 and all todo bits to 1.
3222   unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3223   MachineInstr *InsertPt =
3224     BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term
3225                                                   : AMDGPU::S_XOR_B64_term), Exec)
3226       .addReg(Exec)
3227       .addReg(NewExec);
3228 
3229   // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
3230   // s_cbranch_scc0?
3231 
3232   // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
3233   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
3234     .addMBB(&LoopBB);
3235 
3236   return InsertPt->getIterator();
3237 }
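
// For wave64 without GPR indexing mode, the loop is roughly (a sketch;
// register names are placeholders, and the indexed move itself is inserted
// by the caller at the returned iterator):
//
//   LoopBB:
//     v_readfirstlane_b32 s0, v1       ; read one lane's index
//     v_cmp_eq_u32_e64 s[2:3], s0, v1  ; mask of lanes with the same index
//     s_and_saveexec_b64 s[4:5], s[2:3]
//     s_mov_b32 m0, s0
//     <indexed v_movrels_b32 / v_movreld_b32>
//     s_xor_b64 exec, exec, s[4:5]
//     s_cbranch_execnz LoopBB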
3238 
3239 // This has slightly sub-optimal regalloc when the source vector is killed by
3240 // the read. The register allocator does not understand that the kill is
3241 // per-workitem, so the vector is kept alive for the whole loop. As a result we
3242 // do not reuse a subregister from it, using one more VGPR than necessary. This
3243 // extra VGPR was saved back when this was expanded after register allocation.
3244 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
3245                                                   MachineBasicBlock &MBB,
3246                                                   MachineInstr &MI,
3247                                                   unsigned InitResultReg,
3248                                                   unsigned PhiReg,
3249                                                   int Offset,
3250                                                   bool UseGPRIdxMode,
3251                                                   bool IsIndirectSrc) {
3252   MachineFunction *MF = MBB.getParent();
3253   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3254   const SIRegisterInfo *TRI = ST.getRegisterInfo();
3255   MachineRegisterInfo &MRI = MF->getRegInfo();
3256   const DebugLoc &DL = MI.getDebugLoc();
3257   MachineBasicBlock::iterator I(&MI);
3258 
3259   const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
3260   unsigned DstReg = MI.getOperand(0).getReg();
3261   unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC);
3262   unsigned TmpExec = MRI.createVirtualRegister(BoolXExecRC);
3263   unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3264   unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
3265 
3266   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
3267 
3268   // Save the EXEC mask
3269   BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec)
3270     .addReg(Exec);
3271 
3272   MachineBasicBlock *LoopBB;
3273   MachineBasicBlock *RemainderBB;
3274   std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, MBB, false);
3275 
3276   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3277 
3278   auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
3279                                       InitResultReg, DstReg, PhiReg, TmpExec,
3280                                       Offset, UseGPRIdxMode, IsIndirectSrc);
3281 
3282   MachineBasicBlock::iterator First = RemainderBB->begin();
3283   BuildMI(*RemainderBB, First, DL, TII->get(MovExecOpc), Exec)
3284     .addReg(SaveExec);
3285 
3286   return InsPt;
3287 }
3288 
3289 // Returns subreg index, offset
3290 static std::pair<unsigned, int>
3291 computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
3292                             const TargetRegisterClass *SuperRC,
3293                             unsigned VecReg,
3294                             int Offset) {
3295   int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
3296 
3297   // Skip out of bounds offsets, or else we would end up using an undefined
3298   // register.
3299   if (Offset >= NumElts || Offset < 0)
3300     return std::make_pair(AMDGPU::sub0, Offset);
3301 
3302   return std::make_pair(AMDGPU::sub0 + Offset, 0);
3303 }
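
// For example, for a 128-bit (4 dword) register class, Offset == 2 yields
// { AMDGPU::sub2, 0 }, while the out-of-bounds Offset == 7 yields
// { AMDGPU::sub0, 7 }, so the offset is instead folded into the index
// computation.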
3304 
3305 // Return true if the index is an SGPR and was set.
3306 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
3307                                  MachineRegisterInfo &MRI,
3308                                  MachineInstr &MI,
3309                                  int Offset,
3310                                  bool UseGPRIdxMode,
3311                                  bool IsIndirectSrc) {
3312   MachineBasicBlock *MBB = MI.getParent();
3313   const DebugLoc &DL = MI.getDebugLoc();
3314   MachineBasicBlock::iterator I(&MI);
3315 
3316   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3317   const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
3318 
3319   assert(Idx->getReg() != AMDGPU::NoRegister);
3320 
3321   if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
3322     return false;
3323 
3324   if (UseGPRIdxMode) {
3325     unsigned IdxMode = IsIndirectSrc ?
3326       AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
3327     if (Offset == 0) {
3328       MachineInstr *SetOn =
3329           BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3330               .add(*Idx)
3331               .addImm(IdxMode);
3332 
3333       SetOn->getOperand(3).setIsUndef();
3334     } else {
3335       unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3336       BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
3337           .add(*Idx)
3338           .addImm(Offset);
3339       MachineInstr *SetOn =
3340         BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3341         .addReg(Tmp, RegState::Kill)
3342         .addImm(IdxMode);
3343 
3344       SetOn->getOperand(3).setIsUndef();
3345     }
3346 
3347     return true;
3348   }
3349 
3350   if (Offset == 0) {
3351     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3352       .add(*Idx);
3353   } else {
3354     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
3355       .add(*Idx)
3356       .addImm(Offset);
3357   }
3358 
3359   return true;
3360 }
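
// For example, with an SGPR index in s0 (a placeholder name) and
// Offset == 4, the M0 path above emits: s_add_i32 m0, s0, 4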
3361 
3362 // Control flow needs to be inserted if indexing with a VGPR.
3363 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
3364                                           MachineBasicBlock &MBB,
3365                                           const GCNSubtarget &ST) {
3366   const SIInstrInfo *TII = ST.getInstrInfo();
3367   const SIRegisterInfo &TRI = TII->getRegisterInfo();
3368   MachineFunction *MF = MBB.getParent();
3369   MachineRegisterInfo &MRI = MF->getRegInfo();
3370 
3371   unsigned Dst = MI.getOperand(0).getReg();
3372   unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
3373   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3374 
3375   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
3376 
3377   unsigned SubReg;
3378   std::tie(SubReg, Offset)
3379     = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
3380 
3381   bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
3382 
3383   if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
3384     MachineBasicBlock::iterator I(&MI);
3385     const DebugLoc &DL = MI.getDebugLoc();
3386 
3387     if (UseGPRIdxMode) {
3388       // TODO: Look at the uses to avoid the copy. This may require rescheduling
3389       // to avoid interfering with other uses, so probably requires a new
3390       // optimization pass.
3391       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3392         .addReg(SrcReg, RegState::Undef, SubReg)
3393         .addReg(SrcReg, RegState::Implicit)
3394         .addReg(AMDGPU::M0, RegState::Implicit);
3395       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3396     } else {
3397       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3398         .addReg(SrcReg, RegState::Undef, SubReg)
3399         .addReg(SrcReg, RegState::Implicit);
3400     }
3401 
3402     MI.eraseFromParent();
3403 
3404     return &MBB;
3405   }
3406 
3407   const DebugLoc &DL = MI.getDebugLoc();
3408   MachineBasicBlock::iterator I(&MI);
3409 
3410   unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3411   unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3412 
3413   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
3414 
3415   auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
3416                               Offset, UseGPRIdxMode, true);
3417   MachineBasicBlock *LoopBB = InsPt->getParent();
3418 
3419   if (UseGPRIdxMode) {
3420     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3421       .addReg(SrcReg, RegState::Undef, SubReg)
3422       .addReg(SrcReg, RegState::Implicit)
3423       .addReg(AMDGPU::M0, RegState::Implicit);
3424     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3425   } else {
3426     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3427       .addReg(SrcReg, RegState::Undef, SubReg)
3428       .addReg(SrcReg, RegState::Implicit);
3429   }
3430 
3431   MI.eraseFromParent();
3432 
3433   return LoopBB;
3434 }
3435 
3436 static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
3437                                  const TargetRegisterClass *VecRC) {
3438   switch (TRI.getRegSizeInBits(*VecRC)) {
3439   case 32: // 4 bytes
3440     return AMDGPU::V_MOVRELD_B32_V1;
3441   case 64: // 8 bytes
3442     return AMDGPU::V_MOVRELD_B32_V2;
3443   case 128: // 16 bytes
3444     return AMDGPU::V_MOVRELD_B32_V4;
3445   case 256: // 32 bytes
3446     return AMDGPU::V_MOVRELD_B32_V8;
3447   case 512: // 64 bytes
3448     return AMDGPU::V_MOVRELD_B32_V16;
3449   default:
3450     llvm_unreachable("unsupported size for MOVRELD pseudos");
3451   }
3452 }
3453 
3454 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3455                                           MachineBasicBlock &MBB,
3456                                           const GCNSubtarget &ST) {
3457   const SIInstrInfo *TII = ST.getInstrInfo();
3458   const SIRegisterInfo &TRI = TII->getRegisterInfo();
3459   MachineFunction *MF = MBB.getParent();
3460   MachineRegisterInfo &MRI = MF->getRegInfo();
3461 
3462   unsigned Dst = MI.getOperand(0).getReg();
3463   const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3464   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3465   const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3466   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3467   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3468 
3469   // This can be an immediate, but will be folded later.
3470   assert(Val->getReg());
3471 
3472   unsigned SubReg;
3473   std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3474                                                          SrcVec->getReg(),
3475                                                          Offset);
3476   bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
3477 
3478   if (Idx->getReg() == AMDGPU::NoRegister) {
3479     MachineBasicBlock::iterator I(&MI);
3480     const DebugLoc &DL = MI.getDebugLoc();
3481 
3482     assert(Offset == 0);
3483 
3484     BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
3485         .add(*SrcVec)
3486         .add(*Val)
3487         .addImm(SubReg);
3488 
3489     MI.eraseFromParent();
3490     return &MBB;
3491   }
3492 
3493   if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
3494     MachineBasicBlock::iterator I(&MI);
3495     const DebugLoc &DL = MI.getDebugLoc();
3496 
3497     if (UseGPRIdxMode) {
3498       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3499           .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
3500           .add(*Val)
3501           .addReg(Dst, RegState::ImplicitDefine)
3502           .addReg(SrcVec->getReg(), RegState::Implicit)
3503           .addReg(AMDGPU::M0, RegState::Implicit);
3504 
3505       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3506     } else {
3507       const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3508 
3509       BuildMI(MBB, I, DL, MovRelDesc)
3510           .addReg(Dst, RegState::Define)
3511           .addReg(SrcVec->getReg())
3512           .add(*Val)
3513           .addImm(SubReg - AMDGPU::sub0);
3514     }
3515 
3516     MI.eraseFromParent();
3517     return &MBB;
3518   }
3519 
3520   if (Val->isReg())
3521     MRI.clearKillFlags(Val->getReg());
3522 
3523   const DebugLoc &DL = MI.getDebugLoc();
3524 
3525   unsigned PhiReg = MRI.createVirtualRegister(VecRC);
3526 
3527   auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
3528                               Offset, UseGPRIdxMode, false);
3529   MachineBasicBlock *LoopBB = InsPt->getParent();
3530 
3531   if (UseGPRIdxMode) {
3532     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3533         .addReg(PhiReg, RegState::Undef, SubReg) // vdst
3534         .add(*Val)                               // src0
3535         .addReg(Dst, RegState::ImplicitDefine)
3536         .addReg(PhiReg, RegState::Implicit)
3537         .addReg(AMDGPU::M0, RegState::Implicit);
3538     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3539   } else {
3540     const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3541 
3542     BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
3543         .addReg(Dst, RegState::Define)
3544         .addReg(PhiReg)
3545         .add(*Val)
3546         .addImm(SubReg - AMDGPU::sub0);
3547   }
3548 
3549   MI.eraseFromParent();
3550 
3551   return LoopBB;
3552 }
3553 
3554 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3555   MachineInstr &MI, MachineBasicBlock *BB) const {
3556 
3557   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3558   MachineFunction *MF = BB->getParent();
3559   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3560 
3561   if (TII->isMIMG(MI)) {
3562     if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3563       report_fatal_error("missing mem operand from MIMG instruction");
3564     }
3565     // Add a memoperand for mimg instructions so that they aren't assumed to
3566     // be ordered memory instructions.
3567 
3568     return BB;
3569   }
3570 
3571   switch (MI.getOpcode()) {
3572   case AMDGPU::S_ADD_U64_PSEUDO:
3573   case AMDGPU::S_SUB_U64_PSEUDO: {
3574     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3575     const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3576     const SIRegisterInfo *TRI = ST.getRegisterInfo();
3577     const TargetRegisterClass *BoolRC = TRI->getBoolRC();
3578     const DebugLoc &DL = MI.getDebugLoc();
3579 
3580     MachineOperand &Dest = MI.getOperand(0);
3581     MachineOperand &Src0 = MI.getOperand(1);
3582     MachineOperand &Src1 = MI.getOperand(2);
3583 
3584     unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3585     unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3586 
3587     MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3588      Src0, BoolRC, AMDGPU::sub0,
3589      &AMDGPU::SReg_32_XM0RegClass);
3590     MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3591       Src0, BoolRC, AMDGPU::sub1,
3592       &AMDGPU::SReg_32_XM0RegClass);
3593 
3594     MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3595       Src1, BoolRC, AMDGPU::sub0,
3596       &AMDGPU::SReg_32_XM0RegClass);
3597     MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3598       Src1, BoolRC, AMDGPU::sub1,
3599       &AMDGPU::SReg_32_XM0RegClass);
3600 
3601     bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3602 
3603     unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3604     unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3605     BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3606       .add(Src0Sub0)
3607       .add(Src1Sub0);
3608     BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3609       .add(Src0Sub1)
3610       .add(Src1Sub1);
3611     BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3612       .addReg(DestSub0)
3613       .addImm(AMDGPU::sub0)
3614       .addReg(DestSub1)
3615       .addImm(AMDGPU::sub1);
3616     MI.eraseFromParent();
3617     return BB;
3618   }
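  // Illustrative expansion of S_ADD_U64_PSEUDO (register names are
  // placeholders):
  //
  //   s_add_u32  s0, s2, s4  ; low half, defines SCC
  //   s_addc_u32 s1, s3, s5  ; high half, consumes SCC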
3619   case AMDGPU::SI_INIT_M0: {
3620     BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
3621             TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3622         .add(MI.getOperand(0));
3623     MI.eraseFromParent();
3624     return BB;
3625   }
3626   case AMDGPU::SI_INIT_EXEC:
3627     // This should be before all vector instructions.
3628     BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3629             AMDGPU::EXEC)
3630         .addImm(MI.getOperand(0).getImm());
3631     MI.eraseFromParent();
3632     return BB;
3633 
3634   case AMDGPU::SI_INIT_EXEC_LO:
3635     // This should be before all vector instructions.
3636     BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B32),
3637             AMDGPU::EXEC_LO)
3638         .addImm(MI.getOperand(0).getImm());
3639     MI.eraseFromParent();
3640     return BB;
3641 
3642   case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3643     // Extract the thread count from an SGPR input and set EXEC accordingly.
3644     // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3645     //
3646     // S_BFE_U32 count, input, {shift, 7}
3647     // S_BFM_B64 exec, count, 0
3648     // S_CMP_EQ_U32 count, 64
3649     // S_CMOV_B64 exec, -1
3650     MachineInstr *FirstMI = &*BB->begin();
3651     MachineRegisterInfo &MRI = MF->getRegInfo();
3652     unsigned InputReg = MI.getOperand(0).getReg();
3653     unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3654     bool Found = false;
3655 
3656     // Move the COPY of the input reg to the beginning, so that we can use it.
3657     for (auto I = BB->begin(); I != &MI; I++) {
3658       if (I->getOpcode() != TargetOpcode::COPY ||
3659           I->getOperand(0).getReg() != InputReg)
3660         continue;
3661 
3662       if (I == FirstMI) {
3663         FirstMI = &*++BB->begin();
3664       } else {
3665         I->removeFromParent();
3666         BB->insert(FirstMI, &*I);
3667       }
3668       Found = true;
3669       break;
3670     }
3671     assert(Found);
3672     (void)Found;
3673 
3674     // This should be before all vector instructions.
3675     unsigned Mask = (getSubtarget()->getWavefrontSize() << 1) - 1;
3676     bool isWave32 = getSubtarget()->isWave32();
3677     unsigned Exec = isWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3678     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3679         .addReg(InputReg)
3680         .addImm((MI.getOperand(1).getImm() & Mask) | 0x70000);
3681     BuildMI(*BB, FirstMI, DebugLoc(),
3682             TII->get(isWave32 ? AMDGPU::S_BFM_B32 : AMDGPU::S_BFM_B64),
3683             Exec)
3684         .addReg(CountReg)
3685         .addImm(0);
3686     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3687         .addReg(CountReg, RegState::Kill)
3688         .addImm(getSubtarget()->getWavefrontSize());
3689     BuildMI(*BB, FirstMI, DebugLoc(),
3690             TII->get(isWave32 ? AMDGPU::S_CMOV_B32 : AMDGPU::S_CMOV_B64),
3691             Exec)
3692         .addImm(-1);
3693     MI.eraseFromParent();
3694     return BB;
3695   }
3696 
3697   case AMDGPU::GET_GROUPSTATICSIZE: {
3698     assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA ||
3699            getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL);
3700     DebugLoc DL = MI.getDebugLoc();
3701     BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
3702         .add(MI.getOperand(0))
3703         .addImm(MFI->getLDSSize());
3704     MI.eraseFromParent();
3705     return BB;
3706   }
3707   case AMDGPU::SI_INDIRECT_SRC_V1:
3708   case AMDGPU::SI_INDIRECT_SRC_V2:
3709   case AMDGPU::SI_INDIRECT_SRC_V4:
3710   case AMDGPU::SI_INDIRECT_SRC_V8:
3711   case AMDGPU::SI_INDIRECT_SRC_V16:
3712     return emitIndirectSrc(MI, *BB, *getSubtarget());
3713   case AMDGPU::SI_INDIRECT_DST_V1:
3714   case AMDGPU::SI_INDIRECT_DST_V2:
3715   case AMDGPU::SI_INDIRECT_DST_V4:
3716   case AMDGPU::SI_INDIRECT_DST_V8:
3717   case AMDGPU::SI_INDIRECT_DST_V16:
3718     return emitIndirectDst(MI, *BB, *getSubtarget());
3719   case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3720   case AMDGPU::SI_KILL_I1_PSEUDO:
3721     return splitKillBlock(MI, BB);
3722   case AMDGPU::V_CNDMASK_B64_PSEUDO: {
3723     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3724     const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3725     const SIRegisterInfo *TRI = ST.getRegisterInfo();
3726 
3727     unsigned Dst = MI.getOperand(0).getReg();
3728     unsigned Src0 = MI.getOperand(1).getReg();
3729     unsigned Src1 = MI.getOperand(2).getReg();
3730     const DebugLoc &DL = MI.getDebugLoc();
3731     unsigned SrcCond = MI.getOperand(3).getReg();
3732 
3733     unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3734     unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3735     const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
3736     unsigned SrcCondCopy = MRI.createVirtualRegister(CondRC);
3737 
3738     BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3739       .addReg(SrcCond);
3740     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3741       .addImm(0)
3742       .addReg(Src0, 0, AMDGPU::sub0)
3743       .addImm(0)
3744       .addReg(Src1, 0, AMDGPU::sub0)
3745       .addReg(SrcCondCopy);
3746     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3747       .addImm(0)
3748       .addReg(Src0, 0, AMDGPU::sub1)
3749       .addImm(0)
3750       .addReg(Src1, 0, AMDGPU::sub1)
3751       .addReg(SrcCondCopy);
3752 
3753     BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3754       .addReg(DstLo)
3755       .addImm(AMDGPU::sub0)
3756       .addReg(DstHi)
3757       .addImm(AMDGPU::sub1);
3758     MI.eraseFromParent();
3759     return BB;
3760   }
3761   case AMDGPU::SI_BR_UNDEF: {
3762     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3763     const DebugLoc &DL = MI.getDebugLoc();
3764     MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3765                            .add(MI.getOperand(0));
3766     Br->getOperand(1).setIsUndef(true); // read undef SCC
3767     MI.eraseFromParent();
3768     return BB;
3769   }
3770   case AMDGPU::ADJCALLSTACKUP:
3771   case AMDGPU::ADJCALLSTACKDOWN: {
3772     const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3773     MachineInstrBuilder MIB(*MF, &MI);
3774 
3775     // Add an implicit use of the frame offset reg to prevent the restore copy
3776     // inserted after the call from being reordered after stack operations in
3777     // the caller's frame.
3778     MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
3779         .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit)
3780         .addReg(Info->getFrameOffsetReg(), RegState::Implicit);
3781     return BB;
3782   }
3783   case AMDGPU::SI_CALL_ISEL: {
3784     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3785     const DebugLoc &DL = MI.getDebugLoc();
3786 
3787     unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
3788 
3789     MachineInstrBuilder MIB;
3790     MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg);
3791 
3792     for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
3793       MIB.add(MI.getOperand(I));
3794 
3795     MIB.cloneMemRefs(MI);
3796     MI.eraseFromParent();
3797     return BB;
3798   }
3799   case AMDGPU::V_ADD_I32_e32:
3800   case AMDGPU::V_SUB_I32_e32:
3801   case AMDGPU::V_SUBREV_I32_e32: {
3802     // TODO: Define distinct V_*_I32_Pseudo instructions instead.
3803     const DebugLoc &DL = MI.getDebugLoc();
3804     unsigned Opc = MI.getOpcode();
3805 
3806     bool NeedClampOperand = false;
3807     if (TII->pseudoToMCOpcode(Opc) == -1) {
3808       Opc = AMDGPU::getVOPe64(Opc);
3809       NeedClampOperand = true;
3810     }
3811 
3812     auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg());
3813     if (TII->isVOP3(*I)) {
3814       const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3815       const SIRegisterInfo *TRI = ST.getRegisterInfo();
3816       I.addReg(TRI->getVCC(), RegState::Define);
3817     }
3818     I.add(MI.getOperand(1))
3819      .add(MI.getOperand(2));
3820     if (NeedClampOperand)
3821       I.addImm(0); // clamp bit for e64 encoding
3822 
3823     TII->legalizeOperands(*I);
3824 
3825     MI.eraseFromParent();
3826     return BB;
3827   }
3828   case AMDGPU::DS_GWS_INIT:
3829   case AMDGPU::DS_GWS_SEMA_V:
3830   case AMDGPU::DS_GWS_SEMA_BR:
3831   case AMDGPU::DS_GWS_SEMA_P:
3832   case AMDGPU::DS_GWS_SEMA_RELEASE_ALL:
3833   case AMDGPU::DS_GWS_BARRIER:
3834     // An s_waitcnt 0 is required to be the instruction immediately following.
3835     if (getSubtarget()->hasGWSAutoReplay()) {
3836       bundleInstWithWaitcnt(MI);
3837       return BB;
3838     }
3839 
3840     return emitGWSMemViolTestLoop(MI, BB);
3841   default:
3842     return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
3843   }
3844 }
3845 
3846 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3847   return isTypeLegal(VT.getScalarType());
3848 }
3849 
3850 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3851   // This currently forces unfolding various combinations of fsub into fma with
3852   // free fneg'd operands. As long as we have fast FMA (controlled by
3853   // isFMAFasterThanFMulAndFAdd), we should perform these.
3854 
3855   // When fma is quarter rate, for f64 where add / sub are at best half rate,
3856   // most of these combines appear to be cycle neutral but save on instruction
3857   // count / code size.
3858   return true;
3859 }
3860 
3861 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3862                                          EVT VT) const {
3863   if (!VT.isVector()) {
3864     return MVT::i1;
3865   }
3866   return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
3867 }
3868 
3869 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3870   // TODO: Should i16 be used always if legal? For now it would force VALU
3871   // shifts.
3872   return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
3873 }
3874 
3875 // Answering this is somewhat tricky and depends on the specific device, since
3876 // different devices have different rates for fma or for all f64 operations.
3877 //
3878 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3879 // regardless of which device (although the number of cycles differs between
3880 // devices), so it is always profitable for f64.
3881 //
3882 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3883 // only on full rate devices. Normally, we should prefer selecting v_mad_f32
3884 // which we can always do even without fused FP ops since it returns the same
3885 // result as the separate operations and since it is always full
3886 // rate. Therefore, we lie and report that it is not faster for f32. However,
3887 // v_mad_f32 does not support denormals, so we do report fma as faster if we
3888 // have a fast fma device and denormals are required.
3889 //
3890 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3891   VT = VT.getScalarType();
3892 
3893   switch (VT.getSimpleVT().SimpleTy) {
3894   case MVT::f32: {
3895     // fma is as fast as mul + add on some subtargets. However, we always have
3896     // full rate f32 mad available, which returns the same result as the
3897     // separate operations and which we should prefer over fma. mad can't be
3898     // used if we need to support denormals, so only report fma in that case.
3899     if (Subtarget->hasFP32Denormals())
3900       return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
3901 
3902     // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
3903     return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
3904   }
3905   case MVT::f64:
3906     return true;
3907   case MVT::f16:
3908     return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
3909   default:
3910     break;
3911   }
3912 
3913   return false;
3914 }
3915 
3916 //===----------------------------------------------------------------------===//
3917 // Custom DAG Lowering Operations
3918 //===----------------------------------------------------------------------===//
3919 
3920 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3921 // wider vector type is legal.
3922 SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
3923                                              SelectionDAG &DAG) const {
3924   unsigned Opc = Op.getOpcode();
3925   EVT VT = Op.getValueType();
3926   assert(VT == MVT::v4f16);
3927 
3928   SDValue Lo, Hi;
3929   std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3930 
3931   SDLoc SL(Op);
3932   SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
3933                              Op->getFlags());
3934   SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
3935                              Op->getFlags());
3936 
3937   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3938 }
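
// For example, (fneg v4f16:%x) becomes roughly (illustrative):
//
//   (v4f16 (concat_vectors (fneg v2f16:%lo), (fneg v2f16:%hi)))
//
// The binary and ternary splitters below follow the same pattern.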
3939 
3940 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3941 // wider vector type is legal.
3942 SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
3943                                               SelectionDAG &DAG) const {
3944   unsigned Opc = Op.getOpcode();
3945   EVT VT = Op.getValueType();
3946   assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3947 
3948   SDValue Lo0, Hi0;
3949   std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3950   SDValue Lo1, Hi1;
3951   std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3952 
3953   SDLoc SL(Op);
3954 
3955   SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
3956                              Op->getFlags());
3957   SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
3958                              Op->getFlags());
3959 
3960   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3961 }
3962 
3963 SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op,
3964                                                SelectionDAG &DAG) const {
3965   unsigned Opc = Op.getOpcode();
3966   EVT VT = Op.getValueType();
3967   assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3968 
3969   SDValue Lo0, Hi0;
3970   std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3971   SDValue Lo1, Hi1;
3972   std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3973   SDValue Lo2, Hi2;
3974   std::tie(Lo2, Hi2) = DAG.SplitVectorOperand(Op.getNode(), 2);
3975 
3976   SDLoc SL(Op);
3977 
3978   SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, Lo2,
3979                              Op->getFlags());
3980   SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, Hi2,
3981                              Op->getFlags());
3982 
3983   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3984 }
3985 
3987 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3988   switch (Op.getOpcode()) {
3989   default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
3990   case ISD::BRCOND: return LowerBRCOND(Op, DAG);
3991   case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
3992   case ISD::LOAD: {
3993     SDValue Result = LowerLOAD(Op, DAG);
3994     assert((!Result.getNode() ||
3995             Result.getNode()->getNumValues() == 2) &&
3996            "Load should return a value and a chain");
3997     return Result;
3998   }
3999 
4000   case ISD::FSIN:
4001   case ISD::FCOS:
4002     return LowerTrig(Op, DAG);
4003   case ISD::SELECT: return LowerSELECT(Op, DAG);
4004   case ISD::FDIV: return LowerFDIV(Op, DAG);
4005   case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
4006   case ISD::STORE: return LowerSTORE(Op, DAG);
4007   case ISD::GlobalAddress: {
4008     MachineFunction &MF = DAG.getMachineFunction();
4009     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
4010     return LowerGlobalAddress(MFI, Op, DAG);
4011   }
4012   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
4013   case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
4014   case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
4015   case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
4016   case ISD::INSERT_SUBVECTOR:
4017     return lowerINSERT_SUBVECTOR(Op, DAG);
4018   case ISD::INSERT_VECTOR_ELT:
4019     return lowerINSERT_VECTOR_ELT(Op, DAG);
4020   case ISD::EXTRACT_VECTOR_ELT:
4021     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
4022   case ISD::VECTOR_SHUFFLE:
4023     return lowerVECTOR_SHUFFLE(Op, DAG);
4024   case ISD::BUILD_VECTOR:
4025     return lowerBUILD_VECTOR(Op, DAG);
4026   case ISD::FP_ROUND:
4027     return lowerFP_ROUND(Op, DAG);
4028   case ISD::TRAP:
4029     return lowerTRAP(Op, DAG);
4030   case ISD::DEBUGTRAP:
4031     return lowerDEBUGTRAP(Op, DAG);
4032   case ISD::FABS:
4033   case ISD::FNEG:
4034   case ISD::FCANONICALIZE:
4035     return splitUnaryVectorOp(Op, DAG);
4036   case ISD::FMINNUM:
4037   case ISD::FMAXNUM:
4038     return lowerFMINNUM_FMAXNUM(Op, DAG);
4039   case ISD::FMA:
4040     return splitTernaryVectorOp(Op, DAG);
4041   case ISD::SHL:
4042   case ISD::SRA:
4043   case ISD::SRL:
4044   case ISD::ADD:
4045   case ISD::SUB:
4046   case ISD::MUL:
4047   case ISD::SMIN:
4048   case ISD::SMAX:
4049   case ISD::UMIN:
4050   case ISD::UMAX:
4051   case ISD::FADD:
4052   case ISD::FMUL:
4053   case ISD::FMINNUM_IEEE:
4054   case ISD::FMAXNUM_IEEE:
4055     return splitBinaryVectorOp(Op, DAG);
4056   }
4057   return SDValue();
4058 }
4059 
4060 static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
4061                                        const SDLoc &DL,
4062                                        SelectionDAG &DAG, bool Unpacked) {
4063   if (!LoadVT.isVector())
4064     return Result;
4065 
4066   if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
4067     // Truncate to v2i16/v4i16.
4068     EVT IntLoadVT = LoadVT.changeTypeToInteger();
4069 
4070     // Work around the legalizer not scalarizing truncate after vector op
4071     // legalization by not creating an intermediate vector trunc.
4072     SmallVector<SDValue, 4> Elts;
4073     DAG.ExtractVectorElements(Result, Elts);
4074     for (SDValue &Elt : Elts)
4075       Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
4076 
4077     Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
4078 
4079     // Bitcast to original type (v2f16/v4f16).
4080     return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
4081   }
4082 
4083   // Cast back to the original packed type.
4084   return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
4085 }
4086 
4087 SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
4088                                               MemSDNode *M,
4089                                               SelectionDAG &DAG,
4090                                               ArrayRef<SDValue> Ops,
4091                                               bool IsIntrinsic) const {
4092   SDLoc DL(M);
4093 
4094   bool Unpacked = Subtarget->hasUnpackedD16VMem();
4095   EVT LoadVT = M->getValueType(0);
4096 
4097   EVT EquivLoadVT = LoadVT;
  if (Unpacked && LoadVT.isVector()) {
    EquivLoadVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                   LoadVT.getVectorNumElements());
  }
4103 
4104   // Change from v4f16/v2f16 to EquivLoadVT.
4105   SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
4106 
4107   SDValue Load
4108     = DAG.getMemIntrinsicNode(
4109       IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
4110       VTList, Ops, M->getMemoryVT(),
4111       M->getMemOperand());
4112   if (!Unpacked) // Just adjusted the opcode.
4113     return Load;
4114 
4115   SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);
4116 
4117   return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
4118 }
4119 
4120 static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
4121                                   SDNode *N, SelectionDAG &DAG) {
4122   EVT VT = N->getValueType(0);
4123   const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
4124   int CondCode = CD->getSExtValue();
4125   if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
4126       CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
4127     return DAG.getUNDEF(VT);
4128 
4129   ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
4130 
4131   SDValue LHS = N->getOperand(1);
4132   SDValue RHS = N->getOperand(2);
4133 
4134   SDLoc DL(N);
4135 
4136   EVT CmpVT = LHS.getValueType();
4137   if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
4138     unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
4139       ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
4140     LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
4141     RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
4142   }
4143 
4144   ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
4145 
4146   unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
4147   EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
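  // The comparison result is a wavefront-sized lane mask: CCVT is i32 for
  // wave32 and i64 for wave64.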
4148 
4149   SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS,
4150                               DAG.getCondCode(CCOpcode));
4151   if (VT.bitsEq(CCVT))
4152     return SetCC;
4153   return DAG.getZExtOrTrunc(SetCC, DL, VT);
4154 }
4155 
4156 static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
4157                                   SDNode *N, SelectionDAG &DAG) {
4158   EVT VT = N->getValueType(0);
4159   const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
4160 
4161   int CondCode = CD->getSExtValue();
4162   if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
4163       CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) {
4164     return DAG.getUNDEF(VT);
4165   }
4166 
4167   SDValue Src0 = N->getOperand(1);
4168   SDValue Src1 = N->getOperand(2);
4169   EVT CmpVT = Src0.getValueType();
4170   SDLoc SL(N);
4171 
4172   if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
4173     Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
4174     Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
4175   }
4176 
4177   FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
4178   ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
4179   unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
4180   EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
4181   SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0,
4182                               Src1, DAG.getCondCode(CCOpcode));
4183   if (VT.bitsEq(CCVT))
4184     return SetCC;
4185   return DAG.getZExtOrTrunc(SetCC, SL, VT);
4186 }
4187 
4188 void SITargetLowering::ReplaceNodeResults(SDNode *N,
4189                                           SmallVectorImpl<SDValue> &Results,
4190                                           SelectionDAG &DAG) const {
4191   switch (N->getOpcode()) {
4192   case ISD::INSERT_VECTOR_ELT: {
4193     if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
4194       Results.push_back(Res);
4195     return;
4196   }
4197   case ISD::EXTRACT_VECTOR_ELT: {
4198     if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
4199       Results.push_back(Res);
4200     return;
4201   }
4202   case ISD::INTRINSIC_WO_CHAIN: {
4203     unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
4204     switch (IID) {
4205     case Intrinsic::amdgcn_cvt_pkrtz: {
4206       SDValue Src0 = N->getOperand(1);
4207       SDValue Src1 = N->getOperand(2);
4208       SDLoc SL(N);
4209       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
4210                                 Src0, Src1);
4211       Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
4212       return;
4213     }
4214     case Intrinsic::amdgcn_cvt_pknorm_i16:
4215     case Intrinsic::amdgcn_cvt_pknorm_u16:
4216     case Intrinsic::amdgcn_cvt_pk_i16:
4217     case Intrinsic::amdgcn_cvt_pk_u16: {
4218       SDValue Src0 = N->getOperand(1);
4219       SDValue Src1 = N->getOperand(2);
4220       SDLoc SL(N);
4221       unsigned Opcode;
4222 
4223       if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
4224         Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
4225       else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
4226         Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
4227       else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
4228         Opcode = AMDGPUISD::CVT_PK_I16_I32;
4229       else
4230         Opcode = AMDGPUISD::CVT_PK_U16_U32;
4231 
4232       EVT VT = N->getValueType(0);
4233       if (isTypeLegal(VT))
4234         Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
4235       else {
4236         SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
4237         Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
4238       }
4239       return;
4240     }
4241     }
4242     break;
4243   }
4244   case ISD::INTRINSIC_W_CHAIN: {
4245     if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
4246       Results.push_back(Res);
4247       Results.push_back(Res.getValue(1));
4248       return;
4249     }
4250 
4251     break;
4252   }
4253   case ISD::SELECT: {
4254     SDLoc SL(N);
4255     EVT VT = N->getValueType(0);
4256     EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
4257     SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
4258     SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
4259 
4260     EVT SelectVT = NewVT;
4261     if (NewVT.bitsLT(MVT::i32)) {
4262       LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
4263       RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
4264       SelectVT = MVT::i32;
4265     }
4266 
4267     SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
4268                                     N->getOperand(0), LHS, RHS);
4269 
4270     if (NewVT != SelectVT)
4271       NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
4272     Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
4273     return;
4274   }
4275   case ISD::FNEG: {
4276     if (N->getValueType(0) != MVT::v2f16)
4277       break;
4278 
4279     SDLoc SL(N);
4280     SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
4281 
4282     SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
4283                              BC,
4284                              DAG.getConstant(0x80008000, SL, MVT::i32));
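    // XOR with 0x80008000 flips bit 15 of each 16-bit half, negating both
    // f16 lanes in a single 32-bit operation.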
4285     Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
4286     return;
4287   }
4288   case ISD::FABS: {
4289     if (N->getValueType(0) != MVT::v2f16)
4290       break;
4291 
4292     SDLoc SL(N);
4293     SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
4294 
4295     SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
4296                              BC,
4297                              DAG.getConstant(0x7fff7fff, SL, MVT::i32));
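    // AND with 0x7fff7fff clears bit 15 of each 16-bit half, taking the
    // absolute value of both f16 lanes in a single 32-bit operation.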
4298     Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
4299     return;
4300   }
4301   default:
4302     break;
4303   }
4304 }
4305 
4306 /// Helper function for LowerBRCOND
static SDNode *findUser(SDValue Value, unsigned Opcode) {
  SDNode *Parent = Value.getNode();
  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
       I != E; ++I) {
    if (I.getUse().get() != Value)
      continue;

    if (I->getOpcode() == Opcode)
      return *I;
4318   }
4319   return nullptr;
4320 }
4321 
4322 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
4323   if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
4324     switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
4325     case Intrinsic::amdgcn_if:
4326       return AMDGPUISD::IF;
4327     case Intrinsic::amdgcn_else:
4328       return AMDGPUISD::ELSE;
4329     case Intrinsic::amdgcn_loop:
4330       return AMDGPUISD::LOOP;
4331     case Intrinsic::amdgcn_end_cf:
4332       llvm_unreachable("should not occur");
4333     default:
4334       return 0;
4335     }
4336   }
4337 
4338   // break, if_break, else_break are all only used as inputs to loop, not
4339   // directly as branch conditions.
4340   return 0;
4341 }
4342 
4343 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
4344   const Triple &TT = getTargetMachine().getTargetTriple();
4345   return (GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4346           GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4347          AMDGPU::shouldEmitConstantsToTextSection(TT);
4348 }
4349 
4350 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
4351   // FIXME: Either avoid relying on address space here or change the default
4352   // address space for functions to avoid the explicit check.
4353   return (GV->getValueType()->isFunctionTy() ||
4354           GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
4355           GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4356           GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4357          !shouldEmitFixup(GV) &&
4358          !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
4359 }
4360 
4361 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
4362   return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
4363 }
4364 
/// This transforms the control flow intrinsics to get the branch destination
/// as the last parameter, and also switches the branch target with BR if the
/// need arises.
4367 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
4368                                       SelectionDAG &DAG) const {
4369   SDLoc DL(BRCOND);
4370 
4371   SDNode *Intr = BRCOND.getOperand(1).getNode();
4372   SDValue Target = BRCOND.getOperand(2);
4373   SDNode *BR = nullptr;
4374   SDNode *SetCC = nullptr;
4375 
4376   if (Intr->getOpcode() == ISD::SETCC) {
    // As long as we negate the condition, everything is fine.
4378     SetCC = Intr;
4379     Intr = SetCC->getOperand(0).getNode();
4380 
4381   } else {
4382     // Get the target from BR if we don't negate the condition
4383     BR = findUser(BRCOND, ISD::BR);
4384     Target = BR->getOperand(1);
4385   }
4386 
4387   // FIXME: This changes the types of the intrinsics instead of introducing new
4388   // nodes with the correct types.
4389   // e.g. llvm.amdgcn.loop
4390 
  // e.g.: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
  //   =>  t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3,
  //           BasicBlock:ch<bb1 0x7fee5286d088>
4393 
4394   unsigned CFNode = isCFIntrinsic(Intr);
4395   if (CFNode == 0) {
4396     // This is a uniform branch so we don't need to legalize.
4397     return BRCOND;
4398   }
4399 
4400   bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
4401                    Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
4402 
4403   assert(!SetCC ||
4404         (SetCC->getConstantOperandVal(1) == 1 &&
4405          cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
4406                                                              ISD::SETNE));
4407 
  // Operands of the new intrinsic call.
4409   SmallVector<SDValue, 4> Ops;
4410   if (HaveChain)
4411     Ops.push_back(BRCOND.getOperand(0));
4412 
  Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
4414   Ops.push_back(Target);
4415 
4416   ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
4417 
  // Build the new intrinsic call.
4419   SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
4420 
4421   if (!HaveChain) {
4422     SDValue Ops[] =  {
4423       SDValue(Result, 0),
4424       BRCOND.getOperand(0)
4425     };
4426 
4427     Result = DAG.getMergeValues(Ops, DL).getNode();
4428   }
4429 
4430   if (BR) {
4431     // Give the branch instruction our target
4432     SDValue Ops[] = {
4433       BR->getOperand(0),
4434       BRCOND.getOperand(2)
4435     };
4436     SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
4437     DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
4438     BR = NewBR.getNode();
4439   }
4440 
4441   SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
4442 
4443   // Copy the intrinsic results to registers
4444   for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
4445     SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
4446     if (!CopyToReg)
4447       continue;
4448 
4449     Chain = DAG.getCopyToReg(
4450       Chain, DL,
4451       CopyToReg->getOperand(1),
4452       SDValue(Result, i - 1),
4453       SDValue());
4454 
4455     DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
4456   }
4457 
4458   // Remove the old intrinsic from the chain
4459   DAG.ReplaceAllUsesOfValueWith(
4460     SDValue(Intr, Intr->getNumValues() - 1),
4461     Intr->getOperand(0));
4462 
4463   return Chain;
4464 }
4465 
4466 SDValue SITargetLowering::LowerRETURNADDR(SDValue Op,
4467                                           SelectionDAG &DAG) const {
4468   MVT VT = Op.getSimpleValueType();
4469   SDLoc DL(Op);
  // Only frame depth 0 is supported; deeper frames return 0.
4471   if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0)
4472     return DAG.getConstant(0, DL, VT);
4473 
4474   MachineFunction &MF = DAG.getMachineFunction();
4475   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  // Entry functions (kernels and shaders) have no caller, hence no return
  // address.
4477   if (Info->isEntryFunction())
4478     return DAG.getConstant(0, DL, VT);
4479 
4480   MachineFrameInfo &MFI = MF.getFrameInfo();
4481   // There is a call to @llvm.returnaddress in this function
4482   MFI.setReturnAddressIsTaken(true);
4483 
4484   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
4485   // Get the return address reg and mark it as an implicit live-in
  unsigned Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                              getRegClassFor(VT, Op.getNode()->isDivergent()));
4487 
4488   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
4489 }
4490 
4491 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
4492                                             SDValue Op,
4493                                             const SDLoc &DL,
4494                                             EVT VT) const {
  // Note: a floating-point precision change must be ISD::FP_ROUND;
  // ISD::FTRUNC is round-to-integral and would be incorrect here.
  return Op.getValueType().bitsLE(VT)
             ? DAG.getNode(ISD::FP_EXTEND, DL, VT, Op)
             : DAG.getNode(ISD::FP_ROUND, DL, VT, Op,
                           DAG.getTargetConstant(0, DL, MVT::i32));
4498 }
4499 
4500 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
4501   assert(Op.getValueType() == MVT::f16 &&
4502          "Do not know how to custom lower FP_ROUND for non-f16 type");
4503 
4504   SDValue Src = Op.getOperand(0);
4505   EVT SrcVT = Src.getValueType();
4506   if (SrcVT != MVT::f64)
4507     return Op;
4508 
4509   SDLoc DL(Op);
4510 
4511   SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
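  // FP_TO_FP16 produces an i32 whose low 16 bits hold the f16 bit pattern;
  // truncate to i16 and bitcast to recover the f16 value.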
4512   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
4513   return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
4514 }
4515 
4516 SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
4517                                                SelectionDAG &DAG) const {
4518   EVT VT = Op.getValueType();
4519   const MachineFunction &MF = DAG.getMachineFunction();
4520   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4521   bool IsIEEEMode = Info->getMode().IEEE;
4522 
  // FIXME: Assert during selection that this is only selected for
  // ieee_mode. Currently a combine can produce the ieee version for non-ieee
  // mode functions, but this happens to be OK since it's only done in cases
  // where there is known to be no sNaN.
4527   if (IsIEEEMode)
4528     return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);
4529 
4530   if (VT == MVT::v4f16)
4531     return splitBinaryVectorOp(Op, DAG);
4532   return Op;
4533 }
4534 
4535 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
4536   SDLoc SL(Op);
4537   SDValue Chain = Op.getOperand(0);
4538 
4539   if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4540       !Subtarget->isTrapHandlerEnabled())
4541     return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
4542 
4543   MachineFunction &MF = DAG.getMachineFunction();
4544   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4545   unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4546   assert(UserSGPR != AMDGPU::NoRegister);
4547   SDValue QueuePtr = CreateLiveInRegister(
4548     DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
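  // The copy below passes the queue pointer to the trap handler in SGPR0/1,
  // which is where the HSA trap handler ABI expects it; the copy's glue is
  // threaded into the TRAP node.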
4549   SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
4550   SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
4551                                    QueuePtr, SDValue());
4552   SDValue Ops[] = {
4553     ToReg,
4554     DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16),
4555     SGPR01,
4556     ToReg.getValue(1)
4557   };
4558   return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4559 }
4560 
4561 SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
4562   SDLoc SL(Op);
4563   SDValue Chain = Op.getOperand(0);
4564   MachineFunction &MF = DAG.getMachineFunction();
4565 
4566   if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4567       !Subtarget->isTrapHandlerEnabled()) {
4568     DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
4569                                      "debugtrap handler not supported",
4570                                      Op.getDebugLoc(),
4571                                      DS_Warning);
4572     LLVMContext &Ctx = MF.getFunction().getContext();
4573     Ctx.diagnose(NoTrap);
4574     return Chain;
4575   }
4576 
4577   SDValue Ops[] = {
4578     Chain,
4579     DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16)
4580   };
4581   return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4582 }
4583 
4584 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
4585                                              SelectionDAG &DAG) const {
4586   // FIXME: Use inline constants (src_{shared, private}_base) instead.
4587   if (Subtarget->hasApertureRegs()) {
4588     unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
4589         AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
4590         AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
4591     unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
4592         AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
4593         AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
4594     unsigned Encoding =
4595         AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
4596         Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
4597         WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
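    // This forms the s_getreg_b32 simm16 operand: bits [5:0] hold the hwreg
    // id, bits [10:6] the field's bit offset, and bits [15:11] its width - 1.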
4598 
4599     SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
4600     SDValue ApertureReg = SDValue(
4601         DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
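    // s_getreg_b32 returns the field in the low bits of the result, so shift
    // left by the field width (WidthM1 + 1) to move the aperture bits back
    // into their architectural position.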
4602     SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
4603     return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
4604   }
4605 
4606   MachineFunction &MF = DAG.getMachineFunction();
4607   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4608   unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4609   assert(UserSGPR != AMDGPU::NoRegister);
4610 
4611   SDValue QueuePtr = CreateLiveInRegister(
4612     DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4613 
4614   // Offset into amd_queue_t for group_segment_aperture_base_hi /
4615   // private_segment_aperture_base_hi.
4616   uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
4617 
4618   SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
4619 
4620   // TODO: Use custom target PseudoSourceValue.
  // TODO: We should use the value from the IR intrinsic call, but it might
  // not be available here, and how would we get it?
4623   Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
4624                                               AMDGPUAS::CONSTANT_ADDRESS));
4625 
4626   MachinePointerInfo PtrInfo(V, StructOffset);
4627   return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
4628                      MinAlign(64, StructOffset),
4629                      MachineMemOperand::MODereferenceable |
4630                          MachineMemOperand::MOInvariant);
4631 }
4632 
4633 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
4634                                              SelectionDAG &DAG) const {
4635   SDLoc SL(Op);
4636   const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
4637 
4638   SDValue Src = ASC->getOperand(0);
4639   SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
4640 
4641   const AMDGPUTargetMachine &TM =
4642     static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
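  // A worked example, assuming the usual null values (0 for flat, all-ones
  // for the 32-bit segments) reported by getNullPointerValue():
  //   flat -> local:  x == 0 ? 0xffffffff : trunc(x to i32)
  //   local -> flat:  x == 0xffffffff ? 0 : (aperture_hi << 32) | zext(x)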
4643 
4644   // flat -> local/private
4645   if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
4646     unsigned DestAS = ASC->getDestAddressSpace();
4647 
4648     if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
4649         DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
4650       unsigned NullVal = TM.getNullPointerValue(DestAS);
4651       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4652       SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
4653       SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
4654 
4655       return DAG.getNode(ISD::SELECT, SL, MVT::i32,
4656                          NonNull, Ptr, SegmentNullPtr);
4657     }
4658   }
4659 
4660   // local/private -> flat
4661   if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
4662     unsigned SrcAS = ASC->getSrcAddressSpace();
4663 
4664     if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
4665         SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
4666       unsigned NullVal = TM.getNullPointerValue(SrcAS);
4667       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4668 
4669       SDValue NonNull
4670         = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
4671 
4672       SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
4673       SDValue CvtPtr
4674         = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
4675 
4676       return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
4677                          DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
4678                          FlatNullPtr);
4679     }
4680   }
4681 
4682   // global <-> flat are no-ops and never emitted.
4683 
4684   const MachineFunction &MF = DAG.getMachineFunction();
4685   DiagnosticInfoUnsupported InvalidAddrSpaceCast(
4686     MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
4687   DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4688 
4689   return DAG.getUNDEF(ASC->getValueType(0));
4690 }
4691 
4692 // This lowers an INSERT_SUBVECTOR by extracting the individual elements from
4693 // the small vector and inserting them into the big vector. That is better than
4694 // the default expansion of doing it via a stack slot. Even though the use of
4695 // the stack slot would be optimized away afterwards, the stack slot itself
4696 // remains.
4697 SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
4698                                                 SelectionDAG &DAG) const {
4699   SDValue Vec = Op.getOperand(0);
4700   SDValue Ins = Op.getOperand(1);
4701   SDValue Idx = Op.getOperand(2);
4702   EVT VecVT = Vec.getValueType();
4703   EVT InsVT = Ins.getValueType();
4704   EVT EltVT = VecVT.getVectorElementType();
4705   unsigned InsNumElts = InsVT.getVectorNumElements();
4706   unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
4707   SDLoc SL(Op);
4708 
4709   for (unsigned I = 0; I != InsNumElts; ++I) {
4710     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins,
4711                               DAG.getConstant(I, SL, MVT::i32));
4712     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt,
4713                       DAG.getConstant(IdxVal + I, SL, MVT::i32));
4714   }
4715   return Vec;
4716 }
4717 
4718 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4719                                                  SelectionDAG &DAG) const {
4720   SDValue Vec = Op.getOperand(0);
4721   SDValue InsVal = Op.getOperand(1);
4722   SDValue Idx = Op.getOperand(2);
4723   EVT VecVT = Vec.getValueType();
4724   EVT EltVT = VecVT.getVectorElementType();
4725   unsigned VecSize = VecVT.getSizeInBits();
  unsigned EltSize = EltVT.getSizeInBits();

4729   assert(VecSize <= 64);
4730 
4731   unsigned NumElts = VecVT.getVectorNumElements();
4732   SDLoc SL(Op);
4733   auto KIdx = dyn_cast<ConstantSDNode>(Idx);
4734 
4735   if (NumElts == 4 && EltSize == 16 && KIdx) {
4736     SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
4737 
4738     SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4739                                  DAG.getConstant(0, SL, MVT::i32));
4740     SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4741                                  DAG.getConstant(1, SL, MVT::i32));
4742 
4743     SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
4744     SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
4745 
4746     unsigned Idx = KIdx->getZExtValue();
4747     bool InsertLo = Idx < 2;
4748     SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
4749       InsertLo ? LoVec : HiVec,
4750       DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
4751       DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
4752 
4753     InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
4754 
4755     SDValue Concat = InsertLo ?
4756       DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
4757       DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
4758 
4759     return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
4760   }
4761 
4762   if (isa<ConstantSDNode>(Idx))
4763     return SDValue();
4764 
4765   MVT IntVT = MVT::getIntegerVT(VecSize);
4766 
4767   // Avoid stack access for dynamic indexing.
4768   // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
4769 
4770   // Create a congruent vector with the target value in each element so that
4771   // the required element can be masked and ORed into the target vector.
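  // e.g. to insert i16 %val at dynamic index %n into v4i16:
  //   mask   = 0xffff << (%n * 16)
  //   result = (splat(%val) & mask) | ((bitcast vec to i64) & ~mask)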
4772   SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT,
4773                                DAG.getSplatBuildVector(VecVT, SL, InsVal));
4774 
4775   assert(isPowerOf2_32(EltSize));
4776   SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4777 
4778   // Convert vector index to bit-index.
4779   SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
4780 
4781   SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4782   SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
4783                             DAG.getConstant(0xffff, SL, IntVT),
4784                             ScaledIdx);
4785 
4786   SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);
4787   SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT,
4788                             DAG.getNOT(SL, BFM, IntVT), BCVec);
4789 
4790   SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS);
4791   return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI);
4792 }
4793 
4794 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4795                                                   SelectionDAG &DAG) const {
4796   SDLoc SL(Op);
4797 
4798   EVT ResultVT = Op.getValueType();
4799   SDValue Vec = Op.getOperand(0);
4800   SDValue Idx = Op.getOperand(1);
4801   EVT VecVT = Vec.getValueType();
4802   unsigned VecSize = VecVT.getSizeInBits();
4803   EVT EltVT = VecVT.getVectorElementType();
4804   assert(VecSize <= 64);
4805 
4806   DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
4807 
4808   // Make sure we do any optimizations that will make it easier to fold
4809   // source modifiers before obscuring it with bit operations.
4810 
4811   // XXX - Why doesn't this get called when vector_shuffle is expanded?
4812   if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
4813     return Combined;
4814 
4815   unsigned EltSize = EltVT.getSizeInBits();
4816   assert(isPowerOf2_32(EltSize));
4817 
4818   MVT IntVT = MVT::getIntegerVT(VecSize);
4819   SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4820 
4821   // Convert vector index to bit-index (* EltSize)
4822   SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
4823 
4824   SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4825   SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx);
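  // e.g. extracting lane %n from v4i16 computes
  //   trunc((bitcast vec to i64) >> (16 * %n)) to i16.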
4826 
4827   if (ResultVT == MVT::f16) {
4828     SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt);
4829     return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4830   }
4831 
4832   return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT);
4833 }
4834 
4835 static bool elementPairIsContiguous(ArrayRef<int> Mask, int Elt) {
4836   assert(Elt % 2 == 0);
4837   return Mask[Elt + 1] == Mask[Elt] + 1 && (Mask[Elt] % 2 == 0);
4838 }
4839 
4840 SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
4841                                               SelectionDAG &DAG) const {
4842   SDLoc SL(Op);
4843   EVT ResultVT = Op.getValueType();
4844   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
4845 
4846   EVT PackVT = ResultVT.isInteger() ? MVT::v2i16 : MVT::v2f16;
4847   EVT EltVT = PackVT.getVectorElementType();
4848   int SrcNumElts = Op.getOperand(0).getValueType().getVectorNumElements();
4849 
4850   // vector_shuffle <0,1,6,7> lhs, rhs
4851   // -> concat_vectors (extract_subvector lhs, 0), (extract_subvector rhs, 2)
4852   //
4853   // vector_shuffle <6,7,2,3> lhs, rhs
4854   // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 2)
4855   //
4856   // vector_shuffle <6,7,0,1> lhs, rhs
4857   // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 0)
4858 
4859   // Avoid scalarizing when both halves are reading from consecutive elements.
4860   SmallVector<SDValue, 4> Pieces;
4861   for (int I = 0, N = ResultVT.getVectorNumElements(); I != N; I += 2) {
4862     if (elementPairIsContiguous(SVN->getMask(), I)) {
4863       const int Idx = SVN->getMaskElt(I);
4864       int VecIdx = Idx < SrcNumElts ? 0 : 1;
4865       int EltIdx = Idx < SrcNumElts ? Idx : Idx - SrcNumElts;
4866       SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL,
4867                                     PackVT, SVN->getOperand(VecIdx),
4868                                     DAG.getConstant(EltIdx, SL, MVT::i32));
4869       Pieces.push_back(SubVec);
4870     } else {
4871       const int Idx0 = SVN->getMaskElt(I);
4872       const int Idx1 = SVN->getMaskElt(I + 1);
4873       int VecIdx0 = Idx0 < SrcNumElts ? 0 : 1;
4874       int VecIdx1 = Idx1 < SrcNumElts ? 0 : 1;
4875       int EltIdx0 = Idx0 < SrcNumElts ? Idx0 : Idx0 - SrcNumElts;
4876       int EltIdx1 = Idx1 < SrcNumElts ? Idx1 : Idx1 - SrcNumElts;
4877 
4878       SDValue Vec0 = SVN->getOperand(VecIdx0);
4879       SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
4880                                  Vec0, DAG.getConstant(EltIdx0, SL, MVT::i32));
4881 
4882       SDValue Vec1 = SVN->getOperand(VecIdx1);
4883       SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
4884                                  Vec1, DAG.getConstant(EltIdx1, SL, MVT::i32));
4885       Pieces.push_back(DAG.getBuildVector(PackVT, SL, { Elt0, Elt1 }));
4886     }
4887   }
4888 
4889   return DAG.getNode(ISD::CONCAT_VECTORS, SL, ResultVT, Pieces);
4890 }
4891 
4892 SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
4893                                             SelectionDAG &DAG) const {
4894   SDLoc SL(Op);
4895   EVT VT = Op.getValueType();
4896 
4897   if (VT == MVT::v4i16 || VT == MVT::v4f16) {
4898     EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2);
4899 
4900     // Turn into pair of packed build_vectors.
4901     // TODO: Special case for constants that can be materialized with s_mov_b64.
4902     SDValue Lo = DAG.getBuildVector(HalfVT, SL,
4903                                     { Op.getOperand(0), Op.getOperand(1) });
4904     SDValue Hi = DAG.getBuildVector(HalfVT, SL,
4905                                     { Op.getOperand(2), Op.getOperand(3) });
4906 
4907     SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo);
4908     SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi);
4909 
4910     SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi });
4911     return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
4912   }
4913 
4914   assert(VT == MVT::v2f16 || VT == MVT::v2i16);
4915   assert(!Subtarget->hasVOP3PInsts() && "this should be legal");
4916 
4917   SDValue Lo = Op.getOperand(0);
4918   SDValue Hi = Op.getOperand(1);
4919 
4920   // Avoid adding defined bits with the zero_extend.
4921   if (Hi.isUndef()) {
4922     Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4923     SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo);
4924     return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo);
4925   }
4926 
4927   Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi);
4928   Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi);
4929 
4930   SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi,
4931                               DAG.getConstant(16, SL, MVT::i32));
4932   if (Lo.isUndef())
4933     return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi);
4934 
4935   Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4936   Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo);
4937 
4938   SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi);
4939   return DAG.getNode(ISD::BITCAST, SL, VT, Or);
4940 }
4941 
4942 bool
4943 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4944   // We can fold offsets for anything that doesn't require a GOT relocation.
4945   return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
4946           GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4947           GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4948          !shouldEmitGOTReloc(GA->getGlobal());
4949 }
4950 
4951 static SDValue
4952 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
4953                         const SDLoc &DL, unsigned Offset, EVT PtrVT,
4954                         unsigned GAFlags = SIInstrInfo::MO_NONE) {
4955   // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
4956   // lowered to the following code sequence:
4957   //
4958   // For constant address space:
4959   //   s_getpc_b64 s[0:1]
4960   //   s_add_u32 s0, s0, $symbol
4961   //   s_addc_u32 s1, s1, 0
4962   //
4963   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
4964   //   a fixup or relocation is emitted to replace $symbol with a literal
4965   //   constant, which is a pc-relative offset from the encoding of the $symbol
4966   //   operand to the global variable.
4967   //
4968   // For global address space:
4969   //   s_getpc_b64 s[0:1]
4970   //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
4971   //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
4972   //
4973   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
4974   //   fixups or relocations are emitted to replace $symbol@*@lo and
4975   //   $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
4976   //   which is a 64-bit pc-relative offset from the encoding of the $symbol
4977   //   operand to the global variable.
4978   //
4979   // What we want here is an offset from the value returned by s_getpc
4980   // (which is the address of the s_add_u32 instruction) to the global
4981   // variable, but since the encoding of $symbol starts 4 bytes after the start
4982   // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
4983   // small. This requires us to add 4 to the global variable offset in order to
4984   // compute the correct address.
4985   unsigned LoFlags = GAFlags;
4986   if (LoFlags == SIInstrInfo::MO_NONE)
4987     LoFlags = SIInstrInfo::MO_REL32;
4988   SDValue PtrLo =
4989       DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, LoFlags);
4990   SDValue PtrHi;
4991   if (GAFlags == SIInstrInfo::MO_NONE) {
4992     PtrHi = DAG.getTargetConstant(0, DL, MVT::i32);
4993   } else {
4994     PtrHi =
4995         DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags + 1);
4996   }
4997   return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
4998 }
4999 
5000 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
5001                                              SDValue Op,
5002                                              SelectionDAG &DAG) const {
5003   GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
5004   const GlobalValue *GV = GSD->getGlobal();
5005   if ((GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
5006        (!GV->hasExternalLinkage() ||
5007         getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA ||
5008         getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL)) ||
5009       GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS ||
5010       GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS)
5011     return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
5012 
5013   SDLoc DL(GSD);
5014   EVT PtrVT = Op.getValueType();
5015 
5016   if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
5017     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, GSD->getOffset(),
5018                                             SIInstrInfo::MO_ABS32_LO);
5019     return DAG.getNode(AMDGPUISD::LDS, DL, MVT::i32, GA);
5020   }
5021 
5022   if (shouldEmitFixup(GV))
5023     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
5024   else if (shouldEmitPCReloc(GV))
5025     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
5026                                    SIInstrInfo::MO_REL32);
5027 
5028   SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
5029                                             SIInstrInfo::MO_GOTPCREL32);
5030 
5031   Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
5032   PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
5033   const DataLayout &DataLayout = DAG.getDataLayout();
5034   unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
5035   MachinePointerInfo PtrInfo
5036     = MachinePointerInfo::getGOT(DAG.getMachineFunction());
5037 
5038   return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
5039                      MachineMemOperand::MODereferenceable |
5040                          MachineMemOperand::MOInvariant);
5041 }
5042 
5043 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
5044                                    const SDLoc &DL, SDValue V) const {
5045   // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
5046   // the destination register.
5047   //
5048   // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
5049   // so we will end up with redundant moves to m0.
5050   //
5051   // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
5052 
5053   // A Null SDValue creates a glue result.
5054   SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
5055                                   V, Chain);
5056   return SDValue(M0, 0);
5057 }
5058 
5059 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
5060                                                  SDValue Op,
5061                                                  MVT VT,
5062                                                  unsigned Offset) const {
5063   SDLoc SL(Op);
5064   SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
5065                                            DAG.getEntryNode(), Offset, 4, false);
5066   // The local size values will have the hi 16-bits as zero.
5067   return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
5068                      DAG.getValueType(VT));
5069 }
5070 
5071 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
5072                                         EVT VT) {
5073   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
5074                                       "non-hsa intrinsic with hsa target",
5075                                       DL.getDebugLoc());
5076   DAG.getContext()->diagnose(BadIntrin);
5077   return DAG.getUNDEF(VT);
5078 }
5079 
5080 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
5081                                          EVT VT) {
5082   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
5083                                       "intrinsic not supported on subtarget",
5084                                       DL.getDebugLoc());
5085   DAG.getContext()->diagnose(BadIntrin);
5086   return DAG.getUNDEF(VT);
5087 }
5088 
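// Build a vector of the next supported register size (1, 2, 4, 8 or 16 x
// f32) from Elts, bitcasting each element to f32 and padding the tail with
// undef.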
5089 static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
5090                                     ArrayRef<SDValue> Elts) {
5091   assert(!Elts.empty());
5092   MVT Type;
5093   unsigned NumElts;
5094 
5095   if (Elts.size() == 1) {
5096     Type = MVT::f32;
5097     NumElts = 1;
5098   } else if (Elts.size() == 2) {
5099     Type = MVT::v2f32;
5100     NumElts = 2;
5101   } else if (Elts.size() <= 4) {
5102     Type = MVT::v4f32;
5103     NumElts = 4;
5104   } else if (Elts.size() <= 8) {
5105     Type = MVT::v8f32;
5106     NumElts = 8;
5107   } else {
5108     assert(Elts.size() <= 16);
5109     Type = MVT::v16f32;
5110     NumElts = 16;
5111   }
5112 
5113   SmallVector<SDValue, 16> VecElts(NumElts);
5114   for (unsigned i = 0; i < Elts.size(); ++i) {
5115     SDValue Elt = Elts[i];
5116     if (Elt.getValueType() != MVT::f32)
5117       Elt = DAG.getBitcast(MVT::f32, Elt);
5118     VecElts[i] = Elt;
5119   }
5120   for (unsigned i = Elts.size(); i < NumElts; ++i)
5121     VecElts[i] = DAG.getUNDEF(MVT::f32);
5122 
5123   if (NumElts == 1)
5124     return VecElts[0];
5125   return DAG.getBuildVector(Type, DL, VecElts);
5126 }
5127 
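// Decode the cachepolicy immediate of an image intrinsic: bit 0 = glc,
// bit 1 = slc, bit 2 = dlc (gfx10+). Returns true iff no unknown bits remain.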
5128 static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG,
5129                              SDValue *GLC, SDValue *SLC, SDValue *DLC) {
5130   auto CachePolicyConst = cast<ConstantSDNode>(CachePolicy.getNode());
5131 
5132   uint64_t Value = CachePolicyConst->getZExtValue();
5133   SDLoc DL(CachePolicy);
5134   if (GLC) {
5135     *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
5136     Value &= ~(uint64_t)0x1;
5137   }
5138   if (SLC) {
5139     *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
5140     Value &= ~(uint64_t)0x2;
5141   }
5142   if (DLC) {
5143     *DLC = DAG.getTargetConstant((Value & 0x4) ? 1 : 0, DL, MVT::i32);
5144     Value &= ~(uint64_t)0x4;
5145   }
5146 
5147   return Value == 0;
5148 }
5149 
// Reconstruct the required return value for an image load intrinsic.
// This is more complicated due to the optional use of TexFailCtrl, which
// means the required return type is an aggregate.
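// For example (a sketch, assuming packed D16): v4f16 data with TFE enabled
// needs two data dwords plus one status dword, so the instruction returns
// three dwords; the first two are bitcast back to v4f16 and the third
// becomes the i32 texfail flag in the aggregate result.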
5153 static SDValue constructRetValue(SelectionDAG &DAG,
5154                                  MachineSDNode *Result,
5155                                  ArrayRef<EVT> ResultTypes,
5156                                  bool IsTexFail, bool Unpacked, bool IsD16,
5157                                  int DMaskPop, int NumVDataDwords,
5158                                  const SDLoc &DL, LLVMContext &Context) {
  // Determine the required return type. This is the same regardless of the
  // IsTexFail flag.
5160   EVT ReqRetVT = ResultTypes[0];
5161   EVT ReqRetEltVT = ReqRetVT.isVector() ? ReqRetVT.getVectorElementType() : ReqRetVT;
5162   int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1;
5163   EVT AdjEltVT = Unpacked && IsD16 ? MVT::i32 : ReqRetEltVT;
5164   EVT AdjVT = Unpacked ? ReqRetNumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, ReqRetNumElts)
5165                                            : AdjEltVT
5166                        : ReqRetVT;
5167 
5168   // Extract data part of the result
5169   // Bitcast the result to the same type as the required return type
5170   int NumElts;
5171   if (IsD16 && !Unpacked)
5172     NumElts = NumVDataDwords << 1;
5173   else
5174     NumElts = NumVDataDwords;
5175 
5176   EVT CastVT = NumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, NumElts)
5177                            : AdjEltVT;
5178 
5179   // Special case for v6f16. Rather than add support for this, use v3i32 to
5180   // extract the data elements
5181   bool V6F16Special = false;
5182   if (NumElts == 6) {
5183     CastVT = EVT::getVectorVT(Context, MVT::i32, NumElts / 2);
5184     DMaskPop >>= 1;
5185     ReqRetNumElts >>= 1;
5186     V6F16Special = true;
5187     AdjVT = MVT::v2i32;
5188   }
5189 
5190   SDValue N = SDValue(Result, 0);
5191   SDValue CastRes = DAG.getNode(ISD::BITCAST, DL, CastVT, N);
5192 
5193   // Iterate over the result
5194   SmallVector<SDValue, 4> BVElts;
5195 
5196   if (CastVT.isVector()) {
5197     DAG.ExtractVectorElements(CastRes, BVElts, 0, DMaskPop);
5198   } else {
5199     BVElts.push_back(CastRes);
5200   }
5201   int ExtraElts = ReqRetNumElts - DMaskPop;
5202   while(ExtraElts--)
5203     BVElts.push_back(DAG.getUNDEF(AdjEltVT));
5204 
5205   SDValue PreTFCRes;
5206   if (ReqRetNumElts > 1) {
5207     SDValue NewVec = DAG.getBuildVector(AdjVT, DL, BVElts);
5208     if (IsD16 && Unpacked)
5209       PreTFCRes = adjustLoadValueTypeImpl(NewVec, ReqRetVT, DL, DAG, Unpacked);
5210     else
5211       PreTFCRes = NewVec;
5212   } else {
5213     PreTFCRes = BVElts[0];
5214   }
5215 
5216   if (V6F16Special)
5217     PreTFCRes = DAG.getNode(ISD::BITCAST, DL, MVT::v4f16, PreTFCRes);
5218 
5219   if (!IsTexFail) {
5220     if (Result->getNumValues() > 1)
5221       return DAG.getMergeValues({PreTFCRes, SDValue(Result, 1)}, DL);
5222     else
5223       return PreTFCRes;
5224   }
5225 
5226   // Extract the TexFail result and insert into aggregate return
5227   SmallVector<SDValue, 1> TFCElt;
5228   DAG.ExtractVectorElements(N, TFCElt, DMaskPop, 1);
5229   SDValue TFCRes = DAG.getNode(ISD::BITCAST, DL, ResultTypes[1], TFCElt[0]);
5230   return DAG.getMergeValues({PreTFCRes, TFCRes, SDValue(Result, 1)}, DL);
5231 }
5232 
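// Decode the texfailctrl immediate of an image intrinsic: bit 0 = tfe,
// bit 1 = lwe. IsTexFail is set if either bit is on; returns true iff no
// unknown bits remain.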
5233 static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE,
5234                          SDValue *LWE, bool &IsTexFail) {
5235   auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode());
5236 
5237   uint64_t Value = TexFailCtrlConst->getZExtValue();
5238   if (Value) {
5239     IsTexFail = true;
5240   }
5241 
5242   SDLoc DL(TexFailCtrlConst);
5243   *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
5244   Value &= ~(uint64_t)0x1;
5245   *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
5246   Value &= ~(uint64_t)0x2;
5247 
5248   return Value == 0;
5249 }
5250 
5251 SDValue SITargetLowering::lowerImage(SDValue Op,
5252                                      const AMDGPU::ImageDimIntrinsicInfo *Intr,
5253                                      SelectionDAG &DAG) const {
5254   SDLoc DL(Op);
5255   MachineFunction &MF = DAG.getMachineFunction();
5256   const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>();
5257   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
5258       AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
5259   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
5260   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
5261       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
5262   const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
5263       AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
5264   unsigned IntrOpcode = Intr->BaseOpcode;
5265   bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10;
5266 
5267   SmallVector<EVT, 3> ResultTypes(Op->value_begin(), Op->value_end());
5268   SmallVector<EVT, 3> OrigResultTypes(Op->value_begin(), Op->value_end());
5269   bool IsD16 = false;
5270   bool IsA16 = false;
5271   SDValue VData;
5272   int NumVDataDwords;
5273   bool AdjustRetType = false;
5274 
5275   unsigned AddrIdx; // Index of first address argument
5276   unsigned DMask;
5277   unsigned DMaskLanes = 0;
5278 
5279   if (BaseOpcode->Atomic) {
5280     VData = Op.getOperand(2);
5281 
5282     bool Is64Bit = VData.getValueType() == MVT::i64;
5283     if (BaseOpcode->AtomicX2) {
5284       SDValue VData2 = Op.getOperand(3);
5285       VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL,
5286                                  {VData, VData2});
5287       if (Is64Bit)
5288         VData = DAG.getBitcast(MVT::v4i32, VData);
5289 
5290       ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32;
5291       DMask = Is64Bit ? 0xf : 0x3;
5292       NumVDataDwords = Is64Bit ? 4 : 2;
5293       AddrIdx = 4;
5294     } else {
5295       DMask = Is64Bit ? 0x3 : 0x1;
5296       NumVDataDwords = Is64Bit ? 2 : 1;
5297       AddrIdx = 3;
5298     }
5299   } else {
5300     unsigned DMaskIdx = BaseOpcode->Store ? 3 : isa<MemSDNode>(Op) ? 2 : 1;
5301     auto DMaskConst = cast<ConstantSDNode>(Op.getOperand(DMaskIdx));
5302     DMask = DMaskConst->getZExtValue();
5303     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
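    // e.g. dmask = 0b0101 selects channels 0 and 2, so a load returns two
    // dwords; gather4 always returns all four.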
5304 
5305     if (BaseOpcode->Store) {
5306       VData = Op.getOperand(2);
5307 
5308       MVT StoreVT = VData.getSimpleValueType();
5309       if (StoreVT.getScalarType() == MVT::f16) {
5310         if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
5311           return Op; // D16 is unsupported for this instruction
5312 
5313         IsD16 = true;
5314         VData = handleD16VData(VData, DAG);
5315       }
5316 
5317       NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32;
5318     } else {
5319       // Work out the num dwords based on the dmask popcount and underlying type
5320       // and whether packing is supported.
5321       MVT LoadVT = ResultTypes[0].getSimpleVT();
5322       if (LoadVT.getScalarType() == MVT::f16) {
5323         if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
5324           return Op; // D16 is unsupported for this instruction
5325 
5326         IsD16 = true;
5327       }
5328 
5329       // Confirm that the return type is large enough for the dmask specified
5330       if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) ||
5331           (!LoadVT.isVector() && DMaskLanes > 1))
5332           return Op;
5333 
5334       if (IsD16 && !Subtarget->hasUnpackedD16VMem())
5335         NumVDataDwords = (DMaskLanes + 1) / 2;
5336       else
5337         NumVDataDwords = DMaskLanes;
5338 
5339       AdjustRetType = true;
5340     }
5341 
5342     AddrIdx = DMaskIdx + 1;
5343   }
5344 
5345   unsigned NumGradients = BaseOpcode->Gradients ? DimInfo->NumGradients : 0;
5346   unsigned NumCoords = BaseOpcode->Coordinates ? DimInfo->NumCoords : 0;
5347   unsigned NumLCM = BaseOpcode->LodOrClampOrMip ? 1 : 0;
5348   unsigned NumVAddrs = BaseOpcode->NumExtraArgs + NumGradients +
5349                        NumCoords + NumLCM;
5350   unsigned NumMIVAddrs = NumVAddrs;
5351 
5352   SmallVector<SDValue, 4> VAddrs;
5353 
5354   // Optimize _L to _LZ when _L is zero
5355   if (LZMappingInfo) {
5356     if (auto ConstantLod =
5357          dyn_cast<ConstantFPSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
5358       if (ConstantLod->isZero() || ConstantLod->isNegative()) {
5359         IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
5360         NumMIVAddrs--;               // remove 'lod'
5361       }
5362     }
5363   }
5364 
5365   // Optimize _mip away, when 'lod' is zero
5366   if (MIPMappingInfo) {
5367     if (auto ConstantLod =
5368          dyn_cast<ConstantSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
5369       if (ConstantLod->isNullValue()) {
5370         IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
5371         NumMIVAddrs--;               // remove 'lod'
5372       }
5373     }
5374   }
5375 
5376   // Check for 16 bit addresses and pack if true.
5377   unsigned DimIdx = AddrIdx + BaseOpcode->NumExtraArgs;
5378   MVT VAddrVT = Op.getOperand(DimIdx).getSimpleValueType();
5379   const MVT VAddrScalarVT = VAddrVT.getScalarType();
5380   if (((VAddrScalarVT == MVT::f16) || (VAddrScalarVT == MVT::i16)) &&
5381       ST->hasFeature(AMDGPU::FeatureR128A16)) {
5382     IsA16 = true;
5383     const MVT VectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16;
5384     for (unsigned i = AddrIdx; i < (AddrIdx + NumMIVAddrs); ++i) {
5385       SDValue AddrLo, AddrHi;
5386       // Push back extra arguments.
5387       if (i < DimIdx) {
5388         AddrLo = Op.getOperand(i);
5389       } else {
5390         AddrLo = Op.getOperand(i);
5391         // Dz/dh, dz/dv and the last odd coord are packed with undef. Also,
5392         // in 1D, derivatives dx/dh and dx/dv are packed with undef.
5393         if (((i + 1) >= (AddrIdx + NumMIVAddrs)) ||
5394             ((NumGradients / 2) % 2 == 1 &&
5395             (i == DimIdx + (NumGradients / 2) - 1 ||
5396              i == DimIdx + NumGradients - 1))) {
5397           AddrHi = DAG.getUNDEF(MVT::f16);
5398         } else {
5399           AddrHi = Op.getOperand(i + 1);
5400           i++;
5401         }
5402         AddrLo = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VectorVT,
5403                              {AddrLo, AddrHi});
5404         AddrLo = DAG.getBitcast(MVT::i32, AddrLo);
5405       }
5406       VAddrs.push_back(AddrLo);
5407     }
5408   } else {
5409     for (unsigned i = 0; i < NumMIVAddrs; ++i)
5410       VAddrs.push_back(Op.getOperand(AddrIdx + i));
5411   }
5412 
5413   // If the register allocator cannot place the address registers contiguously
5414   // without introducing moves, then using the non-sequential address encoding
5415   // is always preferable, since it saves VALU instructions and is usually a
5416   // wash in terms of code size or even better.
5417   //
5418   // However, we currently have no way of hinting to the register allocator that
5419   // MIMG addresses should be placed contiguously when it is possible to do so,
5420   // so force non-NSA for the common 2-address case as a heuristic.
5421   //
5422   // SIShrinkInstructions will convert NSA encodings to non-NSA after register
5423   // allocation when possible.
5424   bool UseNSA =
5425       ST->hasFeature(AMDGPU::FeatureNSAEncoding) && VAddrs.size() >= 3;
5426   SDValue VAddr;
5427   if (!UseNSA)
5428     VAddr = getBuildDwordsVector(DAG, DL, VAddrs);
5429 
5430   SDValue True = DAG.getTargetConstant(1, DL, MVT::i1);
5431   SDValue False = DAG.getTargetConstant(0, DL, MVT::i1);
5432   unsigned CtrlIdx; // Index of texfailctrl argument
5433   SDValue Unorm;
5434   if (!BaseOpcode->Sampler) {
5435     Unorm = True;
5436     CtrlIdx = AddrIdx + NumVAddrs + 1;
5437   } else {
5438     auto UnormConst =
5439         cast<ConstantSDNode>(Op.getOperand(AddrIdx + NumVAddrs + 2));
5440 
5441     Unorm = UnormConst->getZExtValue() ? True : False;
5442     CtrlIdx = AddrIdx + NumVAddrs + 3;
5443   }
5444 
5445   SDValue TFE;
5446   SDValue LWE;
5447   SDValue TexFail = Op.getOperand(CtrlIdx);
5448   bool IsTexFail = false;
5449   if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail))
5450     return Op;
5451 
5452   if (IsTexFail) {
5453     if (!DMaskLanes) {
5454       // Expecting to get an error flag since TFC is on and dmask is 0.
5455       // Force dmask to be at least 1, otherwise the instruction will fail.
5456       DMask = 0x1;
5457       DMaskLanes = 1;
5458       NumVDataDwords = 1;
5459     }
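    // The TFE/LWE result carries one extra dword for the error/status flag,
    // so grow the expected return size accordingly.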
5460     NumVDataDwords += 1;
5461     AdjustRetType = true;
5462   }
5463 
5464   // Something earlier has tagged the return type as needing adjustment.
5465   // This happens if the instruction is a load or has set TexFailCtrl flags.
5466   if (AdjustRetType) {
5467     // NumVDataDwords reflects the true number of dwords required in the return type
5468     if (DMaskLanes == 0 && !BaseOpcode->Store) {
5469       // This is a no-op load. This can be eliminated.
5470       SDValue Undef = DAG.getUNDEF(Op.getValueType());
5471       if (isa<MemSDNode>(Op))
5472         return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
5473       return Undef;
5474     }
5475 
5476     EVT NewVT = NumVDataDwords > 1 ?
5477                   EVT::getVectorVT(*DAG.getContext(), MVT::f32, NumVDataDwords)
5478                 : MVT::f32;
5479 
5480     ResultTypes[0] = NewVT;
5481     if (ResultTypes.size() == 3) {
5482       // The original result was an aggregate type used for TexFailCtrl results.
5483       // The actual instruction returns as a vector type, which has now been
5484       // created. Remove the aggregate result.
5485       ResultTypes.erase(&ResultTypes[1]);
5486     }
5487   }
5488 
5489   SDValue GLC;
5490   SDValue SLC;
5491   SDValue DLC;
5492   if (BaseOpcode->Atomic) {
5493     GLC = True; // TODO no-return optimization
5494     if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, nullptr, &SLC,
5495                           IsGFX10 ? &DLC : nullptr))
5496       return Op;
5497   } else {
5498     if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, &GLC, &SLC,
5499                           IsGFX10 ? &DLC : nullptr))
5500       return Op;
5501   }
5502 
5503   SmallVector<SDValue, 26> Ops;
5504   if (BaseOpcode->Store || BaseOpcode->Atomic)
5505     Ops.push_back(VData); // vdata
5506   if (UseNSA) {
5507     for (const SDValue &Addr : VAddrs)
5508       Ops.push_back(Addr);
5509   } else {
5510     Ops.push_back(VAddr);
5511   }
5512   Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs)); // rsrc
5513   if (BaseOpcode->Sampler)
5514     Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs + 1)); // sampler
5515   Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32));
5516   if (IsGFX10)
5517     Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32));
5518   Ops.push_back(Unorm);
5519   if (IsGFX10)
5520     Ops.push_back(DLC);
5521   Ops.push_back(GLC);
5522   Ops.push_back(SLC);
5523   Ops.push_back(IsA16 &&  // a16 or r128
5524                 ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False);
5525   Ops.push_back(TFE); // tfe
5526   Ops.push_back(LWE); // lwe
5527   if (!IsGFX10)
5528     Ops.push_back(DimInfo->DA ? True : False);
5529   if (BaseOpcode->HasD16)
5530     Ops.push_back(IsD16 ? True : False);
5531   if (isa<MemSDNode>(Op))
5532     Ops.push_back(Op.getOperand(0)); // chain
5533 
5534   int NumVAddrDwords =
5535       UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32;
5536   int Opcode = -1;
5537 
5538   if (IsGFX10) {
5539     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
5540                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
5541                                           : AMDGPU::MIMGEncGfx10Default,
5542                                    NumVDataDwords, NumVAddrDwords);
5543   } else {
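    // Pre-GFX10: prefer the VI (GFX8) encoding when available, otherwise fall
    // back to the SI/CI (GFX6) encoding.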
5544     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5545       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
5546                                      NumVDataDwords, NumVAddrDwords);
5547     if (Opcode == -1)
5548       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
5549                                      NumVDataDwords, NumVAddrDwords);
5550   }
5551   assert(Opcode != -1);
5552 
5553   MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops);
5554   if (auto MemOp = dyn_cast<MemSDNode>(Op)) {
5555     MachineMemOperand *MemRef = MemOp->getMemOperand();
5556     DAG.setNodeMemRefs(NewNode, {MemRef});
5557   }
5558 
5559   if (BaseOpcode->AtomicX2) {
5560     SmallVector<SDValue, 1> Elt;
5561     DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1);
5562     return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL);
5563   } else if (!BaseOpcode->Store) {
5564     return constructRetValue(DAG, NewNode,
5565                              OrigResultTypes, IsTexFail,
5566                              Subtarget->hasUnpackedD16VMem(), IsD16,
5567                              DMaskLanes, NumVDataDwords, DL,
5568                              *DAG.getContext());
5569   }
5570 
5571   return SDValue(NewNode, 0);
5572 }
5573 
5574 SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
5575                                        SDValue Offset, SDValue GLC, SDValue DLC,
5576                                        SelectionDAG &DAG) const {
5577   MachineFunction &MF = DAG.getMachineFunction();
5578   MachineMemOperand *MMO = MF.getMachineMemOperand(
5579       MachinePointerInfo(),
5580       MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
5581           MachineMemOperand::MOInvariant,
5582       VT.getStoreSize(), VT.getStoreSize());
5583 
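  // A wave-uniform offset can use the scalar (SMEM) buffer load path directly.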
5584   if (!Offset->isDivergent()) {
5585     SDValue Ops[] = {
5586         Rsrc,
5587         Offset, // Offset
5588         GLC,
5589         DLC,
5590     };
5591     return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL,
5592                                    DAG.getVTList(VT), Ops, VT, MMO);
5593   }
5594 
5595   // We have a divergent offset. Emit a MUBUF buffer load instead. We can
5596   // assume that the buffer is unswizzled.
5597   SmallVector<SDValue, 4> Loads;
5598   unsigned NumLoads = 1;
5599   MVT LoadVT = VT.getSimpleVT();
5600   unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1;
5601   assert((LoadVT.getScalarType() == MVT::i32 ||
5602           LoadVT.getScalarType() == MVT::f32) &&
5603          isPowerOf2_32(NumElts));
5604 
5605   if (NumElts == 8 || NumElts == 16) {
5606     NumLoads = NumElts == 16 ? 4 : 2;
5607     LoadVT = MVT::v4i32;
5608   }
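  // Results wider than four dwords are assembled from multiple dwordx4 loads,
  // each covering 16 bytes, and concatenated below.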
5609 
5610   SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue});
5611   unsigned CachePolicy = cast<ConstantSDNode>(GLC)->getZExtValue();
5612   SDValue Ops[] = {
5613       DAG.getEntryNode(),                         // Chain
5614       Rsrc,                                       // rsrc
5615       DAG.getConstant(0, DL, MVT::i32),           // vindex
5616       {},                                         // voffset
5617       {},                                         // soffset
5618       {},                                         // offset
5619       DAG.getConstant(CachePolicy, DL, MVT::i32), // cachepolicy
5620       DAG.getConstant(0, DL, MVT::i1),            // idxen
5621   };
5622 
5623   // Use the alignment to ensure that the required offsets will fit into the
5624   // immediate offset fields.
5625   setBufferOffsets(Offset, DAG, &Ops[3], NumLoads > 1 ? 16 * NumLoads : 4);
5626 
5627   uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
5628   for (unsigned i = 0; i < NumLoads; ++i) {
5629     Ops[5] = DAG.getConstant(InstOffset + 16 * i, DL, MVT::i32);
5630     Loads.push_back(DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList,
5631                                             Ops, LoadVT, MMO));
5632   }
5633 
5634   if (VT == MVT::v8i32 || VT == MVT::v16i32)
5635     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads);
5636 
5637   return Loads[0];
5638 }
5639 
5640 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
5641                                                   SelectionDAG &DAG) const {
5642   MachineFunction &MF = DAG.getMachineFunction();
5643   auto MFI = MF.getInfo<SIMachineFunctionInfo>();
5644 
5645   EVT VT = Op.getValueType();
5646   SDLoc DL(Op);
5647   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5648 
5649   // TODO: Should this propagate fast-math-flags?
5650 
5651   switch (IntrinsicID) {
5652   case Intrinsic::amdgcn_implicit_buffer_ptr: {
5653     if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction()))
5654       return emitNonHSAIntrinsicError(DAG, DL, VT);
5655     return getPreloadedValue(DAG, *MFI, VT,
5656                              AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
5657   }
5658   case Intrinsic::amdgcn_dispatch_ptr:
5659   case Intrinsic::amdgcn_queue_ptr: {
5660     if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) {
5661       DiagnosticInfoUnsupported BadIntrin(
5662           MF.getFunction(), "unsupported hsa intrinsic without hsa target",
5663           DL.getDebugLoc());
5664       DAG.getContext()->diagnose(BadIntrin);
5665       return DAG.getUNDEF(VT);
5666     }
5667 
5668     auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
5669       AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
5670     return getPreloadedValue(DAG, *MFI, VT, RegID);
5671   }
5672   case Intrinsic::amdgcn_implicitarg_ptr: {
5673     if (MFI->isEntryFunction())
5674       return getImplicitArgPtr(DAG, DL);
5675     return getPreloadedValue(DAG, *MFI, VT,
5676                              AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
5677   }
5678   case Intrinsic::amdgcn_kernarg_segment_ptr: {
5679     return getPreloadedValue(DAG, *MFI, VT,
5680                              AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
5681   }
5682   case Intrinsic::amdgcn_dispatch_id: {
5683     return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
5684   }
5685   case Intrinsic::amdgcn_rcp:
5686     return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
5687   case Intrinsic::amdgcn_rsq:
5688     return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
5689   case Intrinsic::amdgcn_rsq_legacy:
5690     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5691       return emitRemovedIntrinsicError(DAG, DL, VT);
5692 
5693     return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
5694   case Intrinsic::amdgcn_rcp_legacy:
5695     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5696       return emitRemovedIntrinsicError(DAG, DL, VT);
5697     return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
5698   case Intrinsic::amdgcn_rsq_clamp: {
5699     if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
5700       return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
5701 
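    // There is no rsq_clamp instruction on VI+, so emit a plain rsq and clamp
    // the result to +/- the largest finite value by hand.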
5702     Type *Type = VT.getTypeForEVT(*DAG.getContext());
5703     APFloat Max = APFloat::getLargest(Type->getFltSemantics());
5704     APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
5705 
5706     SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
5707     SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
5708                               DAG.getConstantFP(Max, DL, VT));
5709     return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
5710                        DAG.getConstantFP(Min, DL, VT));
5711   }
5712   case Intrinsic::r600_read_ngroups_x:
5713     if (Subtarget->isAmdHsaOS())
5714       return emitNonHSAIntrinsicError(DAG, DL, VT);
5715 
5716     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5717                                     SI::KernelInputOffsets::NGROUPS_X, 4, false);
5718   case Intrinsic::r600_read_ngroups_y:
5719     if (Subtarget->isAmdHsaOS())
5720       return emitNonHSAIntrinsicError(DAG, DL, VT);
5721 
5722     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5723                                     SI::KernelInputOffsets::NGROUPS_Y, 4, false);
5724   case Intrinsic::r600_read_ngroups_z:
5725     if (Subtarget->isAmdHsaOS())
5726       return emitNonHSAIntrinsicError(DAG, DL, VT);
5727 
5728     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5729                                     SI::KernelInputOffsets::NGROUPS_Z, 4, false);
5730   case Intrinsic::r600_read_global_size_x:
5731     if (Subtarget->isAmdHsaOS())
5732       return emitNonHSAIntrinsicError(DAG, DL, VT);
5733 
5734     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5735                                     SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false);
5736   case Intrinsic::r600_read_global_size_y:
5737     if (Subtarget->isAmdHsaOS())
5738       return emitNonHSAIntrinsicError(DAG, DL, VT);
5739 
5740     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5741                                     SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false);
5742   case Intrinsic::r600_read_global_size_z:
5743     if (Subtarget->isAmdHsaOS())
5744       return emitNonHSAIntrinsicError(DAG, DL, VT);
5745 
5746     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5747                                     SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false);
5748   case Intrinsic::r600_read_local_size_x:
5749     if (Subtarget->isAmdHsaOS())
5750       return emitNonHSAIntrinsicError(DAG, DL, VT);
5751 
5752     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5753                                   SI::KernelInputOffsets::LOCAL_SIZE_X);
5754   case Intrinsic::r600_read_local_size_y:
5755     if (Subtarget->isAmdHsaOS())
5756       return emitNonHSAIntrinsicError(DAG, DL, VT);
5757 
5758     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5759                                   SI::KernelInputOffsets::LOCAL_SIZE_Y);
5760   case Intrinsic::r600_read_local_size_z:
5761     if (Subtarget->isAmdHsaOS())
5762       return emitNonHSAIntrinsicError(DAG, DL, VT);
5763 
5764     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5765                                   SI::KernelInputOffsets::LOCAL_SIZE_Z);
5766   case Intrinsic::amdgcn_workgroup_id_x:
5767   case Intrinsic::r600_read_tgid_x:
5768     return getPreloadedValue(DAG, *MFI, VT,
5769                              AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
5770   case Intrinsic::amdgcn_workgroup_id_y:
5771   case Intrinsic::r600_read_tgid_y:
5772     return getPreloadedValue(DAG, *MFI, VT,
5773                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
5774   case Intrinsic::amdgcn_workgroup_id_z:
5775   case Intrinsic::r600_read_tgid_z:
5776     return getPreloadedValue(DAG, *MFI, VT,
5777                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
5778   case Intrinsic::amdgcn_workitem_id_x:
5779   case Intrinsic::r600_read_tidig_x:
5780     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5781                           SDLoc(DAG.getEntryNode()),
5782                           MFI->getArgInfo().WorkItemIDX);
5783   case Intrinsic::amdgcn_workitem_id_y:
5784   case Intrinsic::r600_read_tidig_y:
5785     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5786                           SDLoc(DAG.getEntryNode()),
5787                           MFI->getArgInfo().WorkItemIDY);
5788   case Intrinsic::amdgcn_workitem_id_z:
5789   case Intrinsic::r600_read_tidig_z:
5790     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5791                           SDLoc(DAG.getEntryNode()),
5792                           MFI->getArgInfo().WorkItemIDZ);
5793   case Intrinsic::amdgcn_wavefrontsize:
5794     return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(),
5795                            SDLoc(Op), MVT::i32);
5796   case Intrinsic::amdgcn_s_buffer_load: {
5797     bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10;
5798     SDValue GLC;
5799     SDValue DLC = DAG.getTargetConstant(0, DL, MVT::i1);
5800     if (!parseCachePolicy(Op.getOperand(3), DAG, &GLC, nullptr,
5801                           IsGFX10 ? &DLC : nullptr))
5802       return Op;
5803     return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2), GLC, DLC,
5804                         DAG);
5805   }
5806   case Intrinsic::amdgcn_fdiv_fast:
5807     return lowerFDIV_FAST(Op, DAG);
5808   case Intrinsic::amdgcn_interp_mov: {
5809     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
5810     SDValue Glue = M0.getValue(1);
5811     return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
5812                        Op.getOperand(2), Op.getOperand(3), Glue);
5813   }
5814   case Intrinsic::amdgcn_interp_p1: {
5815     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
5816     SDValue Glue = M0.getValue(1);
5817     return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
5818                        Op.getOperand(2), Op.getOperand(3), Glue);
5819   }
5820   case Intrinsic::amdgcn_interp_p2: {
5821     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
5822     SDValue Glue = SDValue(M0.getNode(), 1);
5823     return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
5824                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
5825                        Glue);
5826   }
5827   case Intrinsic::amdgcn_interp_p1_f16: {
5828     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
5829     SDValue Glue = M0.getValue(1);
5830     if (getSubtarget()->getLDSBankCount() == 16) {
5831       // 16 bank LDS
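      // With only 16 LDS banks, packed f16 interpolation cannot use the plain
      // p1 path: read the P0 parameter with interp_mov and feed it to the
      // P1LV variant, which selects the packed half by 'high'.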
5832       SDValue S = DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32,
5833                               DAG.getConstant(2, DL, MVT::i32), // P0
5834                               Op.getOperand(2), // Attrchan
5835                               Op.getOperand(3), // Attr
5836                               Glue);
5837       SDValue Ops[] = {
5838         Op.getOperand(1), // Src0
5839         Op.getOperand(2), // Attrchan
5840         Op.getOperand(3), // Attr
5841         DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
5842         S, // Src2 - holds two f16 values selected by high
5843         DAG.getConstant(0, DL, MVT::i32), // $src2_modifiers
5844         Op.getOperand(4), // high
5845         DAG.getConstant(0, DL, MVT::i1), // $clamp
5846         DAG.getConstant(0, DL, MVT::i32) // $omod
5847       };
5848       return DAG.getNode(AMDGPUISD::INTERP_P1LV_F16, DL, MVT::f32, Ops);
5849     } else {
5850       // 32 bank LDS
5851       SDValue Ops[] = {
5852         Op.getOperand(1), // Src0
5853         Op.getOperand(2), // Attrchan
5854         Op.getOperand(3), // Attr
5855         DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
5856         Op.getOperand(4), // high
5857         DAG.getConstant(0, DL, MVT::i1), // $clamp
5858         DAG.getConstant(0, DL, MVT::i32), // $omod
5859         Glue
5860       };
5861       return DAG.getNode(AMDGPUISD::INTERP_P1LL_F16, DL, MVT::f32, Ops);
5862     }
5863   }
5864   case Intrinsic::amdgcn_interp_p2_f16: {
5865     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(6));
5866     SDValue Glue = SDValue(M0.getNode(), 1);
5867     SDValue Ops[] = {
5868       Op.getOperand(2), // Src0
5869       Op.getOperand(3), // Attrchan
5870       Op.getOperand(4), // Attr
5871       DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
5872       Op.getOperand(1), // Src2
5873       DAG.getConstant(0, DL, MVT::i32), // $src2_modifiers
5874       Op.getOperand(5), // high
5875       DAG.getConstant(0, DL, MVT::i1), // $clamp
5876       Glue
5877     };
5878     return DAG.getNode(AMDGPUISD::INTERP_P2_F16, DL, MVT::f16, Ops);
5879   }
5880   case Intrinsic::amdgcn_sin:
5881     return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
5882 
5883   case Intrinsic::amdgcn_cos:
5884     return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
5885 
5886   case Intrinsic::amdgcn_mul_u24:
5887     return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT, Op.getOperand(1), Op.getOperand(2));
5888   case Intrinsic::amdgcn_mul_i24:
5889     return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT, Op.getOperand(1), Op.getOperand(2));
5890 
5891   case Intrinsic::amdgcn_log_clamp: {
5892     if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
5893       return SDValue();
5894 
5895     DiagnosticInfoUnsupported BadIntrin(
5896       MF.getFunction(), "intrinsic not supported on subtarget",
5897       DL.getDebugLoc());
5898     DAG.getContext()->diagnose(BadIntrin);
5899     return DAG.getUNDEF(VT);
5900   }
5901   case Intrinsic::amdgcn_ldexp:
5902     return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
5903                        Op.getOperand(1), Op.getOperand(2));
5904 
5905   case Intrinsic::amdgcn_fract:
5906     return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
5907 
5908   case Intrinsic::amdgcn_class:
5909     return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
5910                        Op.getOperand(1), Op.getOperand(2));
5911   case Intrinsic::amdgcn_div_fmas:
5912     return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
5913                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5914                        Op.getOperand(4));
5915 
5916   case Intrinsic::amdgcn_div_fixup:
5917     return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
5918                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5919 
5920   case Intrinsic::amdgcn_trig_preop:
5921     return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
5922                        Op.getOperand(1), Op.getOperand(2));
5923   case Intrinsic::amdgcn_div_scale: {
5924     const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3));
5925 
5926     // Translate to the operands expected by the machine instruction. The
5927     // first parameter must be the same as the first instruction.
5928     SDValue Numerator = Op.getOperand(1);
5929     SDValue Denominator = Op.getOperand(2);
5930 
5931     // Note this order is opposite of the machine instruction's operations,
5932     // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
5933     // intrinsic has the numerator as the first operand to match a normal
5934     // division operation.
5935 
5936     SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
5937 
5938     return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
5939                        Denominator, Numerator);
5940   }
5941   case Intrinsic::amdgcn_icmp: {
5942     // There is a Pat that handles this variant, so return it as-is.
5943     if (Op.getOperand(1).getValueType() == MVT::i1 &&
5944         Op.getConstantOperandVal(2) == 0 &&
5945         Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE)
5946       return Op;
5947     return lowerICMPIntrinsic(*this, Op.getNode(), DAG);
5948   }
5949   case Intrinsic::amdgcn_fcmp: {
5950     return lowerFCMPIntrinsic(*this, Op.getNode(), DAG);
5951   }
5952   case Intrinsic::amdgcn_fmed3:
5953     return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
5954                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5955   case Intrinsic::amdgcn_fdot2:
5956     return DAG.getNode(AMDGPUISD::FDOT2, DL, VT,
5957                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5958                        Op.getOperand(4));
5959   case Intrinsic::amdgcn_fmul_legacy:
5960     return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
5961                        Op.getOperand(1), Op.getOperand(2));
5962   case Intrinsic::amdgcn_sffbh:
5963     return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
5964   case Intrinsic::amdgcn_sbfe:
5965     return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
5966                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5967   case Intrinsic::amdgcn_ubfe:
5968     return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
5969                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5970   case Intrinsic::amdgcn_cvt_pkrtz:
5971   case Intrinsic::amdgcn_cvt_pknorm_i16:
5972   case Intrinsic::amdgcn_cvt_pknorm_u16:
5973   case Intrinsic::amdgcn_cvt_pk_i16:
5974   case Intrinsic::amdgcn_cvt_pk_u16: {
5975     // FIXME: Stop adding cast if v2f16/v2i16 are legal.
5976     EVT VT = Op.getValueType();
5977     unsigned Opcode;
5978 
5979     if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
5980       Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
5981     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
5982       Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
5983     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
5984       Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
5985     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
5986       Opcode = AMDGPUISD::CVT_PK_I16_I32;
5987     else
5988       Opcode = AMDGPUISD::CVT_PK_U16_U32;
5989 
5990     if (isTypeLegal(VT))
5991       return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2));
5992 
5993     SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
5994                                Op.getOperand(1), Op.getOperand(2));
5995     return DAG.getNode(ISD::BITCAST, DL, VT, Node);
5996   }
5997   case Intrinsic::amdgcn_fmad_ftz:
5998     return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1),
5999                        Op.getOperand(2), Op.getOperand(3));
6000 
6001   case Intrinsic::amdgcn_if_break:
6002     return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT,
6003                                       Op->getOperand(1), Op->getOperand(2)), 0);
6004 
6005   case Intrinsic::amdgcn_groupstaticsize: {
6006     Triple::OSType OS = getTargetMachine().getTargetTriple().getOS();
6007     if (OS == Triple::AMDHSA || OS == Triple::AMDPAL)
6008       return Op;
6009 
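    // Otherwise resolve the group static size through an absolute 32-bit
    // symbol named after the intrinsic.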
6010     const Module *M = MF.getFunction().getParent();
6011     const GlobalValue *GV =
6012         M->getNamedValue(Intrinsic::getName(Intrinsic::amdgcn_groupstaticsize));
6013     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0,
6014                                             SIInstrInfo::MO_ABS32_LO);
6015     return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0};
6016   }
6017   default:
6018     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6019             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
6020       return lowerImage(Op, ImageDimIntr, DAG);
6021 
6022     return Op;
6023   }
6024 }
6025 
6026 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
6027                                                  SelectionDAG &DAG) const {
6028   unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
6029   SDLoc DL(Op);
6030 
6031   switch (IntrID) {
6032   case Intrinsic::amdgcn_ds_ordered_add:
6033   case Intrinsic::amdgcn_ds_ordered_swap: {
6034     MemSDNode *M = cast<MemSDNode>(Op);
6035     SDValue Chain = M->getOperand(0);
6036     SDValue M0 = M->getOperand(2);
6037     SDValue Value = M->getOperand(3);
6038     unsigned IndexOperand = M->getConstantOperandVal(7);
6039     unsigned WaveRelease = M->getConstantOperandVal(8);
6040     unsigned WaveDone = M->getConstantOperandVal(9);
6041     unsigned ShaderType;
6042     unsigned Instruction;
6043 
6044     unsigned OrderedCountIndex = IndexOperand & 0x3f;
6045     IndexOperand &= ~0x3f;
6046     unsigned CountDw = 0;
6047 
6048     if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) {
6049       CountDw = (IndexOperand >> 24) & 0xf;
6050       IndexOperand &= ~(0xf << 24);
6051 
6052       if (CountDw < 1 || CountDw > 4) {
6053         report_fatal_error(
6054             "ds_ordered_count: dword count must be between 1 and 4");
6055       }
6056     }
6057 
6058     if (IndexOperand)
6059       report_fatal_error("ds_ordered_count: bad index operand");
6060 
6061     switch (IntrID) {
6062     case Intrinsic::amdgcn_ds_ordered_add:
6063       Instruction = 0;
6064       break;
6065     case Intrinsic::amdgcn_ds_ordered_swap:
6066       Instruction = 1;
6067       break;
6068     }
6069 
6070     if (WaveDone && !WaveRelease)
6071       report_fatal_error("ds_ordered_count: wave_done requires wave_release");
6072 
6073     switch (DAG.getMachineFunction().getFunction().getCallingConv()) {
6074     case CallingConv::AMDGPU_CS:
6075     case CallingConv::AMDGPU_KERNEL:
6076       ShaderType = 0;
6077       break;
6078     case CallingConv::AMDGPU_PS:
6079       ShaderType = 1;
6080       break;
6081     case CallingConv::AMDGPU_VS:
6082       ShaderType = 2;
6083       break;
6084     case CallingConv::AMDGPU_GS:
6085       ShaderType = 3;
6086       break;
6087     default:
6088       report_fatal_error("ds_ordered_count unsupported for this calling conv");
6089     }
6090 
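    // Pack the ds_ordered_count offset field: bits [7:0] hold the ordered
    // count index scaled to bytes; bits [15:8] pack wave_release, wave_done,
    // the shader type, the add/swap selector and, on GFX10, the dword count
    // minus one.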
6091     unsigned Offset0 = OrderedCountIndex << 2;
6092     unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
6093                        (Instruction << 4);
6094 
6095     if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
6096       Offset1 |= (CountDw - 1) << 6;
6097 
6098     unsigned Offset = Offset0 | (Offset1 << 8);
6099 
6100     SDValue Ops[] = {
6101       Chain,
6102       Value,
6103       DAG.getTargetConstant(Offset, DL, MVT::i16),
6104       copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue
6105     };
6106     return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL,
6107                                    M->getVTList(), Ops, M->getMemoryVT(),
6108                                    M->getMemOperand());
6109   }
6110   case Intrinsic::amdgcn_ds_fadd: {
6111     MemSDNode *M = cast<MemSDNode>(Op);
6112     unsigned Opc;
6113     switch (IntrID) {
6114     case Intrinsic::amdgcn_ds_fadd:
6115       Opc = ISD::ATOMIC_LOAD_FADD;
6116       break;
6117     }
6118 
6119     return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(),
6120                          M->getOperand(0), M->getOperand(2), M->getOperand(3),
6121                          M->getMemOperand());
6122   }
6123   case Intrinsic::amdgcn_atomic_inc:
6124   case Intrinsic::amdgcn_atomic_dec:
6125   case Intrinsic::amdgcn_ds_fmin:
6126   case Intrinsic::amdgcn_ds_fmax: {
6127     MemSDNode *M = cast<MemSDNode>(Op);
6128     unsigned Opc;
6129     switch (IntrID) {
6130     case Intrinsic::amdgcn_atomic_inc:
6131       Opc = AMDGPUISD::ATOMIC_INC;
6132       break;
6133     case Intrinsic::amdgcn_atomic_dec:
6134       Opc = AMDGPUISD::ATOMIC_DEC;
6135       break;
6136     case Intrinsic::amdgcn_ds_fmin:
6137       Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
6138       break;
6139     case Intrinsic::amdgcn_ds_fmax:
6140       Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
6141       break;
6142     default:
6143       llvm_unreachable("Unknown intrinsic!");
6144     }
6145     SDValue Ops[] = {
6146       M->getOperand(0), // Chain
6147       M->getOperand(2), // Ptr
6148       M->getOperand(3)  // Value
6149     };
6150 
6151     return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
6152                                    M->getMemoryVT(), M->getMemOperand());
6153   }
6154   case Intrinsic::amdgcn_buffer_load:
6155   case Intrinsic::amdgcn_buffer_load_format: {
6156     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
6157     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6158     unsigned IdxEn = 1;
6159     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
6160       IdxEn = Idx->getZExtValue() != 0;
6161     SDValue Ops[] = {
6162       Op.getOperand(0), // Chain
6163       Op.getOperand(2), // rsrc
6164       Op.getOperand(3), // vindex
6165       SDValue(),        // voffset -- will be set by setBufferOffsets
6166       SDValue(),        // soffset -- will be set by setBufferOffsets
6167       SDValue(),        // offset -- will be set by setBufferOffsets
6168       DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6169       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6170     };
6171 
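    // setBufferOffsets splits the single offset argument into the voffset,
    // soffset and immediate offset operands filled in below.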
6172     setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]);
6173     unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
6174         AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
6175 
6176     EVT VT = Op.getValueType();
6177     EVT IntVT = VT.changeTypeToInteger();
6178     auto *M = cast<MemSDNode>(Op);
6179     EVT LoadVT = Op.getValueType();
6180 
6181     if (LoadVT.getScalarType() == MVT::f16)
6182       return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
6183                                  M, DAG, Ops);
6184 
6185     // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
6186     if (LoadVT.getScalarType() == MVT::i8 ||
6187         LoadVT.getScalarType() == MVT::i16)
6188       return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
6189 
6190     return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
6191                                M->getMemOperand(), DAG);
6192   }
6193   case Intrinsic::amdgcn_raw_buffer_load:
6194   case Intrinsic::amdgcn_raw_buffer_load_format: {
6195     auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
6196     SDValue Ops[] = {
6197       Op.getOperand(0), // Chain
6198       Op.getOperand(2), // rsrc
6199       DAG.getConstant(0, DL, MVT::i32), // vindex
6200       Offsets.first,    // voffset
6201       Op.getOperand(4), // soffset
6202       Offsets.second,   // offset
6203       Op.getOperand(5), // cachepolicy
6204       DAG.getConstant(0, DL, MVT::i1), // idxen
6205     };
6206 
6207     unsigned Opc = (IntrID == Intrinsic::amdgcn_raw_buffer_load) ?
6208         AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
6209 
6210     EVT VT = Op.getValueType();
6211     EVT IntVT = VT.changeTypeToInteger();
6212     auto *M = cast<MemSDNode>(Op);
6213     EVT LoadVT = Op.getValueType();
6214 
6215     if (LoadVT.getScalarType() == MVT::f16)
6216       return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
6217                                  M, DAG, Ops);
6218 
6219     // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
6220     if (LoadVT.getScalarType() == MVT::i8 ||
6221         LoadVT.getScalarType() == MVT::i16)
6222       return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
6223 
6224     return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
6225                                M->getMemOperand(), DAG);
6226   }
6227   case Intrinsic::amdgcn_struct_buffer_load:
6228   case Intrinsic::amdgcn_struct_buffer_load_format: {
6229     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6230     SDValue Ops[] = {
6231       Op.getOperand(0), // Chain
6232       Op.getOperand(2), // rsrc
6233       Op.getOperand(3), // vindex
6234       Offsets.first,    // voffset
6235       Op.getOperand(5), // soffset
6236       Offsets.second,   // offset
6237       Op.getOperand(6), // cachepolicy
6238       DAG.getConstant(1, DL, MVT::i1), // idxen
6239     };
6240 
6241     unsigned Opc = (IntrID == Intrinsic::amdgcn_struct_buffer_load) ?
6242         AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
6243 
6244     EVT VT = Op.getValueType();
6245     EVT IntVT = VT.changeTypeToInteger();
6246     auto *M = cast<MemSDNode>(Op);
6247     EVT LoadVT = Op.getValueType();
6248 
6249     if (LoadVT.getScalarType() == MVT::f16)
6250       return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
6251                                  M, DAG, Ops);
6252 
6253     // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
6254     if (LoadVT.getScalarType() == MVT::i8 ||
6255         LoadVT.getScalarType() == MVT::i16)
6256       return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
6257 
6258     return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
6259                                M->getMemOperand(), DAG);
6260   }
6261   case Intrinsic::amdgcn_tbuffer_load: {
6262     MemSDNode *M = cast<MemSDNode>(Op);
6263     EVT LoadVT = Op.getValueType();
6264 
6265     unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6266     unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
6267     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
6268     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
6269     unsigned IdxEn = 1;
6270     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
6271       IdxEn = Idx->getZExtValue() != 0;
6272     SDValue Ops[] = {
6273       Op.getOperand(0),  // Chain
6274       Op.getOperand(2),  // rsrc
6275       Op.getOperand(3),  // vindex
6276       Op.getOperand(4),  // voffset
6277       Op.getOperand(5),  // soffset
6278       Op.getOperand(6),  // offset
6279       DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
6280       DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6281       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6282     };
6283 
6284     if (LoadVT.getScalarType() == MVT::f16)
6285       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
6286                                  M, DAG, Ops);
6287     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
6288                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
6289                                DAG);
6290   }
6291   case Intrinsic::amdgcn_raw_tbuffer_load: {
6292     MemSDNode *M = cast<MemSDNode>(Op);
6293     EVT LoadVT = Op.getValueType();
6294     auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
6295 
6296     SDValue Ops[] = {
6297       Op.getOperand(0),  // Chain
6298       Op.getOperand(2),  // rsrc
6299       DAG.getConstant(0, DL, MVT::i32), // vindex
6300       Offsets.first,     // voffset
6301       Op.getOperand(4),  // soffset
6302       Offsets.second,    // offset
6303       Op.getOperand(5),  // format
6304       Op.getOperand(6),  // cachepolicy
6305       DAG.getConstant(0, DL, MVT::i1), // idxen
6306     };
6307 
6308     if (LoadVT.getScalarType() == MVT::f16)
6309       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
6310                                  M, DAG, Ops);
6311     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
6312                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
6313                                DAG);
6314   }
6315   case Intrinsic::amdgcn_struct_tbuffer_load: {
6316     MemSDNode *M = cast<MemSDNode>(Op);
6317     EVT LoadVT = Op.getValueType();
6318     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6319 
6320     SDValue Ops[] = {
6321       Op.getOperand(0),  // Chain
6322       Op.getOperand(2),  // rsrc
6323       Op.getOperand(3),  // vindex
6324       Offsets.first,     // voffset
6325       Op.getOperand(5),  // soffset
6326       Offsets.second,    // offset
6327       Op.getOperand(6),  // format
6328       Op.getOperand(7),  // cachepolicy
6329       DAG.getConstant(1, DL, MVT::i1), // idxen
6330     };
6331 
6332     if (LoadVT.getScalarType() == MVT::f16)
6333       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
6334                                  M, DAG, Ops);
6335     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
6336                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
6337                                DAG);
6338   }
6339   case Intrinsic::amdgcn_buffer_atomic_swap:
6340   case Intrinsic::amdgcn_buffer_atomic_add:
6341   case Intrinsic::amdgcn_buffer_atomic_sub:
6342   case Intrinsic::amdgcn_buffer_atomic_smin:
6343   case Intrinsic::amdgcn_buffer_atomic_umin:
6344   case Intrinsic::amdgcn_buffer_atomic_smax:
6345   case Intrinsic::amdgcn_buffer_atomic_umax:
6346   case Intrinsic::amdgcn_buffer_atomic_and:
6347   case Intrinsic::amdgcn_buffer_atomic_or:
6348   case Intrinsic::amdgcn_buffer_atomic_xor: {
6349     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6350     unsigned IdxEn = 1;
6351     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6352       IdxEn = Idx->getZExtValue() != 0;
6353     SDValue Ops[] = {
6354       Op.getOperand(0), // Chain
6355       Op.getOperand(2), // vdata
6356       Op.getOperand(3), // rsrc
6357       Op.getOperand(4), // vindex
6358       SDValue(),        // voffset -- will be set by setBufferOffsets
6359       SDValue(),        // soffset -- will be set by setBufferOffsets
6360       SDValue(),        // offset -- will be set by setBufferOffsets
6361       DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
6362       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6363     };
6364     setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
6365     EVT VT = Op.getValueType();
6366 
6367     auto *M = cast<MemSDNode>(Op);
6368     unsigned Opcode = 0;
6369 
6370     switch (IntrID) {
6371     case Intrinsic::amdgcn_buffer_atomic_swap:
6372       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6373       break;
6374     case Intrinsic::amdgcn_buffer_atomic_add:
6375       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6376       break;
6377     case Intrinsic::amdgcn_buffer_atomic_sub:
6378       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6379       break;
6380     case Intrinsic::amdgcn_buffer_atomic_smin:
6381       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6382       break;
6383     case Intrinsic::amdgcn_buffer_atomic_umin:
6384       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6385       break;
6386     case Intrinsic::amdgcn_buffer_atomic_smax:
6387       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6388       break;
6389     case Intrinsic::amdgcn_buffer_atomic_umax:
6390       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6391       break;
6392     case Intrinsic::amdgcn_buffer_atomic_and:
6393       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6394       break;
6395     case Intrinsic::amdgcn_buffer_atomic_or:
6396       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6397       break;
6398     case Intrinsic::amdgcn_buffer_atomic_xor:
6399       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6400       break;
6401     default:
6402       llvm_unreachable("unhandled atomic opcode");
6403     }
6404 
6405     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6406                                    M->getMemOperand());
6407   }
6408   case Intrinsic::amdgcn_raw_buffer_atomic_swap:
6409   case Intrinsic::amdgcn_raw_buffer_atomic_add:
6410   case Intrinsic::amdgcn_raw_buffer_atomic_sub:
6411   case Intrinsic::amdgcn_raw_buffer_atomic_smin:
6412   case Intrinsic::amdgcn_raw_buffer_atomic_umin:
6413   case Intrinsic::amdgcn_raw_buffer_atomic_smax:
6414   case Intrinsic::amdgcn_raw_buffer_atomic_umax:
6415   case Intrinsic::amdgcn_raw_buffer_atomic_and:
6416   case Intrinsic::amdgcn_raw_buffer_atomic_or:
6417   case Intrinsic::amdgcn_raw_buffer_atomic_xor: {
6418     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6419     SDValue Ops[] = {
6420       Op.getOperand(0), // Chain
6421       Op.getOperand(2), // vdata
6422       Op.getOperand(3), // rsrc
6423       DAG.getConstant(0, DL, MVT::i32), // vindex
6424       Offsets.first,    // voffset
6425       Op.getOperand(5), // soffset
6426       Offsets.second,   // offset
6427       Op.getOperand(6), // cachepolicy
6428       DAG.getConstant(0, DL, MVT::i1), // idxen
6429     };
6430     EVT VT = Op.getValueType();
6431 
6432     auto *M = cast<MemSDNode>(Op);
6433     unsigned Opcode = 0;
6434 
6435     switch (IntrID) {
6436     case Intrinsic::amdgcn_raw_buffer_atomic_swap:
6437       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6438       break;
6439     case Intrinsic::amdgcn_raw_buffer_atomic_add:
6440       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6441       break;
6442     case Intrinsic::amdgcn_raw_buffer_atomic_sub:
6443       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6444       break;
6445     case Intrinsic::amdgcn_raw_buffer_atomic_smin:
6446       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6447       break;
6448     case Intrinsic::amdgcn_raw_buffer_atomic_umin:
6449       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6450       break;
6451     case Intrinsic::amdgcn_raw_buffer_atomic_smax:
6452       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6453       break;
6454     case Intrinsic::amdgcn_raw_buffer_atomic_umax:
6455       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6456       break;
6457     case Intrinsic::amdgcn_raw_buffer_atomic_and:
6458       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6459       break;
6460     case Intrinsic::amdgcn_raw_buffer_atomic_or:
6461       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6462       break;
6463     case Intrinsic::amdgcn_raw_buffer_atomic_xor:
6464       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6465       break;
6466     default:
6467       llvm_unreachable("unhandled atomic opcode");
6468     }
6469 
6470     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6471                                    M->getMemOperand());
6472   }
6473   case Intrinsic::amdgcn_struct_buffer_atomic_swap:
6474   case Intrinsic::amdgcn_struct_buffer_atomic_add:
6475   case Intrinsic::amdgcn_struct_buffer_atomic_sub:
6476   case Intrinsic::amdgcn_struct_buffer_atomic_smin:
6477   case Intrinsic::amdgcn_struct_buffer_atomic_umin:
6478   case Intrinsic::amdgcn_struct_buffer_atomic_smax:
6479   case Intrinsic::amdgcn_struct_buffer_atomic_umax:
6480   case Intrinsic::amdgcn_struct_buffer_atomic_and:
6481   case Intrinsic::amdgcn_struct_buffer_atomic_or:
6482   case Intrinsic::amdgcn_struct_buffer_atomic_xor: {
6483     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6484     SDValue Ops[] = {
6485       Op.getOperand(0), // Chain
6486       Op.getOperand(2), // vdata
6487       Op.getOperand(3), // rsrc
6488       Op.getOperand(4), // vindex
6489       Offsets.first,    // voffset
6490       Op.getOperand(6), // soffset
6491       Offsets.second,   // offset
6492       Op.getOperand(7), // cachepolicy
6493       DAG.getConstant(1, DL, MVT::i1), // idxen
6494     };
6495     EVT VT = Op.getValueType();
6496 
6497     auto *M = cast<MemSDNode>(Op);
6498     unsigned Opcode = 0;
6499 
6500     switch (IntrID) {
6501     case Intrinsic::amdgcn_struct_buffer_atomic_swap:
6502       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6503       break;
6504     case Intrinsic::amdgcn_struct_buffer_atomic_add:
6505       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6506       break;
6507     case Intrinsic::amdgcn_struct_buffer_atomic_sub:
6508       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6509       break;
6510     case Intrinsic::amdgcn_struct_buffer_atomic_smin:
6511       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6512       break;
6513     case Intrinsic::amdgcn_struct_buffer_atomic_umin:
6514       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6515       break;
6516     case Intrinsic::amdgcn_struct_buffer_atomic_smax:
6517       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6518       break;
6519     case Intrinsic::amdgcn_struct_buffer_atomic_umax:
6520       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6521       break;
6522     case Intrinsic::amdgcn_struct_buffer_atomic_and:
6523       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6524       break;
6525     case Intrinsic::amdgcn_struct_buffer_atomic_or:
6526       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6527       break;
6528     case Intrinsic::amdgcn_struct_buffer_atomic_xor:
6529       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6530       break;
6531     default:
6532       llvm_unreachable("unhandled atomic opcode");
6533     }
6534 
6535     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6536                                    M->getMemOperand());
6537   }
6538   case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
6539     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6540     unsigned IdxEn = 1;
6541     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(5)))
6542       IdxEn = Idx->getZExtValue() != 0;
6543     SDValue Ops[] = {
6544       Op.getOperand(0), // Chain
6545       Op.getOperand(2), // src
6546       Op.getOperand(3), // cmp
6547       Op.getOperand(4), // rsrc
6548       Op.getOperand(5), // vindex
6549       SDValue(),        // voffset -- will be set by setBufferOffsets
6550       SDValue(),        // soffset -- will be set by setBufferOffsets
6551       SDValue(),        // offset -- will be set by setBufferOffsets
6552       DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
6553       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6554     };
6555     setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]);
6556     EVT VT = Op.getValueType();
6557     auto *M = cast<MemSDNode>(Op);
6558 
6559     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6560                                    Op->getVTList(), Ops, VT, M->getMemOperand());
6561   }
6562   case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: {
6563     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6564     SDValue Ops[] = {
6565       Op.getOperand(0), // Chain
6566       Op.getOperand(2), // src
6567       Op.getOperand(3), // cmp
6568       Op.getOperand(4), // rsrc
6569       DAG.getConstant(0, DL, MVT::i32), // vindex
6570       Offsets.first,    // voffset
6571       Op.getOperand(6), // soffset
6572       Offsets.second,   // offset
6573       Op.getOperand(7), // cachepolicy
6574       DAG.getConstant(0, DL, MVT::i1), // idxen
6575     };
6576     EVT VT = Op.getValueType();
6577     auto *M = cast<MemSDNode>(Op);
6578 
6579     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6580                                    Op->getVTList(), Ops, VT, M->getMemOperand());
6581   }
6582   case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: {
6583     auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG);
6584     SDValue Ops[] = {
6585       Op.getOperand(0), // Chain
6586       Op.getOperand(2), // src
6587       Op.getOperand(3), // cmp
6588       Op.getOperand(4), // rsrc
6589       Op.getOperand(5), // vindex
6590       Offsets.first,    // voffset
6591       Op.getOperand(7), // soffset
6592       Offsets.second,   // offset
6593       Op.getOperand(8), // cachepolicy
6594       DAG.getConstant(1, DL, MVT::i1), // idxen
6595     };
6596     EVT VT = Op.getValueType();
6597     auto *M = cast<MemSDNode>(Op);
6598 
6599     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6600                                    Op->getVTList(), Ops, VT, M->getMemOperand());
6601   }
6602 
6603   default:
6604     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6605             AMDGPU::getImageDimIntrinsicInfo(IntrID))
6606       return lowerImage(Op, ImageDimIntr, DAG);
6607 
6608     return SDValue();
6609   }
6610 }
6611 
6612 // Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to
6613 // dwordx4 if on SI.
6614 SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL,
6615                                               SDVTList VTList,
6616                                               ArrayRef<SDValue> Ops, EVT MemVT,
6617                                               MachineMemOperand *MMO,
6618                                               SelectionDAG &DAG) const {
6619   EVT VT = VTList.VTs[0];
6620   EVT WidenedVT = VT;
6621   EVT WidenedMemVT = MemVT;
6622   if (!Subtarget->hasDwordx3LoadStores() &&
6623       (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) {
6624     WidenedVT = EVT::getVectorVT(*DAG.getContext(),
6625                                  WidenedVT.getVectorElementType(), 4);
6626     WidenedMemVT = EVT::getVectorVT(*DAG.getContext(),
6627                                     WidenedMemVT.getVectorElementType(), 4);
6628     MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16);
6629   }
6630 
6631   assert(VTList.NumVTs == 2);
6632   SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]);
6633 
6634   auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops,
6635                                        WidenedMemVT, MMO);
6636   if (WidenedVT != VT) {
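    // Trim the widened result back to the requested type and re-attach the
    // chain value.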
6637     auto Extract = DAG.getNode(
6638         ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp,
6639         DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
6640     NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL);
6641   }
6642   return NewOp;
6643 }
6644 
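// Prepare a d16 value for a format store. With unpacked d16 VMem each 16-bit
// component must occupy its own dword, so the packed vector is zero-extended
// element by element; otherwise the packed value is used unchanged.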
6645 SDValue SITargetLowering::handleD16VData(SDValue VData,
6646                                          SelectionDAG &DAG) const {
6647   EVT StoreVT = VData.getValueType();
6648 
6649   // No change for f16 and legal vector D16 types.
6650   if (!StoreVT.isVector())
6651     return VData;
6652 
6653   SDLoc DL(VData);
6654   assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16");
6655 
6656   if (Subtarget->hasUnpackedD16VMem()) {
6657     // We need to unpack the packed data to store.
6658     EVT IntStoreVT = StoreVT.changeTypeToInteger();
6659     SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
6660 
6661     EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
6662                                         StoreVT.getVectorNumElements());
6663     SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
6664     return DAG.UnrollVectorOp(ZExt.getNode());
6665   }
6666 
6667   assert(isTypeLegal(StoreVT));
6668   return VData;
6669 }
6670 
6671 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
6672                                               SelectionDAG &DAG) const {
6673   SDLoc DL(Op);
6674   SDValue Chain = Op.getOperand(0);
6675   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
6676   MachineFunction &MF = DAG.getMachineFunction();
6677 
6678   switch (IntrinsicID) {
6679   case Intrinsic::amdgcn_exp: {
6680     const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
6681     const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
6682     const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
6683     const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));
6684 
6685     const SDValue Ops[] = {
6686       Chain,
6687       DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
6688       DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
6689       Op.getOperand(4), // src0
6690       Op.getOperand(5), // src1
6691       Op.getOperand(6), // src2
6692       Op.getOperand(7), // src3
6693       DAG.getTargetConstant(0, DL, MVT::i1), // compr
6694       DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
6695     };
6696 
6697     unsigned Opc = Done->isNullValue() ?
6698       AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
6699     return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
6700   }
6701   case Intrinsic::amdgcn_exp_compr: {
6702     const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
6703     const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
6704     SDValue Src0 = Op.getOperand(4);
6705     SDValue Src1 = Op.getOperand(5);
6706     const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
6707     const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));
6708 
6709     SDValue Undef = DAG.getUNDEF(MVT::f32);
6710     const SDValue Ops[] = {
6711       Chain,
6712       DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
6713       DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
6714       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
6715       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
6716       Undef, // src2
6717       Undef, // src3
6718       DAG.getTargetConstant(1, DL, MVT::i1), // compr
6719       DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
6720     };
6721 
6722     unsigned Opc = Done->isNullValue() ?
6723       AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
6724     return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
6725   }
6726   case Intrinsic::amdgcn_init_exec: {
6727     return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
6728                        Op.getOperand(2));
6729   }
6730   case Intrinsic::amdgcn_init_exec_from_input: {
6731     return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
6732                        Op.getOperand(2), Op.getOperand(3));
6733   }
6734   case Intrinsic::amdgcn_s_barrier: {
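    // If the whole workgroup fits in a single wave, the barrier adds no
    // cross-wave synchronization; a WAVE_BARRIER pseudo (a pure scheduling
    // barrier that emits no code) is sufficient.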
6735     if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
6736       const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
6737       unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
6738       if (WGSize <= ST.getWavefrontSize())
6739         return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
6740                                           Op.getOperand(0)), 0);
6741     }
6742     return SDValue();
6743   }
6744   case Intrinsic::amdgcn_tbuffer_store: {
6745     SDValue VData = Op.getOperand(2);
6746     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6747     if (IsD16)
6748       VData = handleD16VData(VData, DAG);
6749     unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
6750     unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
6751     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
6752     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue();
6753     unsigned IdxEn = 1;
6754     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6755       IdxEn = Idx->getZExtValue() != 0;
6756     SDValue Ops[] = {
6757       Chain,
6758       VData,             // vdata
6759       Op.getOperand(3),  // rsrc
6760       Op.getOperand(4),  // vindex
6761       Op.getOperand(5),  // voffset
6762       Op.getOperand(6),  // soffset
6763       Op.getOperand(7),  // offset
6764       DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
6765       DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6766       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6767     };
6768     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6769                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6770     MemSDNode *M = cast<MemSDNode>(Op);
6771     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6772                                    M->getMemoryVT(), M->getMemOperand());
6773   }
6774 
6775   case Intrinsic::amdgcn_struct_tbuffer_store: {
6776     SDValue VData = Op.getOperand(2);
6777     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6778     if (IsD16)
6779       VData = handleD16VData(VData, DAG);
6780     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6781     SDValue Ops[] = {
6782       Chain,
6783       VData,             // vdata
6784       Op.getOperand(3),  // rsrc
6785       Op.getOperand(4),  // vindex
6786       Offsets.first,     // voffset
6787       Op.getOperand(6),  // soffset
6788       Offsets.second,    // offset
6789       Op.getOperand(7),  // format
6790       Op.getOperand(8),  // cachepolicy
6791       DAG.getConstant(1, DL, MVT::i1), // idxen
6792     };
6793     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6794                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6795     MemSDNode *M = cast<MemSDNode>(Op);
6796     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6797                                    M->getMemoryVT(), M->getMemOperand());
6798   }
6799 
6800   case Intrinsic::amdgcn_raw_tbuffer_store: {
6801     SDValue VData = Op.getOperand(2);
6802     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6803     if (IsD16)
6804       VData = handleD16VData(VData, DAG);
6805     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6806     SDValue Ops[] = {
6807       Chain,
6808       VData,             // vdata
6809       Op.getOperand(3),  // rsrc
6810       DAG.getConstant(0, DL, MVT::i32), // vindex
6811       Offsets.first,     // voffset
6812       Op.getOperand(5),  // soffset
6813       Offsets.second,    // offset
6814       Op.getOperand(6),  // format
6815       Op.getOperand(7),  // cachepolicy
6816       DAG.getConstant(0, DL, MVT::i1), // idxen
6817     };
6818     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6819                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6820     MemSDNode *M = cast<MemSDNode>(Op);
6821     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6822                                    M->getMemoryVT(), M->getMemOperand());
6823   }
6824 
6825   case Intrinsic::amdgcn_buffer_store:
6826   case Intrinsic::amdgcn_buffer_store_format: {
6827     SDValue VData = Op.getOperand(2);
6828     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6829     if (IsD16)
6830       VData = handleD16VData(VData, DAG);
6831     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6832     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6833     unsigned IdxEn = 1;
6834     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6835       IdxEn = Idx->getZExtValue() != 0;
6836     SDValue Ops[] = {
6837       Chain,
6838       VData,
6839       Op.getOperand(3), // rsrc
6840       Op.getOperand(4), // vindex
6841       SDValue(), // voffset -- will be set by setBufferOffsets
6842       SDValue(), // soffset -- will be set by setBufferOffsets
6843       SDValue(), // offset -- will be set by setBufferOffsets
6844       DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6845       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6846     };
6847     setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
6848     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
6849                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6850     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6851     MemSDNode *M = cast<MemSDNode>(Op);
6852 
6853     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6854     EVT VDataType = VData.getValueType().getScalarType();
6855     if (VDataType == MVT::i8 || VDataType == MVT::i16)
6856       return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6857 
6858     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6859                                    M->getMemoryVT(), M->getMemOperand());
6860   }
6861 
6862   case Intrinsic::amdgcn_raw_buffer_store:
6863   case Intrinsic::amdgcn_raw_buffer_store_format: {
6864     SDValue VData = Op.getOperand(2);
6865     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6866     if (IsD16)
6867       VData = handleD16VData(VData, DAG);
6868     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6869     SDValue Ops[] = {
6870       Chain,
6871       VData,
6872       Op.getOperand(3), // rsrc
6873       DAG.getConstant(0, DL, MVT::i32), // vindex
6874       Offsets.first,    // voffset
6875       Op.getOperand(5), // soffset
6876       Offsets.second,   // offset
6877       Op.getOperand(6), // cachepolicy
6878       DAG.getConstant(0, DL, MVT::i1), // idxen
6879     };
6880     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_raw_buffer_store ?
6881                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6882     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6883     MemSDNode *M = cast<MemSDNode>(Op);
6884 
6885     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6886     EVT VDataType = VData.getValueType().getScalarType();
6887     if (VDataType == MVT::i8 || VDataType == MVT::i16)
6888       return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6889 
6890     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6891                                    M->getMemoryVT(), M->getMemOperand());
6892   }
6893 
6894   case Intrinsic::amdgcn_struct_buffer_store:
6895   case Intrinsic::amdgcn_struct_buffer_store_format: {
6896     SDValue VData = Op.getOperand(2);
6897     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6898     if (IsD16)
6899       VData = handleD16VData(VData, DAG);
6900     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6901     SDValue Ops[] = {
6902       Chain,
6903       VData,
6904       Op.getOperand(3), // rsrc
6905       Op.getOperand(4), // vindex
6906       Offsets.first,    // voffset
6907       Op.getOperand(6), // soffset
6908       Offsets.second,   // offset
6909       Op.getOperand(7), // cachepolicy
6910       DAG.getConstant(1, DL, MVT::i1), // idxen
6911     };
6912     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ?
6913                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6914     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6915     MemSDNode *M = cast<MemSDNode>(Op);
6916 
6917     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6918     EVT VDataType = VData.getValueType().getScalarType();
6919     if (VDataType == MVT::i8 || VDataType == MVT::i16)
6920       return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6921 
6922     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6923                                    M->getMemoryVT(), M->getMemOperand());
6924   }
6925 
6926   case Intrinsic::amdgcn_buffer_atomic_fadd: {
6927     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6928     unsigned IdxEn = 1;
6929     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6930       IdxEn = Idx->getZExtValue() != 0;
6931     SDValue Ops[] = {
6932       Chain,
6933       Op.getOperand(2), // vdata
6934       Op.getOperand(3), // rsrc
6935       Op.getOperand(4), // vindex
6936       SDValue(),        // voffset -- will be set by setBufferOffsets
6937       SDValue(),        // soffset -- will be set by setBufferOffsets
6938       SDValue(),        // offset -- will be set by setBufferOffsets
6939       DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
6940       DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6941     };
6942     setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
6943     EVT VT = Op.getOperand(2).getValueType();
6944 
6945     auto *M = cast<MemSDNode>(Op);
6946     unsigned Opcode = VT.isVector() ? AMDGPUISD::BUFFER_ATOMIC_PK_FADD
6947                                     : AMDGPUISD::BUFFER_ATOMIC_FADD;
6948 
6949     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6950                                    M->getMemOperand());
6951   }
6952 
6953   case Intrinsic::amdgcn_global_atomic_fadd: {
6954     SDValue Ops[] = {
6955       Chain,
6956       Op.getOperand(2), // ptr
6957       Op.getOperand(3)  // vdata
6958     };
6959     EVT VT = Op.getOperand(3).getValueType();
6960 
6961     auto *M = cast<MemSDNode>(Op);
6962     unsigned Opcode = VT.isVector() ? AMDGPUISD::ATOMIC_PK_FADD
6963                                     : AMDGPUISD::ATOMIC_FADD;
6964 
6965     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6966                                    M->getMemOperand());
6967   }
6968 
6969   case Intrinsic::amdgcn_end_cf:
6970     return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other,
6971                                       Op->getOperand(2), Chain), 0);
6972 
6973   default: {
6974     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6975             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
6976       return lowerImage(Op, ImageDimIntr, DAG);
6977 
6978     return Op;
6979   }
6980   }
6981 }
6982 
6983 // The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args:
6984 // offset (the offset that is included in bounds checking and swizzling, to be
6985 // split between the instruction's voffset and immoffset fields) and soffset
6986 // (the offset that is excluded from bounds checking and swizzling, to go in
6987 // the instruction's soffset field).  This function takes the first kind of
6988 // offset and figures out how to split it between voffset and immoffset.
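     // For example, a combined offset of 4100 is returned as a voffset of 4096
     // plus an immoffset of 4, so the voffset copy/add stands a better chance of
     // being CSEd with that of a neighbouring access.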
6989 std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets(
6990     SDValue Offset, SelectionDAG &DAG) const {
6991   SDLoc DL(Offset);
6992   const unsigned MaxImm = 4095;
6993   SDValue N0 = Offset;
6994   ConstantSDNode *C1 = nullptr;
6995 
6996   if ((C1 = dyn_cast<ConstantSDNode>(N0)))
6997     N0 = SDValue();
6998   else if (DAG.isBaseWithConstantOffset(N0)) {
6999     C1 = cast<ConstantSDNode>(N0.getOperand(1));
7000     N0 = N0.getOperand(0);
7001   }
7002 
7003   if (C1) {
7004     unsigned ImmOffset = C1->getZExtValue();
7005     // If the immediate value is too big for the immoffset field, keep only
7006     // its low 12 bits in the immoffset field, so that the value copied/added
7007     // for the voffset field is a multiple of 4096 and stands more chance of
7008     // being CSEd with the copy/add for another similar load/store.
7009     // However, do not do that rounding down to a multiple of 4096 if that is a
7010     // negative number, as it appears to be illegal to have a negative offset
7011     // in the vgpr, even if adding the immediate offset makes it positive.
7012     unsigned Overflow = ImmOffset & ~MaxImm;
7013     ImmOffset -= Overflow;
7014     if ((int32_t)Overflow < 0) {
7015       Overflow += ImmOffset;
7016       ImmOffset = 0;
7017     }
7018     C1 = cast<ConstantSDNode>(DAG.getConstant(ImmOffset, DL, MVT::i32));
7019     if (Overflow) {
7020       auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32);
7021       if (!N0)
7022         N0 = OverflowVal;
7023       else {
7024         SDValue Ops[] = { N0, OverflowVal };
7025         N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops);
7026       }
7027     }
7028   }
7029   if (!N0)
7030     N0 = DAG.getConstant(0, DL, MVT::i32);
7031   if (!C1)
7032     C1 = cast<ConstantSDNode>(DAG.getConstant(0, DL, MVT::i32));
7033   return {N0, SDValue(C1, 0)};
7034 }
7035 
7036 // Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the
7037 // three offsets (voffset, soffset and instoffset) into the SDValue[3] array
7038 // pointed to by Offsets.
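     // Three forms are tried in order: a fully constant offset (split between the
     // soffset and instoffset fields), a base plus constant offset (base in
     // voffset, constant split by splitMUBUFOffset), and finally a fully variable
     // offset (everything in voffset, zero in the other two fields).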
7039 void SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
7040                                         SelectionDAG &DAG, SDValue *Offsets,
7041                                         unsigned Align) const {
7042   SDLoc DL(CombinedOffset);
7043   if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) {
7044     uint32_t Imm = C->getZExtValue();
7045     uint32_t SOffset, ImmOffset;
7046     if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, Align)) {
7047       Offsets[0] = DAG.getConstant(0, DL, MVT::i32);
7048       Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
7049       Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32);
7050       return;
7051     }
7052   }
7053   if (DAG.isBaseWithConstantOffset(CombinedOffset)) {
7054     SDValue N0 = CombinedOffset.getOperand(0);
7055     SDValue N1 = CombinedOffset.getOperand(1);
7056     uint32_t SOffset, ImmOffset;
7057     int Offset = cast<ConstantSDNode>(N1)->getSExtValue();
7058     if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset,
7059                                                 Subtarget, Align)) {
7060       Offsets[0] = N0;
7061       Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
7062       Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32);
7063       return;
7064     }
7065   }
7066   Offsets[0] = CombinedOffset;
7067   Offsets[1] = DAG.getConstant(0, DL, MVT::i32);
7068   Offsets[2] = DAG.getConstant(0, DL, MVT::i32);
7069 }
7070 
7071 // Handle 8 bit and 16 bit buffer loads
7072 SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG,
7073                                                      EVT LoadVT, SDLoc DL,
7074                                                      ArrayRef<SDValue> Ops,
7075                                                      MemSDNode *M) const {
7076   EVT IntVT = LoadVT.changeTypeToInteger();
7077   unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ?
7078          AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT;
7079 
7080   SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other);
7081   SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList,
7082                                                Ops, IntVT,
7083                                                M->getMemOperand());
7084   SDValue BufferLoadTrunc = DAG.getNode(ISD::TRUNCATE, DL,
7085                                         LoadVT.getScalarType(), BufferLoad);
7086   return DAG.getMergeValues({BufferLoadTrunc, BufferLoad.getValue(1)}, DL);
7087 }
7088 
7089 // Handle 8 bit and 16 bit buffer stores
7090 SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG,
7091                                                       EVT VDataType, SDLoc DL,
7092                                                       SDValue Ops[],
7093                                                       MemSDNode *M) const {
7094   SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]);
7095   Ops[1] = BufferStoreExt;
7096   unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE :
7097                                  AMDGPUISD::BUFFER_STORE_SHORT;
7098   ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9);
7099   return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType,
7100                                      M->getMemOperand());
7101 }
7102 
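     // Extend Op to VT according to the original load's extension kind, or
     // truncate it when VT is narrower than Op's type.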
7103 static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
7104                                  ISD::LoadExtType ExtType, SDValue Op,
7105                                  const SDLoc &SL, EVT VT) {
7106   if (VT.bitsLT(Op.getValueType()))
7107     return DAG.getNode(ISD::TRUNCATE, SL, VT, Op);
7108 
7109   switch (ExtType) {
7110   case ISD::SEXTLOAD:
7111     return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op);
7112   case ISD::ZEXTLOAD:
7113     return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op);
7114   case ISD::EXTLOAD:
7115     return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op);
7116   case ISD::NON_EXTLOAD:
7117     return Op;
7118   }
7119 
7120   llvm_unreachable("invalid ext type");
7121 }
7122 
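     // Widen a sub-dword load from constant (or invariant global) memory into a
     // full 32-bit load, then truncate/extend the result back to the original
     // type. Scalar memory instructions have no sub-dword loads, so this lets
     // uniform sub-dword loads still be selected as scalar loads.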
7123 SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
7124   SelectionDAG &DAG = DCI.DAG;
7125   if (Ld->getAlignment() < 4 || Ld->isDivergent())
7126     return SDValue();
7127 
7128   // FIXME: Constant loads should all be marked invariant.
7129   unsigned AS = Ld->getAddressSpace();
7130   if (AS != AMDGPUAS::CONSTANT_ADDRESS &&
7131       AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
7132       (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant()))
7133     return SDValue();
7134 
7135   // Don't do this early, since it may interfere with adjacent load merging for
7136   // illegal types. We can avoid losing alignment information for exotic types
7137   // pre-legalize.
7138   EVT MemVT = Ld->getMemoryVT();
7139   if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) ||
7140       MemVT.getSizeInBits() >= 32)
7141     return SDValue();
7142 
7143   SDLoc SL(Ld);
7144 
7145   assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
7146          "unexpected vector extload");
7147 
7148   // TODO: Drop only high part of range.
7149   SDValue Ptr = Ld->getBasePtr();
7150   SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
7151                                 MVT::i32, SL, Ld->getChain(), Ptr,
7152                                 Ld->getOffset(),
7153                                 Ld->getPointerInfo(), MVT::i32,
7154                                 Ld->getAlignment(),
7155                                 Ld->getMemOperand()->getFlags(),
7156                                 Ld->getAAInfo(),
7157                                 nullptr); // Drop ranges
7158 
7159   EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
7160   if (MemVT.isFloatingPoint()) {
7161     assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
7162            "unexpected fp extload");
7163     TruncVT = MemVT.changeTypeToInteger();
7164   }
7165 
7166   SDValue Cvt = NewLoad;
7167   if (Ld->getExtensionType() == ISD::SEXTLOAD) {
7168     Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad,
7169                       DAG.getValueType(TruncVT));
7170   } else if (Ld->getExtensionType() == ISD::ZEXTLOAD ||
7171              Ld->getExtensionType() == ISD::NON_EXTLOAD) {
7172     Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT);
7173   } else {
7174     assert(Ld->getExtensionType() == ISD::EXTLOAD);
7175   }
7176 
7177   EVT VT = Ld->getValueType(0);
7178   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
7179 
7180   DCI.AddToWorklist(Cvt.getNode());
7181 
7182   // We may need to handle exotic cases, such as i16->i64 extloads, so insert
7183   // the appropriate extension from the 32-bit load.
7184   Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT);
7185   DCI.AddToWorklist(Cvt.getNode());
7186 
7187   // Handle conversion back to floating point if necessary.
7188   Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt);
7189 
7190   return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL);
7191 }
7192 
7193 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7194   SDLoc DL(Op);
7195   LoadSDNode *Load = cast<LoadSDNode>(Op);
7196   ISD::LoadExtType ExtType = Load->getExtensionType();
7197   EVT MemVT = Load->getMemoryVT();
7198 
7199   if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
7200     if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
7201       return SDValue();
7202 
7203     // FIXME: Copied from PPC
7204     // First, load into 32 bits, then truncate back to the original sub-dword type.
7205 
7206     SDValue Chain = Load->getChain();
7207     SDValue BasePtr = Load->getBasePtr();
7208     MachineMemOperand *MMO = Load->getMemOperand();
7209 
7210     EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
7211 
7212     SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
7213                                    BasePtr, RealMemVT, MMO);
7214 
7215     if (!MemVT.isVector()) {
7216       SDValue Ops[] = {
7217         DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
7218         NewLD.getValue(1)
7219       };
7220 
7221       return DAG.getMergeValues(Ops, DL);
7222     }
7223 
7224     SmallVector<SDValue, 3> Elts;
7225     for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) {
7226       SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD,
7227                                 DAG.getConstant(I, DL, MVT::i32));
7228 
7229       Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt));
7230     }
7231 
7232     SDValue Ops[] = {
7233       DAG.getBuildVector(MemVT, DL, Elts),
7234       NewLD.getValue(1)
7235     };
7236 
7237     return DAG.getMergeValues(Ops, DL);
7238   }
7239 
7240   if (!MemVT.isVector())
7241     return SDValue();
7242 
7243   assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
7244          "Custom lowering for non-i32 vectors hasn't been implemented.");
7245 
7246   if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
7247                           *Load->getMemOperand())) {
7248     SDValue Ops[2];
7249     std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
7250     return DAG.getMergeValues(Ops, DL);
7251   }
7252 
7253   unsigned Alignment = Load->getAlignment();
7254   unsigned AS = Load->getAddressSpace();
7255   if (Subtarget->hasLDSMisalignedBug() &&
7256       AS == AMDGPUAS::FLAT_ADDRESS &&
7257       Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) {
7258     return SplitVectorLoad(Op, DAG);
7259   }
7260 
7261   MachineFunction &MF = DAG.getMachineFunction();
7262   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
7263   // If there is a possibility that flat instructions access scratch memory
7264   // then we need to use the same legalization rules we use for private.
7265   if (AS == AMDGPUAS::FLAT_ADDRESS)
7266     AS = MFI->hasFlatScratchInit() ?
7267          AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
7268 
7269   unsigned NumElements = MemVT.getVectorNumElements();
7270 
7271   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
7272       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
7273     if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) {
7274       if (MemVT.isPow2VectorType())
7275         return SDValue();
7276       if (NumElements == 3)
7277         return WidenVectorLoad(Op, DAG);
7278       return SplitVectorLoad(Op, DAG);
7279     }
7280     // Non-uniform loads will be selected to MUBUF instructions, so they
7281     // have the same legalization requirements as global and private
7282     // loads.
7283     //
7284   }
7285 
7286   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
7287       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
7288       AS == AMDGPUAS::GLOBAL_ADDRESS) {
7289     if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
7290         !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) &&
7291         Alignment >= 4 && NumElements < 32) {
7292       if (MemVT.isPow2VectorType())
7293         return SDValue();
7294       if (NumElements == 3)
7295         return WidenVectorLoad(Op, DAG);
7296       return SplitVectorLoad(Op, DAG);
7297     }
7298     // Non-uniform loads will be selected to MUBUF instructions, so they
7299     // have the same legalization requirements as global and private
7300     // loads.
7301     //
7302   }
7303   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
7304       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
7305       AS == AMDGPUAS::GLOBAL_ADDRESS ||
7306       AS == AMDGPUAS::FLAT_ADDRESS) {
7307     if (NumElements > 4)
7308       return SplitVectorLoad(Op, DAG);
7309     // v3 loads not supported on SI.
7310     if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7311       return WidenVectorLoad(Op, DAG);
7312     // v3 and v4 loads are supported for private and global memory.
7313     return SDValue();
7314   }
7315   if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
7316     // Depending on the setting of the private_element_size field in the
7317     // resource descriptor, we can only make private accesses up to a certain
7318     // size.
7319     switch (Subtarget->getMaxPrivateElementSize()) {
7320     case 4:
7321       return scalarizeVectorLoad(Load, DAG);
7322     case 8:
7323       if (NumElements > 2)
7324         return SplitVectorLoad(Op, DAG);
7325       return SDValue();
7326     case 16:
7327       // Same as global/flat
7328       if (NumElements > 4)
7329         return SplitVectorLoad(Op, DAG);
7330       // v3 loads not supported on SI.
7331       if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7332         return WidenVectorLoad(Op, DAG);
7333       return SDValue();
7334     default:
7335       llvm_unreachable("unsupported private_element_size");
7336     }
7337   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
7338     // Use ds_read_b128 if possible.
7339     if (Subtarget->useDS128() && Load->getAlignment() >= 16 &&
7340         MemVT.getStoreSize() == 16)
7341       return SDValue();
7342 
7343     if (NumElements > 2)
7344       return SplitVectorLoad(Op, DAG);
7345 
7346     // SI has a hardware bug in the LDS / GDS bounds checking: if the base
7347     // address is negative, then the instruction is incorrectly treated as
7348     // out-of-bounds even if base + offsets is in bounds. Split vectorized
7349     // loads here to avoid emitting ds_read2_b32. We may re-combine the
7350     // load later in the SILoadStoreOptimizer.
7351     if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
7352         NumElements == 2 && MemVT.getStoreSize() == 8 &&
7353         Load->getAlignment() < 8) {
7354       return SplitVectorLoad(Op, DAG);
7355     }
7356   }
7357   return SDValue();
7358 }
7359 
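     // Lower a 64-bit select by bitcasting both values to v2i32, selecting the
     // low and high halves separately, and bitcasting the rebuilt vector back to
     // the original 64-bit type.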
7360 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
7361   EVT VT = Op.getValueType();
7362   assert(VT.getSizeInBits() == 64);
7363 
7364   SDLoc DL(Op);
7365   SDValue Cond = Op.getOperand(0);
7366 
7367   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
7368   SDValue One = DAG.getConstant(1, DL, MVT::i32);
7369 
7370   SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
7371   SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
7372 
7373   SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
7374   SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
7375 
7376   SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
7377 
7378   SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
7379   SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
7380 
7381   SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
7382 
7383   SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
7384   return DAG.getNode(ISD::BITCAST, DL, VT, Res);
7385 }
7386 
7387 // Catch division cases where we can use shortcuts with rcp and rsq
7388 // instructions.
7389 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
7390                                               SelectionDAG &DAG) const {
7391   SDLoc SL(Op);
7392   SDValue LHS = Op.getOperand(0);
7393   SDValue RHS = Op.getOperand(1);
7394   EVT VT = Op.getValueType();
7395   const SDNodeFlags Flags = Op->getFlags();
7396   bool Unsafe = DAG.getTarget().Options.UnsafeFPMath || Flags.hasAllowReciprocal();
7397 
7398   if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
7399     return SDValue();
7400 
7401   if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
7402     if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
7403       if (CLHS->isExactlyValue(1.0)) {
7404         // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
7405         // the CI documentation have a worst case error of 1 ulp.
7406         // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
7407         // use it as long as we aren't trying to use denormals.
7408         //
7409         // v_rcp_f16 and v_rsq_f16 DO support denormals.
7410 
7411         // 1.0 / sqrt(x) -> rsq(x)
7412 
7413         // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
7414         // error seems really high at 2^29 ULP.
7415         if (RHS.getOpcode() == ISD::FSQRT)
7416           return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
7417 
7418         // 1.0 / x -> rcp(x)
7419         return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
7420       }
7421 
7422       // Same as for 1.0, but expand the sign out of the constant.
7423       if (CLHS->isExactlyValue(-1.0)) {
7424         // -1.0 / x -> rcp (fneg x)
7425         SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
7426         return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
7427       }
7428     }
7429   }
7430 
7431   if (Unsafe) {
7432     // Turn into multiply by the reciprocal.
7433     // x / y -> x * (1.0 / y)
7434     SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
7435     return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
7436   }
7437 
7438   return SDValue();
7439 }
7440 
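     // Emit Opcode normally, or its chain/glue-carrying AMDGPU equivalent when
     // GlueChain also produces a chain and glue (three result values).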
7441 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
7442                           EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
7443   if (GlueChain->getNumValues() <= 1) {
7444     return DAG.getNode(Opcode, SL, VT, A, B);
7445   }
7446 
7447   assert(GlueChain->getNumValues() == 3);
7448 
7449   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
7450   switch (Opcode) {
7451   default: llvm_unreachable("no chain equivalent for opcode");
7452   case ISD::FMUL:
7453     Opcode = AMDGPUISD::FMUL_W_CHAIN;
7454     break;
7455   }
7456 
7457   return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
7458                      GlueChain.getValue(2));
7459 }
7460 
7461 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
7462                            EVT VT, SDValue A, SDValue B, SDValue C,
7463                            SDValue GlueChain) {
7464   if (GlueChain->getNumValues() <= 1) {
7465     return DAG.getNode(Opcode, SL, VT, A, B, C);
7466   }
7467 
7468   assert(GlueChain->getNumValues() == 3);
7469 
7470   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
7471   switch (Opcode) {
7472   default: llvm_unreachable("no chain equivalent for opcode");
7473   case ISD::FMA:
7474     Opcode = AMDGPUISD::FMA_W_CHAIN;
7475     break;
7476   }
7477 
7478   return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
7479                      GlueChain.getValue(2));
7480 }
7481 
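     // f16 division: extend both operands to f32, multiply by the f32 reciprocal,
     // round the quotient back to f16, and let div_fixup patch up special cases
     // such as infinities, NaNs and division by zero.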
7482 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
7483   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
7484     return FastLowered;
7485 
7486   SDLoc SL(Op);
7487   SDValue Src0 = Op.getOperand(0);
7488   SDValue Src1 = Op.getOperand(1);
7489 
7490   SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
7491   SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
7492 
7493   SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
7494   SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
7495 
7496   SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
7497   SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
7498 
7499   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
7500 }
7501 
7502 // Faster 2.5 ULP division that does not support denormals.
7503 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
7504   SDLoc SL(Op);
7505   SDValue LHS = Op.getOperand(1);
7506   SDValue RHS = Op.getOperand(2);
7507 
7508   SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
7509 
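       // K0 is 2^96 and K1 is 2^-32. If |RHS| exceeds 2^96, the denominator is
       // pre-scaled by 2^-32 so that its reciprocal stays in the normal range
       // (rcp flushes denormal results), and the final quotient is multiplied by
       // the same factor to compensate.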
7510   const APFloat K0Val(BitsToFloat(0x6f800000));
7511   const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
7512 
7513   const APFloat K1Val(BitsToFloat(0x2f800000));
7514   const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
7515 
7516   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
7517 
7518   EVT SetCCVT =
7519     getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
7520 
7521   SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
7522 
7523   SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
7524 
7525   // TODO: Should this propagate fast-math-flags?
7526   r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
7527 
7528   // rcp does not support denormals.
7529   SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
7530 
7531   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
7532 
7533   return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
7534 }
7535 
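     // Full f32 division: scale the operands with div_scale, form an initial
     // reciprocal estimate, refine the reciprocal and the quotient with a chain
     // of FMAs, then combine with div_fmas and fix up special cases with
     // div_fixup. When the target flushes f32 denormals, denormal handling is
     // temporarily enabled around the FMA chain since the scaled intermediate
     // values may be denormal.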
7536 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
7537   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
7538     return FastLowered;
7539 
7540   SDLoc SL(Op);
7541   SDValue LHS = Op.getOperand(0);
7542   SDValue RHS = Op.getOperand(1);
7543 
7544   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
7545 
7546   SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
7547 
7548   SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
7549                                           RHS, RHS, LHS);
7550   SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
7551                                         LHS, RHS, LHS);
7552 
7553   // Denominator is scaled to not be denormal, so using rcp is ok.
7554   SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
7555                                   DenominatorScaled);
7556   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
7557                                      DenominatorScaled);
7558 
7559   const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
7560                                (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
7561                                (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
7562 
7563   const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);
7564 
7565   if (!Subtarget->hasFP32Denormals()) {
7566     SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
7567     const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
7568                                                       SL, MVT::i32);
7569     SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
7570                                        DAG.getEntryNode(),
7571                                        EnableDenormValue, BitField);
7572     SDValue Ops[3] = {
7573       NegDivScale0,
7574       EnableDenorm.getValue(0),
7575       EnableDenorm.getValue(1)
7576     };
7577 
7578     NegDivScale0 = DAG.getMergeValues(Ops, SL);
7579   }
7580 
7581   SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
7582                              ApproxRcp, One, NegDivScale0);
7583 
7584   SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
7585                              ApproxRcp, Fma0);
7586 
7587   SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
7588                            Fma1, Fma1);
7589 
7590   SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
7591                              NumeratorScaled, Mul);
7592 
7593   SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul, Fma2);
7594 
7595   SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
7596                              NumeratorScaled, Fma3);
7597 
7598   if (!Subtarget->hasFP32Denormals()) {
7599     const SDValue DisableDenormValue =
7600         DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
7601     SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
7602                                         Fma4.getValue(1),
7603                                         DisableDenormValue,
7604                                         BitField,
7605                                         Fma4.getValue(2));
7606 
7607     SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
7608                                       DisableDenorm, DAG.getRoot());
7609     DAG.setRoot(OutputChain);
7610   }
7611 
7612   SDValue Scale = NumeratorScaled.getValue(1);
7613   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
7614                              Fma4, Fma1, Fma3, Scale);
7615 
7616   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
7617 }
7618 
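     // f64 division follows the same div_scale / reciprocal refinement /
     // div_fmas / div_fixup structure as the f32 path, plus a workaround for
     // subtargets where the condition output of div_scale is not usable.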
7619 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
7620   if (DAG.getTarget().Options.UnsafeFPMath)
7621     return lowerFastUnsafeFDIV(Op, DAG);
7622 
7623   SDLoc SL(Op);
7624   SDValue X = Op.getOperand(0);
7625   SDValue Y = Op.getOperand(1);
7626 
7627   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
7628 
7629   SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
7630 
7631   SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
7632 
7633   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
7634 
7635   SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
7636 
7637   SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
7638 
7639   SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
7640 
7641   SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
7642 
7643   SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
7644 
7645   SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
7646   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
7647 
7648   SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
7649                              NegDivScale0, Mul, DivScale1);
7650 
7651   SDValue Scale;
7652 
7653   if (!Subtarget->hasUsableDivScaleConditionOutput()) {
7654     // Workaround a hardware bug on SI where the condition output from div_scale
7655     // is not usable.
7656 
7657     const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
7658 
7659     // Figure out which scale to use for div_fmas.
7660     SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
7661     SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
7662     SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
7663     SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
7664 
7665     SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
7666     SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
7667 
7668     SDValue Scale0Hi
7669       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
7670     SDValue Scale1Hi
7671       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
7672 
7673     SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
7674     SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
7675     Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
7676   } else {
7677     Scale = DivScale1.getValue(1);
7678   }
7679 
7680   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
7681                              Fma4, Fma3, Mul, Scale);
7682 
7683   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
7684 }
7685 
7686 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
7687   EVT VT = Op.getValueType();
7688 
7689   if (VT == MVT::f32)
7690     return LowerFDIV32(Op, DAG);
7691 
7692   if (VT == MVT::f64)
7693     return LowerFDIV64(Op, DAG);
7694 
7695   if (VT == MVT::f16)
7696     return LowerFDIV16(Op, DAG);
7697 
7698   llvm_unreachable("Unexpected type for fdiv");
7699 }
7700 
7701 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7702   SDLoc DL(Op);
7703   StoreSDNode *Store = cast<StoreSDNode>(Op);
7704   EVT VT = Store->getMemoryVT();
7705 
7706   if (VT == MVT::i1) {
7707     return DAG.getTruncStore(Store->getChain(), DL,
7708        DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
7709        Store->getBasePtr(), MVT::i1, Store->getMemOperand());
7710   }
7711 
7712   assert(VT.isVector() &&
7713          Store->getValue().getValueType().getScalarType() == MVT::i32);
7714 
7715   if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
7716                           *Store->getMemOperand())) {
7717     return expandUnalignedStore(Store, DAG);
7718   }
7719 
7720   unsigned AS = Store->getAddressSpace();
7721   if (Subtarget->hasLDSMisalignedBug() &&
7722       AS == AMDGPUAS::FLAT_ADDRESS &&
7723       Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) {
7724     return SplitVectorStore(Op, DAG);
7725   }
7726 
7727   MachineFunction &MF = DAG.getMachineFunction();
7728   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
7729   // If there is a possibility that flat instructions access scratch memory
7730   // then we need to use the same legalization rules we use for private.
7731   if (AS == AMDGPUAS::FLAT_ADDRESS)
7732     AS = MFI->hasFlatScratchInit() ?
7733          AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
7734 
7735   unsigned NumElements = VT.getVectorNumElements();
7736   if (AS == AMDGPUAS::GLOBAL_ADDRESS ||
7737       AS == AMDGPUAS::FLAT_ADDRESS) {
7738     if (NumElements > 4)
7739       return SplitVectorStore(Op, DAG);
7740     // v3 stores not supported on SI.
7741     if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7742       return SplitVectorStore(Op, DAG);
7743     return SDValue();
7744   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
7745     switch (Subtarget->getMaxPrivateElementSize()) {
7746     case 4:
7747       return scalarizeVectorStore(Store, DAG);
7748     case 8:
7749       if (NumElements > 2)
7750         return SplitVectorStore(Op, DAG);
7751       return SDValue();
7752     case 16:
7753       if (NumElements > 4 || NumElements == 3)
7754         return SplitVectorStore(Op, DAG);
7755       return SDValue();
7756     default:
7757       llvm_unreachable("unsupported private_element_size");
7758     }
7759   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
7760     // Use ds_write_b128 if possible.
7761     if (Subtarget->useDS128() && Store->getAlignment() >= 16 &&
7762         VT.getStoreSize() == 16 && NumElements != 3)
7763       return SDValue();
7764 
7765     if (NumElements > 2)
7766       return SplitVectorStore(Op, DAG);
7767 
7768     // SI has a hardware bug in the LDS / GDS bounds checking: if the base
7769     // address is negative, then the instruction is incorrectly treated as
7770     // out-of-bounds even if base + offsets is in bounds. Split vectorized
7771     // stores here to avoid emitting ds_write2_b32. We may re-combine the
7772     // store later in the SILoadStoreOptimizer.
7773     if (!Subtarget->hasUsableDSOffset() &&
7774         NumElements == 2 && VT.getStoreSize() == 8 &&
7775         Store->getAlignment() < 8) {
7776       return SplitVectorStore(Op, DAG);
7777     }
7778 
7779     return SDValue();
7780   } else {
7781     llvm_unreachable("unhandled address space");
7782   }
7783 }
7784 
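     // sin/cos lowering: the hardware SIN/COS instructions expect the argument
     // scaled by 1/(2*pi); subtargets with a reduced trig input range additionally
     // need the scaled argument reduced to [0, 1) with FRACT first.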
7785 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
7786   SDLoc DL(Op);
7787   EVT VT = Op.getValueType();
7788   SDValue Arg = Op.getOperand(0);
7789   SDValue TrigVal;
7790 
7791   // TODO: Should this propagate fast-math-flags?
7792 
7793   SDValue OneOver2Pi = DAG.getConstantFP(0.5 / M_PI, DL, VT);
7794 
7795   if (Subtarget->hasTrigReducedRange()) {
7796     SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
7797     TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal);
7798   } else {
7799     TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
7800   }
7801 
7802   switch (Op.getOpcode()) {
7803   case ISD::FCOS:
7804     return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal);
7805   case ISD::FSIN:
7806     return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal);
7807   default:
7808     llvm_unreachable("Wrong trig opcode");
7809   }
7810 }
7811 
7812 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
7813   AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
7814   assert(AtomicNode->isCompareAndSwap());
7815   unsigned AS = AtomicNode->getAddressSpace();
7816 
7817   // No custom lowering required for local address space
7818   if (!isFlatGlobalAddrSpace(AS))
7819     return Op;
7820 
7821   // Non-local address spaces require custom lowering for atomic compare and
7822   // swap; the compare and new values are packed into a v2i32 (v2i64 for _X2).
7823   SDLoc DL(Op);
7824   SDValue ChainIn = Op.getOperand(0);
7825   SDValue Addr = Op.getOperand(1);
7826   SDValue Old = Op.getOperand(2);
7827   SDValue New = Op.getOperand(3);
7828   EVT VT = Op.getValueType();
7829   MVT SimpleVT = VT.getSimpleVT();
7830   MVT VecType = MVT::getVectorVT(SimpleVT, 2);
7831 
7832   SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
7833   SDValue Ops[] = { ChainIn, Addr, NewOld };
7834 
7835   return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
7836                                  Ops, VT, AtomicNode->getMemOperand());
7837 }
7838 
7839 //===----------------------------------------------------------------------===//
7840 // Custom DAG optimizations
7841 //===----------------------------------------------------------------------===//
7842 
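     // If the integer source of a conversion to f32 is known to fit in a byte
     // (its top 24 bits are zero), convert the low byte directly with
     // CVT_F32_UBYTE0.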
7843 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
7844                                                      DAGCombinerInfo &DCI) const {
7845   EVT VT = N->getValueType(0);
7846   EVT ScalarVT = VT.getScalarType();
7847   if (ScalarVT != MVT::f32)
7848     return SDValue();
7849 
7850   SelectionDAG &DAG = DCI.DAG;
7851   SDLoc DL(N);
7852 
7853   SDValue Src = N->getOperand(0);
7854   EVT SrcVT = Src.getValueType();
7855 
7856   // TODO: We could try to match extracting the higher bytes, which would be
7857   // easier if i8 vectors weren't promoted to i32 vectors, particularly after
7858   // types are legalized. v4i8 -> v4f32 is probably the only case to worry
7859   // about in practice.
7860   if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
7861     if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
7862       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
7863       DCI.AddToWorklist(Cvt.getNode());
7864       return Cvt;
7865     }
7866   }
7867 
7868   return SDValue();
7869 }
7870 
7871 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
7872 
7873 // This is a variant of
7874 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
7875 //
7876 // The normal DAG combiner will do this, but only if the add has one use since
7877 // that would increase the number of instructions.
7878 //
7879 // This prevents us from seeing a constant offset that can be folded into a
7880 // memory instruction's addressing mode. If we know the resulting add offset of
7881 // a pointer can be folded into an addressing offset, we can replace the pointer
7882 // operand with the add of the new constant offset. This eliminates one of
7883 // the uses, and may allow the remaining use to also be simplified.
7884 //
7885 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
7886                                                unsigned AddrSpace,
7887                                                EVT MemVT,
7888                                                DAGCombinerInfo &DCI) const {
7889   SDValue N0 = N->getOperand(0);
7890   SDValue N1 = N->getOperand(1);
7891 
7892   // We only do this to handle cases where it's profitable when there are
7893   // multiple uses of the add, so defer to the standard combine.
7894   if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
7895       N0->hasOneUse())
7896     return SDValue();
7897 
7898   const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
7899   if (!CN1)
7900     return SDValue();
7901 
7902   const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
7903   if (!CAdd)
7904     return SDValue();
7905 
7906   // If the resulting offset is too large, we can't fold it into the addressing
7907   // mode offset.
7908   APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
7909   Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());
7910 
7911   AddrMode AM;
7912   AM.HasBaseReg = true;
7913   AM.BaseOffs = Offset.getSExtValue();
7914   if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
7915     return SDValue();
7916 
7917   SelectionDAG &DAG = DCI.DAG;
7918   SDLoc SL(N);
7919   EVT VT = N->getValueType(0);
7920 
7921   SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
7922   SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
7923 
7924   SDNodeFlags Flags;
7925   Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
7926                           (N0.getOpcode() == ISD::OR ||
7927                            N0->getFlags().hasNoUnsignedWrap()));
7928 
7929   return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
7930 }
7931 
7932 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
7933                                                   DAGCombinerInfo &DCI) const {
7934   SDValue Ptr = N->getBasePtr();
7935   SelectionDAG &DAG = DCI.DAG;
7936   SDLoc SL(N);
7937 
7938   // TODO: We could also do this for multiplies.
7939   if (Ptr.getOpcode() == ISD::SHL) {
7940     SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(),
7941                                           N->getMemoryVT(), DCI);
7942     if (NewPtr) {
7943       SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
7944 
7945       NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
7946       return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
7947     }
7948   }
7949 
7950   return SDValue();
7951 }
7952 
7953 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
7954   return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
7955          (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
7956          (Opc == ISD::XOR && Val == 0);
7957 }
7958 
7959 // Break up a 64-bit bitwise operation with a constant into two 32-bit
7960 // and/or/xor ops. This will typically happen anyway for a VALU 64-bit and,
7961 // and it exposes other 32-bit integer combine opportunities since most 64-bit
7962 // operations are decomposed this way.
7963 // TODO: We won't want this for SALU, especially with an inline immediate.
7964 SDValue SITargetLowering::splitBinaryBitConstantOp(
7965   DAGCombinerInfo &DCI,
7966   const SDLoc &SL,
7967   unsigned Opc, SDValue LHS,
7968   const ConstantSDNode *CRHS) const {
7969   uint64_t Val = CRHS->getZExtValue();
7970   uint32_t ValLo = Lo_32(Val);
7971   uint32_t ValHi = Hi_32(Val);
7972   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7973 
7974   if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
7975        bitOpWithConstantIsReducible(Opc, ValHi)) ||
7976       (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
7977     // If we need to materialize a 64-bit immediate, it will be split up later
7978     // anyway. Avoid creating the harder to understand 64-bit immediate
7979     // materialization.
7980     return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
7981   }
7982 
7983   return SDValue();
7984 }
7985 
7986 // Returns true if the argument is a boolean value that is not serialized into
7987 // memory or an argument and does not require v_cndmask_b32 to be deserialized.
7988 static bool isBoolSGPR(SDValue V) {
7989   if (V.getValueType() != MVT::i1)
7990     return false;
7991   switch (V.getOpcode()) {
7992   default: break;
7993   case ISD::SETCC:
7994   case ISD::AND:
7995   case ISD::OR:
7996   case ISD::XOR:
7997   case AMDGPUISD::FP_CLASS:
7998     return true;
7999   }
8000   return false;
8001 }
8002 
8003 // If a constant has all zeroes or all ones within each byte return it.
8004 // Otherwise return 0.
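     // For example, 0x00ff00ff is returned unchanged, while 0x00f0ff00 yields 0
     // because one of its bytes (0xf0) is only partially selected.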
8005 static uint32_t getConstantPermuteMask(uint32_t C) {
8006   // 0xff for any zero byte in the mask
8007   uint32_t ZeroByteMask = 0;
8008   if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
8009   if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
8010   if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
8011   if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
8012   uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte
8013   if ((NonZeroByteMask & C) != NonZeroByteMask)
8014     return 0; // Partial bytes selected.
8015   return C;
8016 }
8017 
8018 // Check if a node selects whole bytes from its operand 0 starting at a byte
8019 // boundary while masking the rest. Returns the select mask as used by
8020 // v_perm_b32, or ~0 if it did not succeed.
8021 // Note byte select encoding:
8022 // value 0-3 selects corresponding source byte;
8023 // value 0xc selects zero;
8024 // value 0xff selects 0xff.
8025 static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
8026   assert(V.getValueSizeInBits() == 32);
8027 
8028   if (V.getNumOperands() != 2)
8029     return ~0;
8030 
8031   ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
8032   if (!N1)
8033     return ~0;
8034 
8035   uint32_t C = N1->getZExtValue();
8036 
8037   switch (V.getOpcode()) {
8038   default:
8039     break;
8040   case ISD::AND:
8041     if (uint32_t ConstMask = getConstantPermuteMask(C)) {
8042       return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
8043     }
8044     break;
8045 
8046   case ISD::OR:
8047     if (uint32_t ConstMask = getConstantPermuteMask(C)) {
8048       return (0x03020100 & ~ConstMask) | ConstMask;
8049     }
8050     break;
8051 
8052   case ISD::SHL:
8053     if (C % 8)
8054       return ~0;
8055 
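         // Shift the packed identity/zero byte selectors left by C bits and keep
         // the high half. For C == 8 this yields 0x0201000c: the low result byte
         // is zero and the remaining bytes take source bytes 0-2, matching x << 8.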
8056     return uint32_t((0x030201000c0c0c0cull << C) >> 32);
8057 
8058   case ISD::SRL:
8059     if (C % 8)
8060       return ~0;
8061 
8062     return uint32_t(0x0c0c0c0c03020100ull >> C);
8063   }
8064 
8065   return ~0;
8066 }
8067 
8068 SDValue SITargetLowering::performAndCombine(SDNode *N,
8069                                             DAGCombinerInfo &DCI) const {
8070   if (DCI.isBeforeLegalize())
8071     return SDValue();
8072 
8073   SelectionDAG &DAG = DCI.DAG;
8074   EVT VT = N->getValueType(0);
8075   SDValue LHS = N->getOperand(0);
8076   SDValue RHS = N->getOperand(1);
8077 
8078 
8079   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
8080   if (VT == MVT::i64 && CRHS) {
8081     if (SDValue Split
8082         = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
8083       return Split;
8084   }
8085 
8086   if (CRHS && VT == MVT::i32) {
8087     // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
8088     // nb = number of trailing zeroes in mask
8089     // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
8090     // given that we are selecting 8 or 16 bit fields starting at byte boundary.
8091     uint64_t Mask = CRHS->getZExtValue();
8092     unsigned Bits = countPopulation(Mask);
8093     if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
8094         (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
8095       if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
8096         unsigned Shift = CShift->getZExtValue();
8097         unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
8098         unsigned Offset = NB + Shift;
8099         if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
8100           SDLoc SL(N);
8101           SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
8102                                     LHS->getOperand(0),
8103                                     DAG.getConstant(Offset, SL, MVT::i32),
8104                                     DAG.getConstant(Bits, SL, MVT::i32));
8105           EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
8106           SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
8107                                     DAG.getValueType(NarrowVT));
8108           SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
8109                                     DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
8110           return Shl;
8111         }
8112       }
8113     }
8114 
8115     // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
8116     if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM &&
8117         isa<ConstantSDNode>(LHS.getOperand(2))) {
8118       uint32_t Sel = getConstantPermuteMask(Mask);
8119       if (!Sel)
8120         return SDValue();
8121 
8122       // Select 0xc for all zero bytes
8123       Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c);
8124       SDLoc DL(N);
8125       return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
8126                          LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
8127     }
8128   }
8129 
8130   // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
8131   // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
8132   if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
8133     ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8134     ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
8135 
8136     SDValue X = LHS.getOperand(0);
8137     SDValue Y = RHS.getOperand(0);
8138     if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
8139       return SDValue();
8140 
8141     if (LCC == ISD::SETO) {
8142       if (X != LHS.getOperand(1))
8143         return SDValue();
8144 
8145       if (RCC == ISD::SETUNE) {
8146         const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
8147         if (!C1 || !C1->isInfinity() || C1->isNegative())
8148           return SDValue();
8149 
8150         const uint32_t Mask = SIInstrFlags::N_NORMAL |
8151                               SIInstrFlags::N_SUBNORMAL |
8152                               SIInstrFlags::N_ZERO |
8153                               SIInstrFlags::P_ZERO |
8154                               SIInstrFlags::P_SUBNORMAL |
8155                               SIInstrFlags::P_NORMAL;
8156 
8157         static_assert(((~(SIInstrFlags::S_NAN |
8158                           SIInstrFlags::Q_NAN |
8159                           SIInstrFlags::N_INFINITY |
8160                           SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
8161                       "mask not equal");
8162 
8163         SDLoc DL(N);
8164         return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
8165                            X, DAG.getConstant(Mask, DL, MVT::i32));
8166       }
8167     }
8168   }
8169 
8170   if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS)
8171     std::swap(LHS, RHS);
8172 
8173   if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS &&
8174       RHS.hasOneUse()) {
8175     ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
    // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(s_nan | q_nan)
    // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (s_nan | q_nan)
8178     const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
8179     if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask &&
8180         (RHS.getOperand(0) == LHS.getOperand(0) &&
8181          LHS.getOperand(0) == LHS.getOperand(1))) {
8182       const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN;
8183       unsigned NewMask = LCC == ISD::SETO ?
8184         Mask->getZExtValue() & ~OrdMask :
8185         Mask->getZExtValue() & OrdMask;
8186 
8187       SDLoc DL(N);
8188       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0),
8189                          DAG.getConstant(NewMask, DL, MVT::i32));
8190     }
8191   }
8192 
8193   if (VT == MVT::i32 &&
8194       (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
8195     // and x, (sext cc from i1) => select cc, x, 0
8196     if (RHS.getOpcode() != ISD::SIGN_EXTEND)
8197       std::swap(LHS, RHS);
8198     if (isBoolSGPR(RHS.getOperand(0)))
8199       return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
8200                            LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
8201   }
8202 
8203   // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
8204   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8205   if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
8206       N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
8207     uint32_t LHSMask = getPermuteMask(DAG, LHS);
8208     uint32_t RHSMask = getPermuteMask(DAG, RHS);
8209     if (LHSMask != ~0u && RHSMask != ~0u) {
8210       // Canonicalize the expression in an attempt to have fewer unique masks
8211       // and therefore fewer registers used to hold the masks.
8212       if (LHSMask > RHSMask) {
8213         std::swap(LHSMask, RHSMask);
8214         std::swap(LHS, RHS);
8215       }
8216 
      // Compute the lanes actually used from each source operand: a byte whose
      // selector is a real lane (0-3) gets 0xc in the used-lanes mask; bytes
      // that are already zero (selector 0xc) or all ones (0xff) contribute 0.
8219       uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8220       uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8221 
      // Check if we need to combine values from two sources within a byte.
8223       if (!(LHSUsedLanes & RHSUsedLanes) &&
8224           // If we select high and lower word keep it for SDWA.
8225           // TODO: teach SDWA to work with v_perm_b32 and remove the check.
8226           !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
        // Each byte in each mask is either a real selector (0-3), 0xff, or
        // 0x0c (zero). If either mask has 0x0c in a byte, the result byte must
        // be 0x0c; otherwise the mask which is not 0xff wins. ANDing both
        // masks therefore gives the correct result everywhere except where one
        // mask has 0x0c and the other selects a real lane; the loop below
        // corrects those bytes to 0x0c.
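        // For example, for and (or x, 0x00ffff00), (or y, 0xff0000ff):
        //   LHSMask = 0x03ffff00, RHSMask = 0xff0201ff,
        //   LHSUsedLanes = 0x0c00000c, RHSUsedLanes = 0x000c0c00 (disjoint),
        //   Mask = 0x03020100 and Sel = 0x03020100 | 0x04000004 = 0x07020104,
        // i.e. select x.byte3, y.byte2, y.byte1, x.byte0.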
8232         uint32_t Mask = LHSMask & RHSMask;
8233         for (unsigned I = 0; I < 32; I += 8) {
          uint32_t ByteSel = 0xffu << I;
          if ((LHSMask & ByteSel) == (0x0cu << I) ||
              (RHSMask & ByteSel) == (0x0cu << I))
            Mask = (Mask & ~ByteSel) | (0x0cu << I);
8237         }
8238 
8239         // Add 4 to each active LHS lane. It will not affect any existing 0xff
8240         // or 0x0c.
8241         uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404);
8242         SDLoc DL(N);
8243 
8244         return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
8245                            LHS.getOperand(0), RHS.getOperand(0),
8246                            DAG.getConstant(Sel, DL, MVT::i32));
8247       }
8248     }
8249   }
8250 
8251   return SDValue();
8252 }
8253 
8254 SDValue SITargetLowering::performOrCombine(SDNode *N,
8255                                            DAGCombinerInfo &DCI) const {
8256   SelectionDAG &DAG = DCI.DAG;
8257   SDValue LHS = N->getOperand(0);
8258   SDValue RHS = N->getOperand(1);
8259 
8260   EVT VT = N->getValueType(0);
8261   if (VT == MVT::i1) {
8262     // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
8263     if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
8264         RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
8265       SDValue Src = LHS.getOperand(0);
8266       if (Src != RHS.getOperand(0))
8267         return SDValue();
8268 
8269       const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
8270       const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
8271       if (!CLHS || !CRHS)
8272         return SDValue();
8273 
8274       // Only 10 bits are used.
8275       static const uint32_t MaxMask = 0x3ff;
8276 
8277       uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
8278       SDLoc DL(N);
8279       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
8280                          Src, DAG.getConstant(NewMask, DL, MVT::i32));
8281     }
8282 
8283     return SDValue();
8284   }
8285 
8286   // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
8287   if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() &&
8288       LHS.getOpcode() == AMDGPUISD::PERM &&
8289       isa<ConstantSDNode>(LHS.getOperand(2))) {
8290     uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1));
8291     if (!Sel)
8292       return SDValue();
8293 
8294     Sel |= LHS.getConstantOperandVal(2);
8295     SDLoc DL(N);
8296     return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
8297                        LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
8298   }
8299 
8300   // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
8301   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8302   if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
8303       N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
8304     uint32_t LHSMask = getPermuteMask(DAG, LHS);
8305     uint32_t RHSMask = getPermuteMask(DAG, RHS);
8306     if (LHSMask != ~0u && RHSMask != ~0u) {
8307       // Canonicalize the expression in an attempt to have fewer unique masks
8308       // and therefore fewer registers used to hold the masks.
8309       if (LHSMask > RHSMask) {
8310         std::swap(LHSMask, RHSMask);
8311         std::swap(LHS, RHS);
8312       }
8313 
      // Compute the lanes actually used from each source operand: a byte whose
      // selector is a real lane (0-3) gets 0xc in the used-lanes mask; bytes
      // that are already zero (selector 0xc) or all ones (0xff) contribute 0.
8316       uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8317       uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8318 
      // Check if we need to combine values from two sources within a byte.
8320       if (!(LHSUsedLanes & RHSUsedLanes) &&
8321           // If we select high and lower word keep it for SDWA.
8322           // TODO: teach SDWA to work with v_perm_b32 and remove the check.
8323           !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
        // Kill the zero bytes selected by the other mask. The zero selector is
        // 0xc.
8325         LHSMask &= ~RHSUsedLanes;
8326         RHSMask &= ~LHSUsedLanes;
8327         // Add 4 to each active LHS lane
8328         LHSMask |= LHSUsedLanes & 0x04040404;
8329         // Combine masks
8330         uint32_t Sel = LHSMask | RHSMask;
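        // For example, for or (and x, 0xff), (shl y, 8), after canonicalization
        // LHSMask = 0x0201000c (the shl) and RHSMask = 0x0c0c0c00 (the and).
        // Killing the zero bytes and adding 4 to the active LHS lanes gives
        // Sel = 0x06050400 | 0x00000000 = 0x06050400,
        // i.e. select y.byte2, y.byte1, y.byte0, x.byte0.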
8331         SDLoc DL(N);
8332 
8333         return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
8334                            LHS.getOperand(0), RHS.getOperand(0),
8335                            DAG.getConstant(Sel, DL, MVT::i32));
8336       }
8337     }
8338   }
8339 
8340   if (VT != MVT::i64)
8341     return SDValue();
8342 
8343   // TODO: This could be a generic combine with a predicate for extracting the
8344   // high half of an integer being free.
8345 
8346   // (or i64:x, (zero_extend i32:y)) ->
8347   //   i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
8348   if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
8349       RHS.getOpcode() != ISD::ZERO_EXTEND)
8350     std::swap(LHS, RHS);
8351 
8352   if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
8353     SDValue ExtSrc = RHS.getOperand(0);
8354     EVT SrcVT = ExtSrc.getValueType();
8355     if (SrcVT == MVT::i32) {
8356       SDLoc SL(N);
8357       SDValue LowLHS, HiBits;
8358       std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
8359       SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
8360 
8361       DCI.AddToWorklist(LowOr.getNode());
8362       DCI.AddToWorklist(HiBits.getNode());
8363 
8364       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
8365                                 LowOr, HiBits);
8366       return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
8367     }
8368   }
8369 
8370   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
8371   if (CRHS) {
8372     if (SDValue Split
8373           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
8374       return Split;
8375   }
8376 
8377   return SDValue();
8378 }
8379 
8380 SDValue SITargetLowering::performXorCombine(SDNode *N,
8381                                             DAGCombinerInfo &DCI) const {
8382   EVT VT = N->getValueType(0);
8383   if (VT != MVT::i64)
8384     return SDValue();
8385 
8386   SDValue LHS = N->getOperand(0);
8387   SDValue RHS = N->getOperand(1);
8388 
8389   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
8390   if (CRHS) {
8391     if (SDValue Split
8392           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
8393       return Split;
8394   }
8395 
8396   return SDValue();
8397 }
8398 
8399 // Instructions that will be lowered with a final instruction that zeros the
8400 // high result bits.
8401 // XXX - probably only need to list legal operations.
8402 static bool fp16SrcZerosHighBits(unsigned Opc) {
8403   switch (Opc) {
8404   case ISD::FADD:
8405   case ISD::FSUB:
8406   case ISD::FMUL:
8407   case ISD::FDIV:
8408   case ISD::FREM:
8409   case ISD::FMA:
8410   case ISD::FMAD:
8411   case ISD::FCANONICALIZE:
8412   case ISD::FP_ROUND:
8413   case ISD::UINT_TO_FP:
8414   case ISD::SINT_TO_FP:
8415   case ISD::FABS:
8416     // Fabs is lowered to a bit operation, but it's an and which will clear the
8417     // high bits anyway.
8418   case ISD::FSQRT:
8419   case ISD::FSIN:
8420   case ISD::FCOS:
8421   case ISD::FPOWI:
8422   case ISD::FPOW:
8423   case ISD::FLOG:
8424   case ISD::FLOG2:
8425   case ISD::FLOG10:
8426   case ISD::FEXP:
8427   case ISD::FEXP2:
8428   case ISD::FCEIL:
8429   case ISD::FTRUNC:
8430   case ISD::FRINT:
8431   case ISD::FNEARBYINT:
8432   case ISD::FROUND:
8433   case ISD::FFLOOR:
8434   case ISD::FMINNUM:
8435   case ISD::FMAXNUM:
8436   case AMDGPUISD::FRACT:
8437   case AMDGPUISD::CLAMP:
8438   case AMDGPUISD::COS_HW:
8439   case AMDGPUISD::SIN_HW:
8440   case AMDGPUISD::FMIN3:
8441   case AMDGPUISD::FMAX3:
8442   case AMDGPUISD::FMED3:
8443   case AMDGPUISD::FMAD_FTZ:
8444   case AMDGPUISD::RCP:
8445   case AMDGPUISD::RSQ:
8446   case AMDGPUISD::RCP_IFLAG:
8447   case AMDGPUISD::LDEXP:
8448     return true;
8449   default:
8450     // fcopysign, select and others may be lowered to 32-bit bit operations
8451     // which don't zero the high bits.
8452     return false;
8453   }
8454 }
8455 
8456 SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
8457                                                    DAGCombinerInfo &DCI) const {
8458   if (!Subtarget->has16BitInsts() ||
8459       DCI.getDAGCombineLevel() < AfterLegalizeDAG)
8460     return SDValue();
8461 
8462   EVT VT = N->getValueType(0);
8463   if (VT != MVT::i32)
8464     return SDValue();
8465 
8466   SDValue Src = N->getOperand(0);
8467   if (Src.getValueType() != MVT::i16)
8468     return SDValue();
8469 
8470   // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
8471   // FIXME: It is not universally true that the high bits are zeroed on gfx9.
8472   if (Src.getOpcode() == ISD::BITCAST) {
8473     SDValue BCSrc = Src.getOperand(0);
8474     if (BCSrc.getValueType() == MVT::f16 &&
8475         fp16SrcZerosHighBits(BCSrc.getOpcode()))
8476       return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
8477   }
8478 
8479   return SDValue();
8480 }
8481 
8482 SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N,
8483                                                         DAGCombinerInfo &DCI)
8484                                                         const {
8485   SDValue Src = N->getOperand(0);
8486   auto *VTSign = cast<VTSDNode>(N->getOperand(1));
8487 
8488   if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE &&
8489       VTSign->getVT() == MVT::i8) ||
8490       (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT &&
8491       VTSign->getVT() == MVT::i16)) &&
8492       Src.hasOneUse()) {
8493     auto *M = cast<MemSDNode>(Src);
8494     SDValue Ops[] = {
8495       Src.getOperand(0), // Chain
8496       Src.getOperand(1), // rsrc
8497       Src.getOperand(2), // vindex
8498       Src.getOperand(3), // voffset
8499       Src.getOperand(4), // soffset
8500       Src.getOperand(5), // offset
      Src.getOperand(6), // cachepolicy
      Src.getOperand(7)  // idxen
8503     };
8504     // replace with BUFFER_LOAD_BYTE/SHORT
8505     SDVTList ResList = DCI.DAG.getVTList(MVT::i32,
8506                                          Src.getOperand(0).getValueType());
8507     unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ?
8508                    AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT;
8509     SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N),
8510                                                           ResList,
8511                                                           Ops, M->getMemoryVT(),
8512                                                           M->getMemOperand());
8513     return DCI.DAG.getMergeValues({BufferLoadSignExt,
8514                                   BufferLoadSignExt.getValue(1)}, SDLoc(N));
8515   }
8516   return SDValue();
8517 }
8518 
8519 SDValue SITargetLowering::performClassCombine(SDNode *N,
8520                                               DAGCombinerInfo &DCI) const {
8521   SelectionDAG &DAG = DCI.DAG;
8522   SDValue Mask = N->getOperand(1);
8523 
8524   // fp_class x, 0 -> false
8525   if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
8526     if (CMask->isNullValue())
8527       return DAG.getConstant(0, SDLoc(N), MVT::i1);
8528   }
8529 
8530   if (N->getOperand(0).isUndef())
8531     return DAG.getUNDEF(MVT::i1);
8532 
8533   return SDValue();
8534 }
8535 
8536 SDValue SITargetLowering::performRcpCombine(SDNode *N,
8537                                             DAGCombinerInfo &DCI) const {
8538   EVT VT = N->getValueType(0);
8539   SDValue N0 = N->getOperand(0);
8540 
8541   if (N0.isUndef())
8542     return N0;
8543 
8544   if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP ||
8545                          N0.getOpcode() == ISD::SINT_TO_FP)) {
8546     return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0,
8547                            N->getFlags());
8548   }
8549 
8550   return AMDGPUTargetLowering::performRcpCombine(N, DCI);
8551 }
8552 
8553 bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
8554                                        unsigned MaxDepth) const {
8555   unsigned Opcode = Op.getOpcode();
8556   if (Opcode == ISD::FCANONICALIZE)
8557     return true;
8558 
8559   if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
8560     auto F = CFP->getValueAPF();
8561     if (F.isNaN() && F.isSignaling())
8562       return false;
8563     return !F.isDenormal() || denormalsEnabledForType(Op.getValueType());
8564   }
8565 
8566   // If source is a result of another standard FP operation it is already in
8567   // canonical form.
8568   if (MaxDepth == 0)
8569     return false;
8570 
8571   switch (Opcode) {
8572   // These will flush denorms if required.
8573   case ISD::FADD:
8574   case ISD::FSUB:
8575   case ISD::FMUL:
8576   case ISD::FCEIL:
8577   case ISD::FFLOOR:
8578   case ISD::FMA:
8579   case ISD::FMAD:
8580   case ISD::FSQRT:
8581   case ISD::FDIV:
8582   case ISD::FREM:
8583   case ISD::FP_ROUND:
8584   case ISD::FP_EXTEND:
8585   case AMDGPUISD::FMUL_LEGACY:
8586   case AMDGPUISD::FMAD_FTZ:
8587   case AMDGPUISD::RCP:
8588   case AMDGPUISD::RSQ:
8589   case AMDGPUISD::RSQ_CLAMP:
8590   case AMDGPUISD::RCP_LEGACY:
8591   case AMDGPUISD::RSQ_LEGACY:
8592   case AMDGPUISD::RCP_IFLAG:
8593   case AMDGPUISD::TRIG_PREOP:
8594   case AMDGPUISD::DIV_SCALE:
8595   case AMDGPUISD::DIV_FMAS:
8596   case AMDGPUISD::DIV_FIXUP:
8597   case AMDGPUISD::FRACT:
8598   case AMDGPUISD::LDEXP:
8599   case AMDGPUISD::CVT_PKRTZ_F16_F32:
8600   case AMDGPUISD::CVT_F32_UBYTE0:
8601   case AMDGPUISD::CVT_F32_UBYTE1:
8602   case AMDGPUISD::CVT_F32_UBYTE2:
8603   case AMDGPUISD::CVT_F32_UBYTE3:
8604     return true;
8605 
  // These can/will be lowered or combined to bit operations, so we need to
  // check their inputs recursively.
8608   case ISD::FNEG:
8609   case ISD::FABS:
8610   case ISD::FCOPYSIGN:
8611     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
8612 
8613   case ISD::FSIN:
8614   case ISD::FCOS:
8615   case ISD::FSINCOS:
8616     return Op.getValueType().getScalarType() != MVT::f16;
8617 
8618   case ISD::FMINNUM:
8619   case ISD::FMAXNUM:
8620   case ISD::FMINNUM_IEEE:
8621   case ISD::FMAXNUM_IEEE:
8622   case AMDGPUISD::CLAMP:
8623   case AMDGPUISD::FMED3:
8624   case AMDGPUISD::FMAX3:
8625   case AMDGPUISD::FMIN3: {
    // FIXME: Shouldn't treat the generic operations differently based on these
    // opcodes. However, we aren't really required to flush the result from
    // minnum/maxnum.
8629 
8630     // snans will be quieted, so we only need to worry about denormals.
8631     if (Subtarget->supportsMinMaxDenormModes() ||
8632         denormalsEnabledForType(Op.getValueType()))
8633       return true;
8634 
8635     // Flushing may be required.
    // On pre-GFX9 targets V_MIN_F32 and others do not flush denorms; for such
    // targets we need to check the inputs recursively.
8638 
8639     // FIXME: Does this apply with clamp? It's implemented with max.
8640     for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) {
8641       if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1))
8642         return false;
8643     }
8644 
8645     return true;
8646   }
8647   case ISD::SELECT: {
8648     return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) &&
8649            isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1);
8650   }
8651   case ISD::BUILD_VECTOR: {
8652     for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
8653       SDValue SrcOp = Op.getOperand(i);
8654       if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1))
8655         return false;
8656     }
8657 
8658     return true;
8659   }
8660   case ISD::EXTRACT_VECTOR_ELT:
8661   case ISD::EXTRACT_SUBVECTOR: {
8662     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
8663   }
8664   case ISD::INSERT_VECTOR_ELT: {
8665     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) &&
8666            isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1);
8667   }
8668   case ISD::UNDEF:
8669     // Could be anything.
8670     return false;
8671 
8672   case ISD::BITCAST: {
    // Hack around the mess we make when legalizing extract_vector_elt.
8674     SDValue Src = Op.getOperand(0);
8675     if (Src.getValueType() == MVT::i16 &&
8676         Src.getOpcode() == ISD::TRUNCATE) {
8677       SDValue TruncSrc = Src.getOperand(0);
8678       if (TruncSrc.getValueType() == MVT::i32 &&
8679           TruncSrc.getOpcode() == ISD::BITCAST &&
8680           TruncSrc.getOperand(0).getValueType() == MVT::v2f16) {
8681         return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1);
8682       }
8683     }
8684 
8685     return false;
8686   }
8687   case ISD::INTRINSIC_WO_CHAIN: {
8688     unsigned IntrinsicID
8689       = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8690     // TODO: Handle more intrinsics
8691     switch (IntrinsicID) {
8692     case Intrinsic::amdgcn_cvt_pkrtz:
8693     case Intrinsic::amdgcn_cubeid:
8694     case Intrinsic::amdgcn_frexp_mant:
8695     case Intrinsic::amdgcn_fdot2:
8696       return true;
8697     default:
8698       break;
8699     }
8700 
8701     LLVM_FALLTHROUGH;
8702   }
8703   default:
8704     return denormalsEnabledForType(Op.getValueType()) &&
8705            DAG.isKnownNeverSNaN(Op);
8706   }
8707 
8708   llvm_unreachable("invalid operation");
8709 }
8710 
8711 // Constant fold canonicalize.
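// For example, with f32 denormals disabled the denormal bit pattern 0x00000001
// folds to +0.0, and a signaling NaN such as 0x7fa00000 is quieted to the
// canonical quiet NaN 0x7fc00000.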
8712 SDValue SITargetLowering::getCanonicalConstantFP(
8713   SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const {
8714   // Flush denormals to 0 if not enabled.
8715   if (C.isDenormal() && !denormalsEnabledForType(VT))
8716     return DAG.getConstantFP(0.0, SL, VT);
8717 
8718   if (C.isNaN()) {
8719     APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
8720     if (C.isSignaling()) {
8721       // Quiet a signaling NaN.
8722       // FIXME: Is this supposed to preserve payload bits?
8723       return DAG.getConstantFP(CanonicalQNaN, SL, VT);
8724     }
8725 
8726     // Make sure it is the canonical NaN bitpattern.
8727     //
8728     // TODO: Can we use -1 as the canonical NaN value since it's an inline
8729     // immediate?
8730     if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
8731       return DAG.getConstantFP(CanonicalQNaN, SL, VT);
8732   }
8733 
8734   // Already canonical.
8735   return DAG.getConstantFP(C, SL, VT);
8736 }
8737 
8738 static bool vectorEltWillFoldAway(SDValue Op) {
8739   return Op.isUndef() || isa<ConstantFPSDNode>(Op);
8740 }
8741 
8742 SDValue SITargetLowering::performFCanonicalizeCombine(
8743   SDNode *N,
8744   DAGCombinerInfo &DCI) const {
8745   SelectionDAG &DAG = DCI.DAG;
8746   SDValue N0 = N->getOperand(0);
8747   EVT VT = N->getValueType(0);
8748 
8749   // fcanonicalize undef -> qnan
8750   if (N0.isUndef()) {
8751     APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT));
8752     return DAG.getConstantFP(QNaN, SDLoc(N), VT);
8753   }
8754 
8755   if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) {
8756     EVT VT = N->getValueType(0);
8757     return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF());
8758   }
8759 
8760   // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x),
8761   //                                                   (fcanonicalize k)
8762   //
8763   // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0
8764 
8765   // TODO: This could be better with wider vectors that will be split to v2f16,
8766   // and to consider uses since there aren't that many packed operations.
8767   if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 &&
8768       isTypeLegal(MVT::v2f16)) {
8769     SDLoc SL(N);
8770     SDValue NewElts[2];
8771     SDValue Lo = N0.getOperand(0);
8772     SDValue Hi = N0.getOperand(1);
8773     EVT EltVT = Lo.getValueType();
8774 
8775     if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) {
8776       for (unsigned I = 0; I != 2; ++I) {
8777         SDValue Op = N0.getOperand(I);
8778         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
8779           NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT,
8780                                               CFP->getValueAPF());
8781         } else if (Op.isUndef()) {
8782           // Handled below based on what the other operand is.
8783           NewElts[I] = Op;
8784         } else {
8785           NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op);
8786         }
8787       }
8788 
      // If one half is undef, and one is constant, prefer a splat vector
      // rather than the normal qNaN. If it's a register, prefer 0.0 since
      // that's cheaper to use and may be free with a packed operation.
      if (NewElts[0].isUndef()) {
        NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ?
          NewElts[1] : DAG.getConstantFP(0.0f, SL, EltVT);
      }
8797 
8798       if (NewElts[1].isUndef()) {
8799         NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ?
8800           NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT);
8801       }
8802 
8803       return DAG.getBuildVector(VT, SL, NewElts);
8804     }
8805   }
8806 
8807   unsigned SrcOpc = N0.getOpcode();
8808 
8809   // If it's free to do so, push canonicalizes further up the source, which may
8810   // find a canonical source.
8811   //
  // TODO: More opcodes. Note this is unsafe for the _ieee minnum/maxnum for
  // sNaNs.
8814   if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) {
8815     auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
8816     if (CRHS && N0.hasOneUse()) {
8817       SDLoc SL(N);
8818       SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT,
8819                                    N0.getOperand(0));
8820       SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF());
8821       DCI.AddToWorklist(Canon0.getNode());
8822 
8823       return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1);
8824     }
8825   }
8826 
8827   return isCanonicalized(DAG, N0) ? N0 : SDValue();
8828 }
8829 
8830 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
8831   switch (Opc) {
8832   case ISD::FMAXNUM:
8833   case ISD::FMAXNUM_IEEE:
8834     return AMDGPUISD::FMAX3;
8835   case ISD::SMAX:
8836     return AMDGPUISD::SMAX3;
8837   case ISD::UMAX:
8838     return AMDGPUISD::UMAX3;
8839   case ISD::FMINNUM:
8840   case ISD::FMINNUM_IEEE:
8841     return AMDGPUISD::FMIN3;
8842   case ISD::SMIN:
8843     return AMDGPUISD::SMIN3;
8844   case ISD::UMIN:
8845     return AMDGPUISD::UMIN3;
8846   default:
8847     llvm_unreachable("Not a min/max opcode");
8848   }
8849 }
8850 
8851 SDValue SITargetLowering::performIntMed3ImmCombine(
8852   SelectionDAG &DAG, const SDLoc &SL,
8853   SDValue Op0, SDValue Op1, bool Signed) const {
8854   ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
8855   if (!K1)
8856     return SDValue();
8857 
8858   ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
8859   if (!K0)
8860     return SDValue();
8861 
8862   if (Signed) {
8863     if (K0->getAPIntValue().sge(K1->getAPIntValue()))
8864       return SDValue();
8865   } else {
8866     if (K0->getAPIntValue().uge(K1->getAPIntValue()))
8867       return SDValue();
8868   }
8869 
8870   EVT VT = K0->getValueType(0);
8871   unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
8872   if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
8873     return DAG.getNode(Med3Opc, SL, VT,
8874                        Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
8875   }
8876 
8877   // If there isn't a 16-bit med3 operation, convert to 32-bit.
8878   MVT NVT = MVT::i32;
8879   unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
8880 
8881   SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
8882   SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
8883   SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
8884 
8885   SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
8886   return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
8887 }
8888 
8889 static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
8890   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
8891     return C;
8892 
8893   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
8894     if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
8895       return C;
8896   }
8897 
8898   return nullptr;
8899 }
8900 
8901 SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
8902                                                   const SDLoc &SL,
8903                                                   SDValue Op0,
8904                                                   SDValue Op1) const {
8905   ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
8906   if (!K1)
8907     return SDValue();
8908 
8909   ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
8910   if (!K0)
8911     return SDValue();
8912 
8913   // Ordered >= (although NaN inputs should have folded away by now).
8914   APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
8915   if (Cmp == APFloat::cmpGreaterThan)
8916     return SDValue();
8917 
8918   const MachineFunction &MF = DAG.getMachineFunction();
8919   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
8920 
8921   // TODO: Check IEEE bit enabled?
8922   EVT VT = Op0.getValueType();
8923   if (Info->getMode().DX10Clamp) {
8924     // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
8925     // hardware fmed3 behavior converting to a min.
8926     // FIXME: Should this be allowing -0.0?
8927     if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
8928       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
8929   }
8930 
8931   // med3 for f16 is only available on gfx9+, and not available for v2f16.
8932   if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
8933     // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
8934     // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
8935     // then give the other result, which is different from med3 with a NaN
8936     // input.
8937     SDValue Var = Op0.getOperand(0);
8938     if (!DAG.isKnownNeverSNaN(Var))
8939       return SDValue();
8940 
8941     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8942 
8943     if ((!K0->hasOneUse() ||
8944          TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) &&
8945         (!K1->hasOneUse() ||
8946          TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) {
8947       return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
8948                          Var, SDValue(K0, 0), SDValue(K1, 0));
8949     }
8950   }
8951 
8952   return SDValue();
8953 }
8954 
8955 SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
8956                                                DAGCombinerInfo &DCI) const {
8957   SelectionDAG &DAG = DCI.DAG;
8958 
8959   EVT VT = N->getValueType(0);
8960   unsigned Opc = N->getOpcode();
8961   SDValue Op0 = N->getOperand(0);
8962   SDValue Op1 = N->getOperand(1);
8963 
  // Only do this if the inner op has one use since this will just increase
  // register pressure for no benefit.
8966 
8967   if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
8968       !VT.isVector() &&
8969       (VT == MVT::i32 || VT == MVT::f32 ||
8970        ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) {
8971     // max(max(a, b), c) -> max3(a, b, c)
8972     // min(min(a, b), c) -> min3(a, b, c)
8973     if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
8974       SDLoc DL(N);
8975       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
8976                          DL,
8977                          N->getValueType(0),
8978                          Op0.getOperand(0),
8979                          Op0.getOperand(1),
8980                          Op1);
8981     }
8982 
8983     // Try commuted.
8984     // max(a, max(b, c)) -> max3(a, b, c)
8985     // min(a, min(b, c)) -> min3(a, b, c)
8986     if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
8987       SDLoc DL(N);
8988       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
8989                          DL,
8990                          N->getValueType(0),
8991                          Op0,
8992                          Op1.getOperand(0),
8993                          Op1.getOperand(1));
8994     }
8995   }
8996 
8997   // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
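  // e.g. smin (smax x, 1), 7 -> smed3 x, 1, 7, provided K0 = 1 < K1 = 7.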
8998   if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
8999     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
9000       return Med3;
9001   }
9002 
9003   if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
9004     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
9005       return Med3;
9006   }
9007 
9008   // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
9009   if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
9010        (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) ||
9011        (Opc == AMDGPUISD::FMIN_LEGACY &&
9012         Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
9013       (VT == MVT::f32 || VT == MVT::f64 ||
9014        (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
9015        (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
9016       Op0.hasOneUse()) {
9017     if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
9018       return Res;
9019   }
9020 
9021   return SDValue();
9022 }
9023 
9024 static bool isClampZeroToOne(SDValue A, SDValue B) {
9025   if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
9026     if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
9027       // FIXME: Should this be allowing -0.0?
9028       return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
9029              (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
9030     }
9031   }
9032 
9033   return false;
9034 }
9035 
// FIXME: Should only worry about sNaNs for the version with chain.
9037 SDValue SITargetLowering::performFMed3Combine(SDNode *N,
9038                                               DAGCombinerInfo &DCI) const {
9039   EVT VT = N->getValueType(0);
9040   // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
9041   // NaNs. With a NaN input, the order of the operands may change the result.
9042 
9043   SelectionDAG &DAG = DCI.DAG;
9044   SDLoc SL(N);
9045 
9046   SDValue Src0 = N->getOperand(0);
9047   SDValue Src1 = N->getOperand(1);
9048   SDValue Src2 = N->getOperand(2);
9049 
9050   if (isClampZeroToOne(Src0, Src1)) {
9051     // const_a, const_b, x -> clamp is safe in all cases including signaling
9052     // nans.
9053     // FIXME: Should this be allowing -0.0?
9054     return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
9055   }
9056 
9057   const MachineFunction &MF = DAG.getMachineFunction();
9058   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
9059 
9060   // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
9061   // handling no dx10-clamp?
9062   if (Info->getMode().DX10Clamp) {
    // If NaNs are clamped to 0, we are free to reorder the inputs.
9064 
9065     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
9066       std::swap(Src0, Src1);
9067 
9068     if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
9069       std::swap(Src1, Src2);
9070 
9071     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
9072       std::swap(Src0, Src1);
9073 
9074     if (isClampZeroToOne(Src1, Src2))
9075       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
9076   }
9077 
9078   return SDValue();
9079 }
9080 
9081 SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
9082                                                  DAGCombinerInfo &DCI) const {
9083   SDValue Src0 = N->getOperand(0);
9084   SDValue Src1 = N->getOperand(1);
9085   if (Src0.isUndef() && Src1.isUndef())
9086     return DCI.DAG.getUNDEF(N->getValueType(0));
9087   return SDValue();
9088 }
9089 
9090 SDValue SITargetLowering::performExtractVectorEltCombine(
9091   SDNode *N, DAGCombinerInfo &DCI) const {
9092   SDValue Vec = N->getOperand(0);
9093   SelectionDAG &DAG = DCI.DAG;
9094 
9095   EVT VecVT = Vec.getValueType();
9096   EVT EltVT = VecVT.getVectorElementType();
9097 
9098   if ((Vec.getOpcode() == ISD::FNEG ||
9099        Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
9100     SDLoc SL(N);
9101     EVT EltVT = N->getValueType(0);
9102     SDValue Idx = N->getOperand(1);
9103     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
9104                               Vec.getOperand(0), Idx);
9105     return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
9106   }
9107 
9108   // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
9109   //    =>
9110   // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
9111   // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
9112   // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
9113   if (Vec.hasOneUse() && DCI.isBeforeLegalize()) {
9114     SDLoc SL(N);
9115     EVT EltVT = N->getValueType(0);
9116     SDValue Idx = N->getOperand(1);
9117     unsigned Opc = Vec.getOpcode();
9118 
9119     switch(Opc) {
9120     default:
9121       break;
9122       // TODO: Support other binary operations.
9123     case ISD::FADD:
9124     case ISD::FSUB:
9125     case ISD::FMUL:
9126     case ISD::ADD:
9127     case ISD::UMIN:
9128     case ISD::UMAX:
9129     case ISD::SMIN:
9130     case ISD::SMAX:
9131     case ISD::FMAXNUM:
9132     case ISD::FMINNUM:
9133     case ISD::FMAXNUM_IEEE:
9134     case ISD::FMINNUM_IEEE: {
9135       SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
9136                                  Vec.getOperand(0), Idx);
9137       SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
9138                                  Vec.getOperand(1), Idx);
9139 
9140       DCI.AddToWorklist(Elt0.getNode());
9141       DCI.AddToWorklist(Elt1.getNode());
9142       return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags());
9143     }
9144     }
9145   }
9146 
9147   unsigned VecSize = VecVT.getSizeInBits();
9148   unsigned EltSize = EltVT.getSizeInBits();
9149 
  // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx)
  // This eliminates the non-constant index and the subsequent movrel or
  // scratch access. Sub-dword vectors of two dwords or less have a better
  // implementation. Vectors bigger than 8 dwords would yield too many
  // v_cndmask_b32 instructions.
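  // For example, extracting from <4 x i32> with a variable index Idx becomes
  // select (Idx == 3, v3, select (Idx == 2, v2, select (Idx == 1, v1, v0))).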
9155   if (VecSize <= 256 && (VecSize > 64 || EltSize >= 32) &&
9156       !isa<ConstantSDNode>(N->getOperand(1))) {
9157     SDLoc SL(N);
9158     SDValue Idx = N->getOperand(1);
9159     EVT IdxVT = Idx.getValueType();
9160     SDValue V;
9161     for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
9162       SDValue IC = DAG.getConstant(I, SL, IdxVT);
9163       SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
9164       if (I == 0)
9165         V = Elt;
9166       else
9167         V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ);
9168     }
9169     return V;
9170   }
9171 
9172   if (!DCI.isBeforeLegalize())
9173     return SDValue();
9174 
9175   // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit
9176   // elements. This exposes more load reduction opportunities by replacing
9177   // multiple small extract_vector_elements with a single 32-bit extract.
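  // For example, (extractelt (v8i8 (load ...)), 5), assuming
  // getEquivalentMemType yields v2i32 here, becomes an extract of 32-bit
  // element 1 (BitIndex = 40, EltIdx = 1), a right shift by the leftover
  // 8 bits and a truncate back to i8.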
9178   auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
9179   if (isa<MemSDNode>(Vec) &&
9180       EltSize <= 16 &&
9181       EltVT.isByteSized() &&
9182       VecSize > 32 &&
9183       VecSize % 32 == 0 &&
9184       Idx) {
9185     EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
9186 
9187     unsigned BitIndex = Idx->getZExtValue() * EltSize;
9188     unsigned EltIdx = BitIndex / 32;
9189     unsigned LeftoverBitIdx = BitIndex % 32;
9190     SDLoc SL(N);
9191 
9192     SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
9193     DCI.AddToWorklist(Cast.getNode());
9194 
9195     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
9196                               DAG.getConstant(EltIdx, SL, MVT::i32));
9197     DCI.AddToWorklist(Elt.getNode());
9198     SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
9199                               DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
9200     DCI.AddToWorklist(Srl.getNode());
9201 
9202     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl);
9203     DCI.AddToWorklist(Trunc.getNode());
9204     return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc);
9205   }
9206 
9207   return SDValue();
9208 }
9209 
9210 SDValue
9211 SITargetLowering::performInsertVectorEltCombine(SDNode *N,
9212                                                 DAGCombinerInfo &DCI) const {
9213   SDValue Vec = N->getOperand(0);
9214   SDValue Idx = N->getOperand(2);
9215   EVT VecVT = Vec.getValueType();
9216   EVT EltVT = VecVT.getVectorElementType();
9217   unsigned VecSize = VecVT.getSizeInBits();
9218   unsigned EltSize = EltVT.getSizeInBits();
9219 
  // INSERT_VECTOR_ELT (<n x e>, var-idx)
  // => BUILD_VECTOR n x select (e, const-idx)
  // This eliminates the non-constant index and the subsequent movrel or
  // scratch access. Sub-dword vectors of two dwords or less have a better
  // implementation. Vectors bigger than 8 dwords would yield too many
  // v_cndmask_b32 instructions.
9226   if (isa<ConstantSDNode>(Idx) ||
9227       VecSize > 256 || (VecSize <= 64 && EltSize < 32))
9228     return SDValue();
9229 
9230   SelectionDAG &DAG = DCI.DAG;
9231   SDLoc SL(N);
9232   SDValue Ins = N->getOperand(1);
9233   EVT IdxVT = Idx.getValueType();
9234 
9235   SmallVector<SDValue, 16> Ops;
9236   for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
9237     SDValue IC = DAG.getConstant(I, SL, IdxVT);
9238     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
9239     SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ);
9240     Ops.push_back(V);
9241   }
9242 
9243   return DAG.getBuildVector(VecVT, SL, Ops);
9244 }
9245 
9246 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
9247                                           const SDNode *N0,
9248                                           const SDNode *N1) const {
9249   EVT VT = N0->getValueType(0);
9250 
9251   // Only do this if we are not trying to support denormals. v_mad_f32 does not
9252   // support denormals ever.
9253   if (((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
9254        (VT == MVT::f16 && !Subtarget->hasFP16Denormals() &&
9255         getSubtarget()->hasMadF16())) &&
9256        isOperationLegal(ISD::FMAD, VT))
9257     return ISD::FMAD;
9258 
9259   const TargetOptions &Options = DAG.getTarget().Options;
9260   if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
9261        (N0->getFlags().hasAllowContract() &&
9262         N1->getFlags().hasAllowContract())) &&
9263       isFMAFasterThanFMulAndFAdd(VT)) {
9264     return ISD::FMA;
9265   }
9266 
9267   return 0;
9268 }
9269 
9270 // For a reassociatable opcode perform:
9271 // op x, (op y, z) -> op (op x, z), y, if x and z are uniform
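// For example, add s0, (add v0, s1) -> add (add s0, s1), v0, so that the
// inner add of the two uniform operands can be selected to a scalar
// instruction.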
9272 SDValue SITargetLowering::reassociateScalarOps(SDNode *N,
9273                                                SelectionDAG &DAG) const {
9274   EVT VT = N->getValueType(0);
9275   if (VT != MVT::i32 && VT != MVT::i64)
9276     return SDValue();
9277 
9278   unsigned Opc = N->getOpcode();
9279   SDValue Op0 = N->getOperand(0);
9280   SDValue Op1 = N->getOperand(1);
9281 
9282   if (!(Op0->isDivergent() ^ Op1->isDivergent()))
9283     return SDValue();
9284 
9285   if (Op0->isDivergent())
9286     std::swap(Op0, Op1);
9287 
9288   if (Op1.getOpcode() != Opc || !Op1.hasOneUse())
9289     return SDValue();
9290 
9291   SDValue Op2 = Op1.getOperand(1);
9292   Op1 = Op1.getOperand(0);
9293   if (!(Op1->isDivergent() ^ Op2->isDivergent()))
9294     return SDValue();
9295 
9296   if (Op1->isDivergent())
9297     std::swap(Op1, Op2);
9298 
9299   // If either operand is constant this will conflict with
9300   // DAGCombiner::ReassociateOps().
9301   if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) ||
9302       DAG.isConstantIntBuildVectorOrConstantInt(Op1))
9303     return SDValue();
9304 
9305   SDLoc SL(N);
9306   SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1);
9307   return DAG.getNode(Opc, SL, VT, Add1, Op2);
9308 }
9309 
9310 static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
9311                            EVT VT,
9312                            SDValue N0, SDValue N1, SDValue N2,
9313                            bool Signed) {
9314   unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
9315   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
9316   SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
9317   return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
9318 }
9319 
9320 SDValue SITargetLowering::performAddCombine(SDNode *N,
9321                                             DAGCombinerInfo &DCI) const {
9322   SelectionDAG &DAG = DCI.DAG;
9323   EVT VT = N->getValueType(0);
9324   SDLoc SL(N);
9325   SDValue LHS = N->getOperand(0);
9326   SDValue RHS = N->getOperand(1);
9327 
9328   if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
9329       && Subtarget->hasMad64_32() &&
9330       !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
9331       VT.getScalarSizeInBits() <= 64) {
9332     if (LHS.getOpcode() != ISD::MUL)
9333       std::swap(LHS, RHS);
9334 
9335     SDValue MulLHS = LHS.getOperand(0);
9336     SDValue MulRHS = LHS.getOperand(1);
9337     SDValue AddRHS = RHS;
9338 
9339     // TODO: Maybe restrict if SGPR inputs.
9340     if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
9341         numBitsUnsigned(MulRHS, DAG) <= 32) {
9342       MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
9343       MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
9344       AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
9345       return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
9346     }
9347 
9348     if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
9349       MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
9350       MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
9351       AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
9352       return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
9353     }
9354 
9355     return SDValue();
9356   }
9357 
9358   if (SDValue V = reassociateScalarOps(N, DAG)) {
9359     return V;
9360   }
9361 
9362   if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
9363     return SDValue();
9364 
9365   // add x, zext (setcc) => addcarry x, 0, setcc
9366   // add x, sext (setcc) => subcarry x, 0, setcc
9367   unsigned Opc = LHS.getOpcode();
9368   if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
9369       Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
9370     std::swap(RHS, LHS);
9371 
9372   Opc = RHS.getOpcode();
9373   switch (Opc) {
9374   default: break;
9375   case ISD::ZERO_EXTEND:
9376   case ISD::SIGN_EXTEND:
9377   case ISD::ANY_EXTEND: {
9378     auto Cond = RHS.getOperand(0);
9379     if (!isBoolSGPR(Cond))
9380       break;
9381     SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
9382     SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
9383     Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
9384     return DAG.getNode(Opc, SL, VTList, Args);
9385   }
9386   case ISD::ADDCARRY: {
9387     // add x, (addcarry y, 0, cc) => addcarry x, y, cc
9388     auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
9389     if (!C || C->getZExtValue() != 0) break;
9390     SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
9391     return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
9392   }
9393   }
9394   return SDValue();
9395 }
9396 
9397 SDValue SITargetLowering::performSubCombine(SDNode *N,
9398                                             DAGCombinerInfo &DCI) const {
9399   SelectionDAG &DAG = DCI.DAG;
9400   EVT VT = N->getValueType(0);
9401 
9402   if (VT != MVT::i32)
9403     return SDValue();
9404 
9405   SDLoc SL(N);
9406   SDValue LHS = N->getOperand(0);
9407   SDValue RHS = N->getOperand(1);
9408 
9409   if (LHS.getOpcode() == ISD::SUBCARRY) {
9410     // sub (subcarry x, 0, cc), y => subcarry x, y, cc
9411     auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
9412     if (!C || !C->isNullValue())
9413       return SDValue();
9414     SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
9415     return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
9416   }
9417   return SDValue();
9418 }
9419 
9420 SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
9421   DAGCombinerInfo &DCI) const {
9422 
9423   if (N->getValueType(0) != MVT::i32)
9424     return SDValue();
9425 
9426   auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
9427   if (!C || C->getZExtValue() != 0)
9428     return SDValue();
9429 
9430   SelectionDAG &DAG = DCI.DAG;
9431   SDValue LHS = N->getOperand(0);
9432 
9433   // addcarry (add x, y), 0, cc => addcarry x, y, cc
9434   // subcarry (sub x, y), 0, cc => subcarry x, y, cc
9435   unsigned LHSOpc = LHS.getOpcode();
9436   unsigned Opc = N->getOpcode();
9437   if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
9438       (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
9439     SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
9440     return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
9441   }
9442   return SDValue();
9443 }
9444 
9445 SDValue SITargetLowering::performFAddCombine(SDNode *N,
9446                                              DAGCombinerInfo &DCI) const {
9447   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
9448     return SDValue();
9449 
9450   SelectionDAG &DAG = DCI.DAG;
9451   EVT VT = N->getValueType(0);
9452 
9453   SDLoc SL(N);
9454   SDValue LHS = N->getOperand(0);
9455   SDValue RHS = N->getOperand(1);
9456 
  // These should really be instruction patterns, but writing patterns with
  // source modifiers is a pain.
9459 
9460   // fadd (fadd (a, a), b) -> mad 2.0, a, b
9461   if (LHS.getOpcode() == ISD::FADD) {
9462     SDValue A = LHS.getOperand(0);
9463     if (A == LHS.getOperand(1)) {
9464       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
9465       if (FusedOp != 0) {
9466         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9467         return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
9468       }
9469     }
9470   }
9471 
9472   // fadd (b, fadd (a, a)) -> mad 2.0, a, b
9473   if (RHS.getOpcode() == ISD::FADD) {
9474     SDValue A = RHS.getOperand(0);
9475     if (A == RHS.getOperand(1)) {
9476       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
9477       if (FusedOp != 0) {
9478         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9479         return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
9480       }
9481     }
9482   }
9483 
9484   return SDValue();
9485 }
9486 
9487 SDValue SITargetLowering::performFSubCombine(SDNode *N,
9488                                              DAGCombinerInfo &DCI) const {
9489   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
9490     return SDValue();
9491 
9492   SelectionDAG &DAG = DCI.DAG;
9493   SDLoc SL(N);
9494   EVT VT = N->getValueType(0);
9495   assert(!VT.isVector());
9496 
9497   // Try to get the fneg to fold into the source modifier. This undoes generic
9498   // DAG combines and folds them into the mad.
9499   //
9500   // Only do this if we are not trying to support denormals. v_mad_f32 does
9501   // not support denormals ever.
9502   SDValue LHS = N->getOperand(0);
9503   SDValue RHS = N->getOperand(1);
9504   if (LHS.getOpcode() == ISD::FADD) {
9505     // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
9506     SDValue A = LHS.getOperand(0);
9507     if (A == LHS.getOperand(1)) {
9508       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
9509       if (FusedOp != 0){
9510         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9511         SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
9512 
9513         return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
9514       }
9515     }
9516   }
9517 
9518   if (RHS.getOpcode() == ISD::FADD) {
9519     // (fsub c, (fadd a, a)) -> mad -2.0, a, c
9520 
9521     SDValue A = RHS.getOperand(0);
9522     if (A == RHS.getOperand(1)) {
9523       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
9524       if (FusedOp != 0){
9525         const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
9526         return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
9527       }
9528     }
9529   }
9530 
9531   return SDValue();
9532 }
9533 
9534 SDValue SITargetLowering::performFMACombine(SDNode *N,
9535                                             DAGCombinerInfo &DCI) const {
9536   SelectionDAG &DAG = DCI.DAG;
9537   EVT VT = N->getValueType(0);
9538   SDLoc SL(N);
9539 
9540   if (!Subtarget->hasDot2Insts() || VT != MVT::f32)
9541     return SDValue();
9542 
  // FMA((F32)S0.x, (F32)S1.x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
  //   FDOT2((V2F16)S0, (V2F16)S1, (F32)z)
9545   SDValue Op1 = N->getOperand(0);
9546   SDValue Op2 = N->getOperand(1);
9547   SDValue FMA = N->getOperand(2);
9548 
9549   if (FMA.getOpcode() != ISD::FMA ||
9550       Op1.getOpcode() != ISD::FP_EXTEND ||
9551       Op2.getOpcode() != ISD::FP_EXTEND)
9552     return SDValue();
9553 
  // fdot2_f32_f16 always flushes fp32 denormal operands and output to zero,
  // regardless of the denorm mode setting. Therefore, unsafe-fp-math or
  // fp-contract is sufficient to allow generating fdot2.
9557   const TargetOptions &Options = DAG.getTarget().Options;
9558   if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
9559       (N->getFlags().hasAllowContract() &&
9560        FMA->getFlags().hasAllowContract())) {
9561     Op1 = Op1.getOperand(0);
9562     Op2 = Op2.getOperand(0);
9563     if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9564         Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9565       return SDValue();
9566 
9567     SDValue Vec1 = Op1.getOperand(0);
9568     SDValue Idx1 = Op1.getOperand(1);
9569     SDValue Vec2 = Op2.getOperand(0);
9570 
9571     SDValue FMAOp1 = FMA.getOperand(0);
9572     SDValue FMAOp2 = FMA.getOperand(1);
9573     SDValue FMAAcc = FMA.getOperand(2);
9574 
9575     if (FMAOp1.getOpcode() != ISD::FP_EXTEND ||
9576         FMAOp2.getOpcode() != ISD::FP_EXTEND)
9577       return SDValue();
9578 
9579     FMAOp1 = FMAOp1.getOperand(0);
9580     FMAOp2 = FMAOp2.getOperand(0);
9581     if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9582         FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9583       return SDValue();
9584 
9585     SDValue Vec3 = FMAOp1.getOperand(0);
9586     SDValue Vec4 = FMAOp2.getOperand(0);
9587     SDValue Idx2 = FMAOp1.getOperand(1);
9588 
9589     if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) ||
9590         // Idx1 and Idx2 cannot be the same.
9591         Idx1 == Idx2)
9592       return SDValue();
9593 
9594     if (Vec1 == Vec2 || Vec3 == Vec4)
9595       return SDValue();
9596 
9597     if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16)
9598       return SDValue();
9599 
9600     if ((Vec1 == Vec3 && Vec2 == Vec4) ||
9601         (Vec1 == Vec4 && Vec2 == Vec3)) {
9602       return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc,
9603                          DAG.getTargetConstant(0, SL, MVT::i1));
9604     }
9605   }
9606   return SDValue();
9607 }
9608 
9609 SDValue SITargetLowering::performSetCCCombine(SDNode *N,
9610                                               DAGCombinerInfo &DCI) const {
9611   SelectionDAG &DAG = DCI.DAG;
9612   SDLoc SL(N);
9613 
9614   SDValue LHS = N->getOperand(0);
9615   SDValue RHS = N->getOperand(1);
9616   EVT VT = LHS.getValueType();
9617   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
9618 
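  // If only the LHS is a constant, swap the operands and the condition code
  // so later checks only need to handle a constant RHS.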
9619   auto CRHS = dyn_cast<ConstantSDNode>(RHS);
9620   if (!CRHS) {
9621     CRHS = dyn_cast<ConstantSDNode>(LHS);
9622     if (CRHS) {
9623       std::swap(LHS, RHS);
9624       CC = getSetCCSwappedOperands(CC);
9625     }
9626   }
9627 
9628   if (CRHS) {
9629     if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
9630         isBoolSGPR(LHS.getOperand(0))) {
9631       // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
9632       // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
9633       // setcc (sext from i1 cc),  0, eq|sge|ule) => not cc => xor cc, -1
9634       // setcc (sext from i1 cc),  0, ne|ugt|slt) => cc
9635       if ((CRHS->isAllOnesValue() &&
9636            (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
9637           (CRHS->isNullValue() &&
9638            (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
9639         return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
9640                            DAG.getConstant(-1, SL, MVT::i1));
9641       if ((CRHS->isAllOnesValue() &&
9642            (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
9643           (CRHS->isNullValue() &&
9644            (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
9645         return LHS.getOperand(0);
9646     }
9647 
9648     uint64_t CRHSVal = CRHS->getZExtValue();
9649     if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
9650         LHS.getOpcode() == ISD::SELECT &&
9651         isa<ConstantSDNode>(LHS.getOperand(1)) &&
9652         isa<ConstantSDNode>(LHS.getOperand(2)) &&
9653         LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) &&
9654         isBoolSGPR(LHS.getOperand(0))) {
9655       // Given CT != FT:
9656       // setcc (select cc, CT, CF), CF, eq => xor cc, -1
9657       // setcc (select cc, CT, CF), CF, ne => cc
9658       // setcc (select cc, CT, CF), CT, ne => xor cc, -1
9659       // setcc (select cc, CT, CF), CT, eq => cc
9660       uint64_t CT = LHS.getConstantOperandVal(1);
9661       uint64_t CF = LHS.getConstantOperandVal(2);
9662 
9663       if ((CF == CRHSVal && CC == ISD::SETEQ) ||
9664           (CT == CRHSVal && CC == ISD::SETNE))
9665         return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
9666                            DAG.getConstant(-1, SL, MVT::i1));
9667       if ((CF == CRHSVal && CC == ISD::SETNE) ||
9668           (CT == CRHSVal && CC == ISD::SETEQ))
9669         return LHS.getOperand(0);
9670     }
9671   }
9672 
9673   if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
9674                                            VT != MVT::f16))
9675     return SDValue();
9676 
9677   // Match isinf/isfinite pattern
9678   // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
9679   // (fcmp one (fabs x), inf) -> (fp_class x,
9680   // (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero)
  if ((CC == ISD::SETOEQ || CC == ISD::SETONE) &&
      LHS.getOpcode() == ISD::FABS) {
9682     const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
9683     if (!CRHS)
9684       return SDValue();
9685 
9686     const APFloat &APF = CRHS->getValueAPF();
9687     if (APF.isInfinity() && !APF.isNegative()) {
9688       const unsigned IsInfMask = SIInstrFlags::P_INFINITY |
9689                                  SIInstrFlags::N_INFINITY;
9690       const unsigned IsFiniteMask = SIInstrFlags::N_ZERO |
9691                                     SIInstrFlags::P_ZERO |
9692                                     SIInstrFlags::N_NORMAL |
9693                                     SIInstrFlags::P_NORMAL |
9694                                     SIInstrFlags::N_SUBNORMAL |
9695                                     SIInstrFlags::P_SUBNORMAL;
9696       unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask;
9697       return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
9698                          DAG.getConstant(Mask, SL, MVT::i32));
9699     }
9700   }
9701 
9702   return SDValue();
9703 }
9704 
9705 SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
9706                                                      DAGCombinerInfo &DCI) const {
9707   SelectionDAG &DAG = DCI.DAG;
9708   SDLoc SL(N);
9709   unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
9710 
9711   SDValue Src = N->getOperand(0);
9712   SDValue Srl = N->getOperand(0);
9713   if (Srl.getOpcode() == ISD::ZERO_EXTEND)
9714     Srl = Srl.getOperand(0);
9715 
9716   // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
9717   if (Srl.getOpcode() == ISD::SRL) {
9718     // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
9719     // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
9720     // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
9721 
9722     if (const ConstantSDNode *C =
9723         dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
9724       Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
9725                                EVT(MVT::i32));
9726 
9727       unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
9728       if (SrcOffset < 32 && SrcOffset % 8 == 0) {
9729         return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
9730                            MVT::f32, Srl);
9731       }
9732     }
9733   }
9734 
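  // Only the byte selected by this cvt_f32_ubyteN variant is demanded from
  // the source; try to simplify it based on that.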
9735   APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
9736 
9737   KnownBits Known;
9738   TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
9739                                         !DCI.isBeforeLegalizeOps());
9740   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9741   if (TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
9742     DCI.CommitTargetLoweringOpt(TLO);
9743   }
9744 
9745   return SDValue();
9746 }
9747 
9748 SDValue SITargetLowering::performClampCombine(SDNode *N,
9749                                               DAGCombinerInfo &DCI) const {
9750   ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
9751   if (!CSrc)
9752     return SDValue();
9753 
9754   const MachineFunction &MF = DCI.DAG.getMachineFunction();
9755   const APFloat &F = CSrc->getValueAPF();
9756   APFloat Zero = APFloat::getZero(F.getSemantics());
9757   APFloat::cmpResult Cmp0 = F.compare(Zero);
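  // Fold clamp of a constant: anything below 0.0 clamps to 0.0, and in DX10
  // clamp mode NaN also clamps to 0.0.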
9758   if (Cmp0 == APFloat::cmpLessThan ||
9759       (Cmp0 == APFloat::cmpUnordered &&
9760        MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) {
9761     return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
9762   }
9763 
9764   APFloat One(F.getSemantics(), "1.0");
9765   APFloat::cmpResult Cmp1 = F.compare(One);
9766   if (Cmp1 == APFloat::cmpGreaterThan)
9767     return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));
9768 
9769   return SDValue(CSrc, 0);
}

SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
9774                                             DAGCombinerInfo &DCI) const {
9775   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
9776     return SDValue();
9777   switch (N->getOpcode()) {
9778   default:
9779     return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
9780   case ISD::ADD:
9781     return performAddCombine(N, DCI);
9782   case ISD::SUB:
9783     return performSubCombine(N, DCI);
9784   case ISD::ADDCARRY:
9785   case ISD::SUBCARRY:
9786     return performAddCarrySubCarryCombine(N, DCI);
9787   case ISD::FADD:
9788     return performFAddCombine(N, DCI);
9789   case ISD::FSUB:
9790     return performFSubCombine(N, DCI);
9791   case ISD::SETCC:
9792     return performSetCCCombine(N, DCI);
9793   case ISD::FMAXNUM:
9794   case ISD::FMINNUM:
9795   case ISD::FMAXNUM_IEEE:
9796   case ISD::FMINNUM_IEEE:
9797   case ISD::SMAX:
9798   case ISD::SMIN:
9799   case ISD::UMAX:
9800   case ISD::UMIN:
9801   case AMDGPUISD::FMIN_LEGACY:
9802   case AMDGPUISD::FMAX_LEGACY:
9803     return performMinMaxCombine(N, DCI);
9804   case ISD::FMA:
9805     return performFMACombine(N, DCI);
9806   case ISD::LOAD: {
    if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
      return Widened;
9809     LLVM_FALLTHROUGH;
9810   }
9811   case ISD::STORE:
9812   case ISD::ATOMIC_LOAD:
9813   case ISD::ATOMIC_STORE:
9814   case ISD::ATOMIC_CMP_SWAP:
9815   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
9816   case ISD::ATOMIC_SWAP:
9817   case ISD::ATOMIC_LOAD_ADD:
9818   case ISD::ATOMIC_LOAD_SUB:
9819   case ISD::ATOMIC_LOAD_AND:
9820   case ISD::ATOMIC_LOAD_OR:
9821   case ISD::ATOMIC_LOAD_XOR:
9822   case ISD::ATOMIC_LOAD_NAND:
9823   case ISD::ATOMIC_LOAD_MIN:
9824   case ISD::ATOMIC_LOAD_MAX:
9825   case ISD::ATOMIC_LOAD_UMIN:
9826   case ISD::ATOMIC_LOAD_UMAX:
9827   case ISD::ATOMIC_LOAD_FADD:
9828   case AMDGPUISD::ATOMIC_INC:
9829   case AMDGPUISD::ATOMIC_DEC:
9830   case AMDGPUISD::ATOMIC_LOAD_FMIN:
9831   case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
9832     if (DCI.isBeforeLegalize())
9833       break;
9834     return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
9835   case ISD::AND:
9836     return performAndCombine(N, DCI);
9837   case ISD::OR:
9838     return performOrCombine(N, DCI);
9839   case ISD::XOR:
9840     return performXorCombine(N, DCI);
9841   case ISD::ZERO_EXTEND:
9842     return performZeroExtendCombine(N, DCI);
9843   case ISD::SIGN_EXTEND_INREG:
    return performSignExtendInRegCombine(N, DCI);
9845   case AMDGPUISD::FP_CLASS:
9846     return performClassCombine(N, DCI);
9847   case ISD::FCANONICALIZE:
9848     return performFCanonicalizeCombine(N, DCI);
9849   case AMDGPUISD::RCP:
9850     return performRcpCombine(N, DCI);
9851   case AMDGPUISD::FRACT:
9852   case AMDGPUISD::RSQ:
9853   case AMDGPUISD::RCP_LEGACY:
9854   case AMDGPUISD::RSQ_LEGACY:
9855   case AMDGPUISD::RCP_IFLAG:
9856   case AMDGPUISD::RSQ_CLAMP:
9857   case AMDGPUISD::LDEXP: {
9858     SDValue Src = N->getOperand(0);
9859     if (Src.isUndef())
9860       return Src;
9861     break;
9862   }
9863   case ISD::SINT_TO_FP:
9864   case ISD::UINT_TO_FP:
9865     return performUCharToFloatCombine(N, DCI);
9866   case AMDGPUISD::CVT_F32_UBYTE0:
9867   case AMDGPUISD::CVT_F32_UBYTE1:
9868   case AMDGPUISD::CVT_F32_UBYTE2:
9869   case AMDGPUISD::CVT_F32_UBYTE3:
9870     return performCvtF32UByteNCombine(N, DCI);
9871   case AMDGPUISD::FMED3:
9872     return performFMed3Combine(N, DCI);
9873   case AMDGPUISD::CVT_PKRTZ_F16_F32:
9874     return performCvtPkRTZCombine(N, DCI);
9875   case AMDGPUISD::CLAMP:
9876     return performClampCombine(N, DCI);
9877   case ISD::SCALAR_TO_VECTOR: {
9878     SelectionDAG &DAG = DCI.DAG;
9879     EVT VT = N->getValueType(0);
9880 
9881     // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
9882     if (VT == MVT::v2i16 || VT == MVT::v2f16) {
9883       SDLoc SL(N);
9884       SDValue Src = N->getOperand(0);
9885       EVT EltVT = Src.getValueType();
9886       if (EltVT == MVT::f16)
9887         Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
9888 
9889       SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
9890       return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
9891     }
9892 
9893     break;
9894   }
9895   case ISD::EXTRACT_VECTOR_ELT:
9896     return performExtractVectorEltCombine(N, DCI);
9897   case ISD::INSERT_VECTOR_ELT:
9898     return performInsertVectorEltCombine(N, DCI);
9899   }
9900   return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
9901 }
9902 
9903 /// Helper function for adjustWritemask
9904 static unsigned SubIdx2Lane(unsigned Idx) {
9905   switch (Idx) {
9906   default: return 0;
9907   case AMDGPU::sub0: return 0;
9908   case AMDGPU::sub1: return 1;
9909   case AMDGPU::sub2: return 2;
9910   case AMDGPU::sub3: return 3;
9911   case AMDGPU::sub4: return 4; // Possible with TFE/LWE
9912   }
9913 }
9914 
9915 /// Adjust the writemask of MIMG instructions
9916 SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
9917                                           SelectionDAG &DAG) const {
9918   unsigned Opcode = Node->getMachineOpcode();
9919 
9920   // Subtract 1 because the vdata output is not a MachineSDNode operand.
9921   int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1;
9922   if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx))
9923     return Node; // not implemented for D16
9924 
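  // At most one user per result lane: four data lanes plus one optional
  // TFE/LWE status lane.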
9925   SDNode *Users[5] = { nullptr };
9926   unsigned Lane = 0;
9927   unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1;
9928   unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
9929   unsigned NewDmask = 0;
9930   unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1;
9931   unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1;
  bool UsesTFC = Node->getConstantOperandVal(TFEIdx) ||
                 Node->getConstantOperandVal(LWEIdx);
9934   unsigned TFCLane = 0;
9935   bool HasChain = Node->getNumValues() > 1;
9936 
9937   if (OldDmask == 0) {
    // These are folded out, but on the off chance it happens, don't assert.
9939     return Node;
9940   }
9941 
9942   unsigned OldBitsSet = countPopulation(OldDmask);
9943   // Work out which is the TFE/LWE lane if that is enabled.
9944   if (UsesTFC) {
9945     TFCLane = OldBitsSet;
9946   }
9947 
9948   // Try to figure out the used register components
9949   for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
9950        I != E; ++I) {
9951 
9952     // Don't look at users of the chain.
9953     if (I.getUse().getResNo() != 0)
9954       continue;
9955 
9956     // Abort if we can't understand the usage
9957     if (!I->isMachineOpcode() ||
9958         I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
9959       return Node;
9960 
9961     // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
9962     // Note that subregs are packed, i.e. Lane==0 is the first bit set
9963     // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
9964     // set, etc.
9965     Lane = SubIdx2Lane(I->getConstantOperandVal(1));
9966 
9967     // Check if the use is for the TFE/LWE generated result at VGPRn+1.
9968     if (UsesTFC && Lane == TFCLane) {
9969       Users[Lane] = *I;
9970     } else {
9971       // Set which texture component corresponds to the lane.
9972       unsigned Comp;
9973       for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) {
9974         Comp = countTrailingZeros(Dmask);
9975         Dmask &= ~(1 << Comp);
9976       }
9977 
9978       // Abort if we have more than one user per component.
9979       if (Users[Lane])
9980         return Node;
9981 
9982       Users[Lane] = *I;
9983       NewDmask |= 1 << Comp;
9984     }
9985   }
9986 
9987   // Don't allow 0 dmask, as hardware assumes one channel enabled.
9988   bool NoChannels = !NewDmask;
9989   if (NoChannels) {
9990     if (!UsesTFC) {
9991       // No uses of the result and not using TFC. Then do nothing.
9992       return Node;
9993     }
9994     // If the original dmask has one channel - then nothing to do
9995     if (OldBitsSet == 1)
9996       return Node;
9997     // Use an arbitrary dmask - required for the instruction to work
9998     NewDmask = 1;
9999   }
10000   // Abort if there's no change
10001   if (NewDmask == OldDmask)
10002     return Node;
10003 
10004   unsigned BitsSet = countPopulation(NewDmask);
10005 
  // Check for TFE or LWE - increase the number of channels by one to account
  // for the extra return value.
  // This will need adjustment for D16 if it is also handled in
  // adjustWritemask (this function), but at present D16 is excluded.
10010   unsigned NewChannels = BitsSet + UsesTFC;
10011 
10012   int NewOpcode =
10013       AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels);
10014   assert(NewOpcode != -1 &&
10015          NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
10016          "failed to find equivalent MIMG op");
10017 
10018   // Adjust the writemask in the node
10019   SmallVector<SDValue, 12> Ops;
10020   Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
10021   Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
10022   Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
10023 
10024   MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
10025 
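  // Round the result type up to a supported width: a 3-channel result uses a
  // 4-element vector, and a 5-channel result (4 data + TFE/LWE status) uses
  // an 8-element vector.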
10026   MVT ResultVT = NewChannels == 1 ?
10027     SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 :
10028                            NewChannels == 5 ? 8 : NewChannels);
  SDVTList NewVTList = HasChain ?
    DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);

  MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
10034                                               NewVTList, Ops);
10035 
10036   if (HasChain) {
10037     // Update chain.
10038     DAG.setNodeMemRefs(NewNode, Node->memoperands());
10039     DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
10040   }
10041 
10042   if (NewChannels == 1) {
10043     assert(Node->hasNUsesOfValue(1, 0));
10044     SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
10045                                       SDLoc(Node), Users[Lane]->getValueType(0),
10046                                       SDValue(NewNode, 0));
10047     DAG.ReplaceAllUsesWith(Users[Lane], Copy);
10048     return nullptr;
10049   }
10050 
10051   // Update the users of the node with the new indices
10052   for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) {
10053     SDNode *User = Users[i];
10054     if (!User) {
10055       // Handle the special case of NoChannels. We set NewDmask to 1 above, but
10056       // Users[0] is still nullptr because channel 0 doesn't really have a use.
10057       if (i || !NoChannels)
10058         continue;
10059     } else {
10060       SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
10061       DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
10062     }
10063 
10064     switch (Idx) {
10065     default: break;
10066     case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
10067     case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
10068     case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
10069     case AMDGPU::sub3: Idx = AMDGPU::sub4; break;
10070     }
10071   }
10072 
10073   DAG.RemoveDeadNode(Node);
10074   return nullptr;
10075 }
10076 
10077 static bool isFrameIndexOp(SDValue Op) {
10078   if (Op.getOpcode() == ISD::AssertZext)
10079     Op = Op.getOperand(0);
10080 
10081   return isa<FrameIndexSDNode>(Op);
10082 }
10083 
/// Legalize target independent instructions (e.g. INSERT_SUBREG)
/// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers.
10087 SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
10088                                                         SelectionDAG &DAG) const {
10089   if (Node->getOpcode() == ISD::CopyToReg) {
10090     RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
10091     SDValue SrcVal = Node->getOperand(2);
10092 
10093     // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
10094     // to try understanding copies to physical registers.
10095     if (SrcVal.getValueType() == MVT::i1 &&
10096         Register::isPhysicalRegister(DestReg->getReg())) {
10097       SDLoc SL(Node);
10098       MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
10099       SDValue VReg = DAG.getRegister(
10100         MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
10101 
10102       SDNode *Glued = Node->getGluedNode();
10103       SDValue ToVReg
10104         = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
10105                          SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
10106       SDValue ToResultReg
10107         = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
10108                            VReg, ToVReg.getValue(1));
10109       DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
10110       DAG.RemoveDeadNode(Node);
10111       return ToResultReg.getNode();
10112     }
10113   }
10114 
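  // Replace any frame index operands with the result of an S_MOV_B32 of the
  // frame index, since these instructions expect register inputs.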
10115   SmallVector<SDValue, 8> Ops;
10116   for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
10117     if (!isFrameIndexOp(Node->getOperand(i))) {
10118       Ops.push_back(Node->getOperand(i));
10119       continue;
10120     }
10121 
10122     SDLoc DL(Node);
10123     Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
10124                                      Node->getOperand(i).getValueType(),
10125                                      Node->getOperand(i)), 0));
10126   }
10127 
10128   return DAG.UpdateNodeOperands(Node, Ops);
10129 }
10130 
10131 /// Fold the instructions after selecting them.
10132 /// Returns null if users were already updated.
10133 SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
10134                                           SelectionDAG &DAG) const {
10135   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10136   unsigned Opcode = Node->getMachineOpcode();
10137 
10138   if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
10139       !TII->isGather4(Opcode)) {
10140     return adjustWritemask(Node, DAG);
10141   }
10142 
10143   if (Opcode == AMDGPU::INSERT_SUBREG ||
10144       Opcode == AMDGPU::REG_SEQUENCE) {
10145     legalizeTargetIndependentNode(Node, DAG);
10146     return Node;
10147   }
10148 
10149   switch (Opcode) {
10150   case AMDGPU::V_DIV_SCALE_F32:
10151   case AMDGPU::V_DIV_SCALE_F64: {
10152     // Satisfy the operand register constraint when one of the inputs is
10153     // undefined. Ordinarily each undef value will have its own implicit_def of
10154     // a vreg, so force these to use a single register.
10155     SDValue Src0 = Node->getOperand(0);
10156     SDValue Src1 = Node->getOperand(1);
10157     SDValue Src2 = Node->getOperand(2);
10158 
10159     if ((Src0.isMachineOpcode() &&
10160          Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
10161         (Src0 == Src1 || Src0 == Src2))
10162       break;
10163 
10164     MVT VT = Src0.getValueType().getSimpleVT();
10165     const TargetRegisterClass *RC =
10166         getRegClassFor(VT, Src0.getNode()->isDivergent());
10167 
10168     MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
10169     SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
10170 
10171     SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
10172                                       UndefReg, Src0, SDValue());
10173 
10174     // src0 must be the same register as src1 or src2, even if the value is
10175     // undefined, so make sure we don't violate this constraint.
10176     if (Src0.isMachineOpcode() &&
10177         Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
10178       if (Src1.isMachineOpcode() &&
10179           Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
10180         Src0 = Src1;
10181       else if (Src2.isMachineOpcode() &&
10182                Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
10183         Src0 = Src2;
10184       else {
10185         assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
10186         Src0 = UndefReg;
10187         Src1 = UndefReg;
10188       }
10189     } else
10190       break;
10191 
10192     SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
10193     for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
10194       Ops.push_back(Node->getOperand(I));
10195 
10196     Ops.push_back(ImpDef.getValue(1));
10197     return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
10198   }
10199   case AMDGPU::V_PERMLANE16_B32:
10200   case AMDGPU::V_PERMLANEX16_B32: {
10201     ConstantSDNode *FI = cast<ConstantSDNode>(Node->getOperand(0));
10202     ConstantSDNode *BC = cast<ConstantSDNode>(Node->getOperand(2));
10203     if (!FI->getZExtValue() && !BC->getZExtValue())
10204       break;
10205     SDValue VDstIn = Node->getOperand(6);
10206     if (VDstIn.isMachineOpcode()
10207         && VDstIn.getMachineOpcode() == AMDGPU::IMPLICIT_DEF)
10208       break;
10209     MachineSDNode *ImpDef = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
10210                                                SDLoc(Node), MVT::i32);
10211     SmallVector<SDValue, 8> Ops = { SDValue(FI, 0), Node->getOperand(1),
10212                                     SDValue(BC, 0), Node->getOperand(3),
10213                                     Node->getOperand(4), Node->getOperand(5),
10214                                     SDValue(ImpDef, 0), Node->getOperand(7) };
10215     return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
10216   }
10217   default:
10218     break;
10219   }
10220 
10221   return Node;
10222 }
10223 
10224 /// Assign the register class depending on the number of
10225 /// bits set in the writemask
10226 void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
10227                                                      SDNode *Node) const {
10228   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10229 
10230   MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
10231 
10232   if (TII->isVOP3(MI.getOpcode())) {
10233     // Make sure constant bus requirements are respected.
10234     TII->legalizeOperandsVOP3(MRI, MI);
10235 
    // Prefer VGPRs over AGPRs in mAI instructions where possible.
    // This saves a chain-copy of registers and better balances register
    // use between VGPRs and AGPRs, as AGPR tuples tend to be big.
10239     if (const MCOperandInfo *OpInfo = MI.getDesc().OpInfo) {
10240       unsigned Opc = MI.getOpcode();
10241       const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
10242       for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
10243                       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) {
10244         if (I == -1)
10245           break;
10246         MachineOperand &Op = MI.getOperand(I);
10247         if ((OpInfo[I].RegClass != llvm::AMDGPU::AV_64RegClassID &&
10248              OpInfo[I].RegClass != llvm::AMDGPU::AV_32RegClassID) ||
10249             !Register::isVirtualRegister(Op.getReg()) ||
10250             !TRI->isAGPR(MRI, Op.getReg()))
10251           continue;
10252         auto *Src = MRI.getUniqueVRegDef(Op.getReg());
10253         if (!Src || !Src->isCopy() ||
10254             !TRI->isSGPRReg(MRI, Src->getOperand(1).getReg()))
10255           continue;
10256         auto *RC = TRI->getRegClassForReg(MRI, Op.getReg());
10257         auto *NewRC = TRI->getEquivalentVGPRClass(RC);
10258         // All uses of agpr64 and agpr32 can also accept vgpr except for
10259         // v_accvgpr_read, but we do not produce agpr reads during selection,
10260         // so no use checks are needed.
10261         MRI.setRegClass(Op.getReg(), NewRC);
10262       }
10263     }
10264 
10265     return;
10266   }
10267 
10268   // Replace unused atomics with the no return version.
10269   int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
10270   if (NoRetAtomicOp != -1) {
10271     if (!Node->hasAnyUseOfValue(0)) {
10272       MI.setDesc(TII->get(NoRetAtomicOp));
10273       MI.RemoveOperand(0);
10274       return;
10275     }
10276 
10277     // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
10278     // instruction, because the return type of these instructions is a vec2 of
10279     // the memory type, so it can be tied to the input operand.
10280     // This means these instructions always have a use, so we need to add a
10281     // special case to check if the atomic has only one extract_subreg use,
10282     // which itself has no uses.
10283     if ((Node->hasNUsesOfValue(1, 0) &&
10284          Node->use_begin()->isMachineOpcode() &&
10285          Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
10286          !Node->use_begin()->hasAnyUseOfValue(0))) {
10287       unsigned Def = MI.getOperand(0).getReg();
10288 
10289       // Change this into a noret atomic.
10290       MI.setDesc(TII->get(NoRetAtomicOp));
10291       MI.RemoveOperand(0);
10292 
10293       // If we only remove the def operand from the atomic instruction, the
10294       // extract_subreg will be left with a use of a vreg without a def.
10295       // So we need to insert an implicit_def to avoid machine verifier
10296       // errors.
10297       BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
10298               TII->get(AMDGPU::IMPLICIT_DEF), Def);
10299     }
10300     return;
10301   }
10302 }
10303 
10304 static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
10305                               uint64_t Val) {
10306   SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
10307   return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
10308 }
10309 
10310 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
10311                                                 const SDLoc &DL,
10312                                                 SDValue Ptr) const {
10313   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10314 
10315   // Build the half of the subregister with the constants before building the
10316   // full 128-bit register. If we are building multiple resource descriptors,
10317   // this will allow CSEing of the 2-component register.
10318   const SDValue Ops0[] = {
10319     DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
10320     buildSMovImm32(DAG, DL, 0),
10321     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
10322     buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
10323     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
10324   };
10325 
10326   SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
10327                                                 MVT::v2i32, Ops0), 0);
10328 
10329   // Combine the constants and the pointer.
10330   const SDValue Ops1[] = {
10331     DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
10332     Ptr,
10333     DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
10334     SubRegHi,
10335     DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
10336   };
10337 
10338   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
10339 }
10340 
/// Return a resource descriptor with the 'Add TID' bit enabled.
/// The TID (Thread ID) is multiplied by the stride value (bits [61:48] of
/// the resource descriptor) to create an offset, which is added to the
/// resource pointer.
10345 MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
10346                                            SDValue Ptr, uint32_t RsrcDword1,
10347                                            uint64_t RsrcDword2And3) const {
10348   SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
10349   SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
10350   if (RsrcDword1) {
10351     PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
10352                                      DAG.getConstant(RsrcDword1, DL, MVT::i32)),
10353                     0);
10354   }
10355 
10356   SDValue DataLo = buildSMovImm32(DAG, DL,
10357                                   RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
10358   SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
10359 
10360   const SDValue Ops[] = {
10361     DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
10362     PtrLo,
10363     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
10364     PtrHi,
10365     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
10366     DataLo,
10367     DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
10368     DataHi,
10369     DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
10370   };
10371 
10372   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
10373 }
10374 
10375 //===----------------------------------------------------------------------===//
10376 //                         SI Inline Assembly Support
10377 //===----------------------------------------------------------------------===//
10378 
10379 std::pair<unsigned, const TargetRegisterClass *>
10380 SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
10381                                                StringRef Constraint,
10382                                                MVT VT) const {
10383   const TargetRegisterClass *RC = nullptr;
10384   if (Constraint.size() == 1) {
10385     switch (Constraint[0]) {
10386     default:
10387       return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10388     case 's':
10389     case 'r':
10390       switch (VT.getSizeInBits()) {
10391       default:
10392         return std::make_pair(0U, nullptr);
10393       case 32:
10394       case 16:
10395         RC = &AMDGPU::SReg_32_XM0RegClass;
10396         break;
10397       case 64:
10398         RC = &AMDGPU::SGPR_64RegClass;
10399         break;
10400       case 96:
10401         RC = &AMDGPU::SReg_96RegClass;
10402         break;
10403       case 128:
10404         RC = &AMDGPU::SReg_128RegClass;
10405         break;
10406       case 160:
10407         RC = &AMDGPU::SReg_160RegClass;
10408         break;
10409       case 256:
10410         RC = &AMDGPU::SReg_256RegClass;
10411         break;
10412       case 512:
10413         RC = &AMDGPU::SReg_512RegClass;
10414         break;
10415       }
10416       break;
10417     case 'v':
10418       switch (VT.getSizeInBits()) {
10419       default:
10420         return std::make_pair(0U, nullptr);
10421       case 32:
10422       case 16:
10423         RC = &AMDGPU::VGPR_32RegClass;
10424         break;
10425       case 64:
10426         RC = &AMDGPU::VReg_64RegClass;
10427         break;
10428       case 96:
10429         RC = &AMDGPU::VReg_96RegClass;
10430         break;
10431       case 128:
10432         RC = &AMDGPU::VReg_128RegClass;
10433         break;
10434       case 160:
10435         RC = &AMDGPU::VReg_160RegClass;
10436         break;
10437       case 256:
10438         RC = &AMDGPU::VReg_256RegClass;
10439         break;
10440       case 512:
10441         RC = &AMDGPU::VReg_512RegClass;
10442         break;
10443       }
10444       break;
10445     case 'a':
10446       if (!Subtarget->hasMAIInsts())
10447         break;
10448       switch (VT.getSizeInBits()) {
10449       default:
10450         return std::make_pair(0U, nullptr);
10451       case 32:
10452       case 16:
10453         RC = &AMDGPU::AGPR_32RegClass;
10454         break;
10455       case 64:
10456         RC = &AMDGPU::AReg_64RegClass;
10457         break;
10458       case 128:
10459         RC = &AMDGPU::AReg_128RegClass;
10460         break;
10461       case 512:
10462         RC = &AMDGPU::AReg_512RegClass;
10463         break;
10464       case 1024:
10465         RC = &AMDGPU::AReg_1024RegClass;
10466         // v32 types are not legal but we support them here.
10467         return std::make_pair(0U, RC);
10468       }
10469       break;
10470     }
    // We actually support i128, i16 and f16 as inline parameters
    // even if they are not reported as legal.
10473     if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
10474                VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
10475       return std::make_pair(0U, RC);
10476   }
10477 
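  // Handle constraints that name a specific register, e.g. "{v0}": choose
  // the 32-bit class from the letter and parse the register index after it.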
10478   if (Constraint.size() > 1) {
10479     if (Constraint[1] == 'v') {
10480       RC = &AMDGPU::VGPR_32RegClass;
10481     } else if (Constraint[1] == 's') {
10482       RC = &AMDGPU::SGPR_32RegClass;
10483     } else if (Constraint[1] == 'a') {
10484       RC = &AMDGPU::AGPR_32RegClass;
10485     }
10486 
10487     if (RC) {
10488       uint32_t Idx;
10489       bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
10490       if (!Failed && Idx < RC->getNumRegs())
10491         return std::make_pair(RC->getRegister(Idx), RC);
10492     }
10493   }
10494   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10495 }
10496 
10497 SITargetLowering::ConstraintType
10498 SITargetLowering::getConstraintType(StringRef Constraint) const {
10499   if (Constraint.size() == 1) {
10500     switch (Constraint[0]) {
10501     default: break;
10502     case 's':
10503     case 'v':
10504     case 'a':
10505       return C_RegisterClass;
10506     }
10507   }
10508   return TargetLowering::getConstraintType(Constraint);
10509 }
10510 
10511 // Figure out which registers should be reserved for stack access. Only after
10512 // the function is legalized do we know all of the non-spill stack objects or if
10513 // calls are present.
10514 void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
10515   MachineRegisterInfo &MRI = MF.getRegInfo();
10516   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
10517   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
10518   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
10519 
10520   if (Info->isEntryFunction()) {
10521     // Callable functions have fixed registers used for stack access.
10522     reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
10523   }
10524 
10525   assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
10526                              Info->getStackPtrOffsetReg()));
10527   if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG)
10528     MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
10529 
10530   // We need to worry about replacing the default register with itself in case
10531   // of MIR testcases missing the MFI.
10532   if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG)
10533     MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
10534 
10535   if (Info->getFrameOffsetReg() != AMDGPU::FP_REG)
10536     MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
10537 
10538   if (Info->getScratchWaveOffsetReg() != AMDGPU::SCRATCH_WAVE_OFFSET_REG) {
10539     MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
10540                        Info->getScratchWaveOffsetReg());
10541   }
10542 
10543   Info->limitOccupancy(MF);
10544 
10545   if (ST.isWave32() && !MF.empty()) {
    // Add a VCC_HI def because many instructions are marked as implicitly
    // using VCC, but we may only define VCC_LO. If nothing defines VCC_HI,
    // we may end up with a use of undef.
10549 
10550     const SIInstrInfo *TII = ST.getInstrInfo();
10551     DebugLoc DL;
10552 
10553     MachineBasicBlock &MBB = MF.front();
10554     MachineBasicBlock::iterator I = MBB.getFirstNonDebugInstr();
10555     BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), AMDGPU::VCC_HI);
10556 
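    // Fix up implicit operands, e.g. implicit uses of VCC are rewritten to
    // VCC_LO in wave32 mode.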
10557     for (auto &MBB : MF) {
10558       for (auto &MI : MBB) {
10559         TII->fixImplicitOperands(MI);
10560       }
10561     }
10562   }
10563 
10564   TargetLoweringBase::finalizeLowering(MF);
10565 }
10566 
10567 void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
10568                                                      KnownBits &Known,
10569                                                      const APInt &DemandedElts,
10570                                                      const SelectionDAG &DAG,
10571                                                      unsigned Depth) const {
10572   TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
10573                                                 DAG, Depth);
10574 
10575   // Set the high bits to zero based on the maximum allowed scratch size per
10576   // wave. We can't use vaddr in MUBUF instructions if we don't know the address
10577   // calculation won't overflow, so assume the sign bit is never set.
10578   Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
10579 }
10580 
10581 unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
10582   const unsigned PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
10583   const unsigned CacheLineAlign = 6; // log2(64)
10584 
  // Pre-GFX10 targets did not benefit from loop alignment.
10586   if (!ML || DisableLoopAlignment ||
10587       (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
10588       getSubtarget()->hasInstFwdPrefetchBug())
10589     return PrefAlign;
10590 
  // On GFX10 the I$ consists of 4 x 64-byte cache lines.
  // By default the prefetcher keeps one cache line behind and reads two
  // ahead. We can modify it with S_INST_PREFETCH so that larger loops have
  // two lines behind and one ahead.
  // Therefore we can benefit from aligning loop headers if the loop fits in
  // 192 bytes.
  // If the loop fits in 64 bytes it always spans no more than two cache
  // lines and does not need alignment.
  // Otherwise, if the loop is at most 128 bytes we do not need to modify the
  // prefetch settings; if it is at most 192 bytes we need two lines behind.
10600 
10601   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10602   const MachineBasicBlock *Header = ML->getHeader();
10603   if (Header->getAlignment() != PrefAlign)
10604     return Header->getAlignment(); // Already processed.
10605 
10606   unsigned LoopSize = 0;
10607   for (const MachineBasicBlock *MBB : ML->blocks()) {
    // If an inner loop block is aligned, assume on average half of the
    // alignment size will be added as nops.
10610     if (MBB != Header)
10611       LoopSize += (1 << MBB->getAlignment()) / 2;
10612 
10613     for (const MachineInstr &MI : *MBB) {
10614       LoopSize += TII->getInstSizeInBytes(MI);
10615       if (LoopSize > 192)
10616         return PrefAlign;
10617     }
10618   }
10619 
10620   if (LoopSize <= 64)
10621     return PrefAlign;
10622 
10623   if (LoopSize <= 128)
10624     return CacheLineAlign;
10625 
  // If any of the parent loops is surrounded by prefetch instructions, do
  // not insert new ones for the inner loop, as that would reset the parent's
  // settings.
10628   for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) {
10629     if (MachineBasicBlock *Exit = P->getExitBlock()) {
10630       auto I = Exit->getFirstNonDebugInstr();
10631       if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
10632         return CacheLineAlign;
10633     }
10634   }
10635 
10636   MachineBasicBlock *Pre = ML->getLoopPreheader();
10637   MachineBasicBlock *Exit = ML->getExitBlock();
10638 
10639   if (Pre && Exit) {
10640     BuildMI(*Pre, Pre->getFirstTerminator(), DebugLoc(),
10641             TII->get(AMDGPU::S_INST_PREFETCH))
10642       .addImm(1); // prefetch 2 lines behind PC
10643 
10644     BuildMI(*Exit, Exit->getFirstNonDebugInstr(), DebugLoc(),
10645             TII->get(AMDGPU::S_INST_PREFETCH))
10646       .addImm(2); // prefetch 1 line behind PC
10647   }
10648 
10649   return CacheLineAlign;
10650 }
10651 
10652 LLVM_ATTRIBUTE_UNUSED
10653 static bool isCopyFromRegOfInlineAsm(const SDNode *N) {
10654   assert(N->getOpcode() == ISD::CopyFromReg);
10655   do {
10656     // Follow the chain until we find an INLINEASM node.
10657     N = N->getOperand(0).getNode();
10658     if (N->getOpcode() == ISD::INLINEASM ||
10659         N->getOpcode() == ISD::INLINEASM_BR)
10660       return true;
10661   } while (N->getOpcode() == ISD::CopyFromReg);
10662   return false;
10663 }
10664 
bool SITargetLowering::isSDNodeSourceOfDivergence(
    const SDNode *N, FunctionLoweringInfo *FLI,
    LegacyDivergenceAnalysis *KDA) const {
  switch (N->getOpcode()) {
  case ISD::CopyFromReg: {
    const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1));
    const MachineFunction *MF = FLI->MF;
    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
    unsigned Reg = R->getReg();
    if (Register::isPhysicalRegister(Reg))
      return !TRI.isSGPRReg(MRI, Reg);

    if (MRI.isLiveIn(Reg)) {
      // workitem.id.x, workitem.id.y and workitem.id.z, or any other VGPR
      // formal argument, are considered divergent.
      if (!TRI.isSGPRReg(MRI, Reg))
        return true;
      // Formal arguments of non-entry functions are conservatively
      // considered divergent.
      if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
        return true;
      return false;
    }
    const Value *V = FLI->getValueFromVirtualReg(Reg);
    if (V)
      return KDA->isDivergent(V);
    assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
    return !TRI.isSGPRReg(MRI, Reg);
  }
  case ISD::LOAD: {
    const LoadSDNode *L = cast<LoadSDNode>(N);
    unsigned AS = L->getAddressSpace();
    // A flat load may access private memory.
    return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
  }
  case ISD::CALLSEQ_END:
    return true;
  case ISD::INTRINSIC_WO_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
  case ISD::INTRINSIC_W_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
  // In some cases intrinsics that are a source of divergence have been
  // lowered to AMDGPUISD nodes, so we need to check those too.
  case AMDGPUISD::INTERP_MOV:
  case AMDGPUISD::INTERP_P1:
  case AMDGPUISD::INTERP_P2:
    return true;
  }
10723   return false;
10724 }
10725 
10726 bool SITargetLowering::denormalsEnabledForType(EVT VT) const {
10727   switch (VT.getScalarType().getSimpleVT().SimpleTy) {
10728   case MVT::f32:
10729     return Subtarget->hasFP32Denormals();
10730   case MVT::f64:
10731     return Subtarget->hasFP64Denormals();
10732   case MVT::f16:
10733     return Subtarget->hasFP16Denormals();
10734   default:
10735     return false;
10736   }
10737 }
10738 
10739 bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
10740                                                     const SelectionDAG &DAG,
10741                                                     bool SNaN,
10742                                                     unsigned Depth) const {
10743   if (Op.getOpcode() == AMDGPUISD::CLAMP) {
10744     const MachineFunction &MF = DAG.getMachineFunction();
10745     const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
10746 
10747     if (Info->getMode().DX10Clamp)
10748       return true; // Clamped to 0.
10749     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
10750   }
10751 
10752   return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG,
10753                                                             SNaN, Depth);
10754 }
10755 
10756 TargetLowering::AtomicExpansionKind
10757 SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
10758   switch (RMW->getOperation()) {
10759   case AtomicRMWInst::FAdd: {
10760     Type *Ty = RMW->getType();
10761 
10762     // We don't have a way to support 16-bit atomics now, so just leave them
10763     // as-is.
10764     if (Ty->isHalfTy())
10765       return AtomicExpansionKind::None;
10766 
10767     if (!Ty->isFloatTy())
10768       return AtomicExpansionKind::CmpXChg;
10769 
10770     // TODO: Do have these for flat. Older targets also had them for buffers.
10771     unsigned AS = RMW->getPointerAddressSpace();
10772     return (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) ?
10773       AtomicExpansionKind::None : AtomicExpansionKind::CmpXChg;
10774   }
10775   default:
10776     break;
10777   }
10778 
10779   return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
10780 }
10781