1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Custom DAG lowering for SI
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #if defined(_MSC_VER) || defined(__MINGW32__)
15 // Provide M_PI.
16 #define _USE_MATH_DEFINES
17 #endif
18 
19 #include "SIISelLowering.h"
20 #include "AMDGPU.h"
21 #include "AMDGPUSubtarget.h"
22 #include "AMDGPUTargetMachine.h"
23 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
24 #include "SIDefines.h"
25 #include "SIInstrInfo.h"
26 #include "SIMachineFunctionInfo.h"
27 #include "SIRegisterInfo.h"
28 #include "Utils/AMDGPUBaseInfo.h"
29 #include "llvm/ADT/APFloat.h"
30 #include "llvm/ADT/APInt.h"
31 #include "llvm/ADT/ArrayRef.h"
32 #include "llvm/ADT/BitVector.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringRef.h"
36 #include "llvm/ADT/StringSwitch.h"
37 #include "llvm/ADT/Twine.h"
38 #include "llvm/Analysis/LegacyDivergenceAnalysis.h"
39 #include "llvm/CodeGen/Analysis.h"
40 #include "llvm/CodeGen/CallingConvLower.h"
41 #include "llvm/CodeGen/DAGCombine.h"
42 #include "llvm/CodeGen/ISDOpcodes.h"
43 #include "llvm/CodeGen/MachineBasicBlock.h"
44 #include "llvm/CodeGen/MachineFrameInfo.h"
45 #include "llvm/CodeGen/MachineFunction.h"
46 #include "llvm/CodeGen/MachineInstr.h"
47 #include "llvm/CodeGen/MachineInstrBuilder.h"
48 #include "llvm/CodeGen/MachineLoopInfo.h"
49 #include "llvm/CodeGen/MachineMemOperand.h"
50 #include "llvm/CodeGen/MachineModuleInfo.h"
51 #include "llvm/CodeGen/MachineOperand.h"
52 #include "llvm/CodeGen/MachineRegisterInfo.h"
53 #include "llvm/CodeGen/SelectionDAG.h"
54 #include "llvm/CodeGen/SelectionDAGNodes.h"
55 #include "llvm/CodeGen/TargetCallingConv.h"
56 #include "llvm/CodeGen/TargetRegisterInfo.h"
57 #include "llvm/CodeGen/ValueTypes.h"
58 #include "llvm/IR/Constants.h"
59 #include "llvm/IR/DataLayout.h"
60 #include "llvm/IR/DebugLoc.h"
61 #include "llvm/IR/DerivedTypes.h"
62 #include "llvm/IR/DiagnosticInfo.h"
63 #include "llvm/IR/Function.h"
64 #include "llvm/IR/GlobalValue.h"
65 #include "llvm/IR/InstrTypes.h"
66 #include "llvm/IR/Instruction.h"
67 #include "llvm/IR/Instructions.h"
68 #include "llvm/IR/IntrinsicInst.h"
69 #include "llvm/IR/Type.h"
70 #include "llvm/Support/Casting.h"
71 #include "llvm/Support/CodeGen.h"
72 #include "llvm/Support/CommandLine.h"
73 #include "llvm/Support/Compiler.h"
74 #include "llvm/Support/ErrorHandling.h"
75 #include "llvm/Support/KnownBits.h"
76 #include "llvm/Support/MachineValueType.h"
77 #include "llvm/Support/MathExtras.h"
78 #include "llvm/Target/TargetOptions.h"
79 #include <cassert>
80 #include <cmath>
81 #include <cstdint>
82 #include <iterator>
83 #include <tuple>
84 #include <utility>
85 #include <vector>
86 
87 using namespace llvm;
88 
89 #define DEBUG_TYPE "si-lower"
90 
91 STATISTIC(NumTailCalls, "Number of tail calls");
92 
93 static cl::opt<bool> DisableLoopAlignment(
94   "amdgpu-disable-loop-alignment",
95   cl::desc("Do not align and prefetch loops"),
96   cl::init(false));
97 
98 static bool hasFP32Denormals(const MachineFunction &MF) {
99   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
100   return Info->getMode().allFP32Denormals();
101 }
102 
103 static bool hasFP64FP16Denormals(const MachineFunction &MF) {
104   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
105   return Info->getMode().allFP64FP16Denormals();
106 }
107 
108 static unsigned findFirstFreeSGPR(CCState &CCInfo) {
109   unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
110   for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
111     if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
112       return AMDGPU::SGPR0 + Reg;
113     }
114   }
115   llvm_unreachable("Cannot allocate sgpr");
116 }
117 
118 SITargetLowering::SITargetLowering(const TargetMachine &TM,
119                                    const GCNSubtarget &STI)
120     : AMDGPUTargetLowering(TM, STI),
121       Subtarget(&STI) {
122   addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
123   addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
124 
125   addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass);
126   addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);
127 
128   addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
129   addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
130   addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
131 
132   addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
133   addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass);
134 
135   addRegisterClass(MVT::v2i64, &AMDGPU::SGPR_128RegClass);
136   addRegisterClass(MVT::v2f64, &AMDGPU::SGPR_128RegClass);
137 
138   addRegisterClass(MVT::v4i32, &AMDGPU::SGPR_128RegClass);
139   addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
140 
141   addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
142   addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);
143 
144   addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
145   addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
146 
147   addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
148   addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
149 
150   if (Subtarget->has16BitInsts()) {
151     addRegisterClass(MVT::i16, &AMDGPU::SReg_32RegClass);
152     addRegisterClass(MVT::f16, &AMDGPU::SReg_32RegClass);
153 
154     // Unless there are also VOP3P operations, no operations are really legal.
155     addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32RegClass);
156     addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32RegClass);
157     addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
158     addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
159   }
160 
161   if (Subtarget->hasMAIInsts()) {
162     addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass);
163     addRegisterClass(MVT::v32f32, &AMDGPU::VReg_1024RegClass);
164   }
165 
166   computeRegisterProperties(Subtarget->getRegisterInfo());
167 
168   // The boolean content concept here is too inflexible. Compares only ever
169   // really produce a 1-bit result. Any copy/extend from these will turn into a
170   // select, and zext/1 or sext/-1 are equally cheap. Arbitrarily choose 0/1, as
171   // it's what most targets use.
172   setBooleanContents(ZeroOrOneBooleanContent);
173   setBooleanVectorContents(ZeroOrOneBooleanContent);
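  // For illustration (not from the original comment): a zext of such a 1-bit
  // compare result would typically be selected as a conditional move, roughly
  // v_cndmask_b32 dst, 0, 1, cc, so the 0/1 choice costs nothing extra.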
174 
175   // We need to custom lower vector stores from local memory
176   setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
177   setOperationAction(ISD::LOAD, MVT::v3i32, Custom);
178   setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
179   setOperationAction(ISD::LOAD, MVT::v5i32, Custom);
180   setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
181   setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
182   setOperationAction(ISD::LOAD, MVT::i1, Custom);
183   setOperationAction(ISD::LOAD, MVT::v32i32, Custom);
184 
185   setOperationAction(ISD::STORE, MVT::v2i32, Custom);
186   setOperationAction(ISD::STORE, MVT::v3i32, Custom);
187   setOperationAction(ISD::STORE, MVT::v4i32, Custom);
188   setOperationAction(ISD::STORE, MVT::v5i32, Custom);
189   setOperationAction(ISD::STORE, MVT::v8i32, Custom);
190   setOperationAction(ISD::STORE, MVT::v16i32, Custom);
191   setOperationAction(ISD::STORE, MVT::i1, Custom);
192   setOperationAction(ISD::STORE, MVT::v32i32, Custom);
193 
194   setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
195   setTruncStoreAction(MVT::v3i32, MVT::v3i16, Expand);
196   setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
197   setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
198   setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
199   setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
200   setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
201   setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
202   setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
203   setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
204   setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);
205 
206   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
207   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
208 
209   setOperationAction(ISD::SELECT, MVT::i1, Promote);
210   setOperationAction(ISD::SELECT, MVT::i64, Custom);
211   setOperationAction(ISD::SELECT, MVT::f64, Promote);
212   AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);
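  // Note (a summary of the TargetLowering mechanics, for reference): promoting
  // SELECT of f64 to the same-sized i64 simply bitcasts the operands, so the
  // f64 case reuses the custom i64 select lowering above.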
213 
214   setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
215   setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
216   setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
217   setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
218   setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
219 
220   setOperationAction(ISD::SETCC, MVT::i1, Promote);
221   setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
222   setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
223   AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
224 
225   setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
226   setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
227 
228   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
229   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
230   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
231   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
232   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
233   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v3i16, Custom);
234   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
235   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);
236 
237   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
238   setOperationAction(ISD::BR_CC, MVT::i1, Expand);
239   setOperationAction(ISD::BR_CC, MVT::i32, Expand);
240   setOperationAction(ISD::BR_CC, MVT::i64, Expand);
241   setOperationAction(ISD::BR_CC, MVT::f32, Expand);
242   setOperationAction(ISD::BR_CC, MVT::f64, Expand);
243 
244   setOperationAction(ISD::UADDO, MVT::i32, Legal);
245   setOperationAction(ISD::USUBO, MVT::i32, Legal);
246 
247   setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
248   setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);
249 
250   setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
251   setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
252   setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
253 
254 #if 0
255   setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
256   setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
257 #endif
258 
259   // We only support LOAD/STORE and vector manipulation ops for vectors
260   // with > 4 elements.
261   for (MVT VT : { MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
262                   MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16,
263                   MVT::v32i32, MVT::v32f32 }) {
264     for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
265       switch (Op) {
266       case ISD::LOAD:
267       case ISD::STORE:
268       case ISD::BUILD_VECTOR:
269       case ISD::BITCAST:
270       case ISD::EXTRACT_VECTOR_ELT:
271       case ISD::INSERT_VECTOR_ELT:
272       case ISD::INSERT_SUBVECTOR:
273       case ISD::EXTRACT_SUBVECTOR:
274       case ISD::SCALAR_TO_VECTOR:
275         break;
276       case ISD::CONCAT_VECTORS:
277         setOperationAction(Op, VT, Custom);
278         break;
279       default:
280         setOperationAction(Op, VT, Expand);
281         break;
282       }
283     }
284   }
285 
286   setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);
287 
288   // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
289   // is expanded to avoid having two separate loops in case the index is a VGPR.
290 
291   // Most operations are naturally 32-bit vector operations. We only support
292   // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
293   for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
294     setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
295     AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);
296 
297     setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
298     AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);
299 
300     setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
301     AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);
302 
303     setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
304     AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
305   }
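  // Rough sketch of the effect of the promotions above: these v2i64/v2f64
  // nodes are rewritten to operate on the value bitcast to v4i32 (with element
  // indices adjusted accordingly), so only 32-bit vector operations reach
  // instruction selection.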
306 
307   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
308   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
309   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
310   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);
311 
312   setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
313   setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
314 
315   // Avoid stack access for these.
316   // TODO: Generalize to more vector types.
317   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
318   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
319   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
320   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);
321 
322   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
323   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
324   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
325   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
326   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);
327 
328   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
329   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
330   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);
331 
332   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
333   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
334   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
335   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);
336 
337   // Deal with vec3 vector operations when widened to vec4.
338   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Custom);
339   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Custom);
340   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Custom);
341   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Custom);
342 
343   // Deal with vec5 vector operations when widened to vec8.
344   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Custom);
345   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Custom);
346   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Custom);
347   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Custom);
348 
349   // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
350   // and output demarshalling
351   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
352   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
353 
354   // We can't return success/failure, only the old value;
355   // let LLVM add the comparison.
356   setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
357   setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);
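  // Sketch of the scheme described above: the hardware cmpswap takes the new
  // value and the compare value packed into adjacent registers of a single
  // operand and returns only the old memory value, so the custom lowering
  // builds and splits that packed operand, while the _WITH_SUCCESS form is
  // expanded so generic code emits the success comparison itself.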
358 
359   if (Subtarget->hasFlatAddressSpace()) {
360     setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
361     setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
362   }
363 
364   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
365 
366   // FIXME: This should be narrowed to i32, but that only happens if i64 is
367   // illegal.
368   // FIXME: Should lower sub-i32 bswaps to bit-ops without v_perm_b32.
369   setOperationAction(ISD::BSWAP, MVT::i64, Legal);
370   setOperationAction(ISD::BSWAP, MVT::i32, Legal);
371 
372   // This is s_memtime on SI and s_memrealtime on VI.
373   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
374   setOperationAction(ISD::TRAP, MVT::Other, Custom);
375   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);
376 
377   if (Subtarget->has16BitInsts()) {
378     setOperationAction(ISD::FPOW, MVT::f16, Promote);
379     setOperationAction(ISD::FLOG, MVT::f16, Custom);
380     setOperationAction(ISD::FEXP, MVT::f16, Custom);
381     setOperationAction(ISD::FLOG10, MVT::f16, Custom);
382   }
383 
384   // v_mad_f32 does not support denormals. We report it as unconditionally
385   // legal, and the context where it is formed will disallow it when fp32
386   // denormals are enabled.
387   setOperationAction(ISD::FMAD, MVT::f32, Legal);
388 
389   if (!Subtarget->hasBFI()) {
390     // fcopysign can be done in a single instruction with BFI.
391     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
392     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
393   }
394 
395   if (!Subtarget->hasBCNT(32))
396     setOperationAction(ISD::CTPOP, MVT::i32, Expand);
397 
398   if (!Subtarget->hasBCNT(64))
399     setOperationAction(ISD::CTPOP, MVT::i64, Expand);
400 
401   if (Subtarget->hasFFBH())
402     setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
403 
404   if (Subtarget->hasFFBL())
405     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
406 
407   // We only really have 32-bit BFE instructions (and 16-bit on VI).
408   //
409   // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
410   // effort to match them now. We want this to be false for i64 cases when the
411   // extraction isn't restricted to the upper or lower half. Ideally we would
412   // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
413   // span the midpoint are probably relatively rare, so don't worry about them
414   // for now.
415   if (Subtarget->hasBFE())
416     setHasExtractBitsInsn(true);
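  // Illustrative example: extracting bits [15:8] of an i32 can then be matched
  // to a single v_bfe_u32 dst, src, 8, 8 (operands: source, offset, width)
  // instead of a shift/mask pair.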
417 
418   setOperationAction(ISD::FMINNUM, MVT::f32, Custom);
419   setOperationAction(ISD::FMAXNUM, MVT::f32, Custom);
420   setOperationAction(ISD::FMINNUM, MVT::f64, Custom);
421   setOperationAction(ISD::FMAXNUM, MVT::f64, Custom);
422 
423 
424   // These are really only legal for ieee_mode functions. We should be avoiding
425   // them for functions that don't have ieee_mode enabled, so just say they are
426   // legal.
427   setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
428   setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
429   setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
430   setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
431 
432 
433   if (Subtarget->haveRoundOpsF64()) {
434     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
435     setOperationAction(ISD::FCEIL, MVT::f64, Legal);
436     setOperationAction(ISD::FRINT, MVT::f64, Legal);
437   } else {
438     setOperationAction(ISD::FCEIL, MVT::f64, Custom);
439     setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
440     setOperationAction(ISD::FRINT, MVT::f64, Custom);
441     setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
442   }
443 
444   setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
445 
446   setOperationAction(ISD::FSIN, MVT::f32, Custom);
447   setOperationAction(ISD::FCOS, MVT::f32, Custom);
448   setOperationAction(ISD::FDIV, MVT::f32, Custom);
449   setOperationAction(ISD::FDIV, MVT::f64, Custom);
450 
451   if (Subtarget->has16BitInsts()) {
452     setOperationAction(ISD::Constant, MVT::i16, Legal);
453 
454     setOperationAction(ISD::SMIN, MVT::i16, Legal);
455     setOperationAction(ISD::SMAX, MVT::i16, Legal);
456 
457     setOperationAction(ISD::UMIN, MVT::i16, Legal);
458     setOperationAction(ISD::UMAX, MVT::i16, Legal);
459 
460     setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
461     AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);
462 
463     setOperationAction(ISD::ROTR, MVT::i16, Promote);
464     setOperationAction(ISD::ROTL, MVT::i16, Promote);
465 
466     setOperationAction(ISD::SDIV, MVT::i16, Promote);
467     setOperationAction(ISD::UDIV, MVT::i16, Promote);
468     setOperationAction(ISD::SREM, MVT::i16, Promote);
469     setOperationAction(ISD::UREM, MVT::i16, Promote);
470 
471     setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);
472 
473     setOperationAction(ISD::CTTZ, MVT::i16, Promote);
474     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
475     setOperationAction(ISD::CTLZ, MVT::i16, Promote);
476     setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
477     setOperationAction(ISD::CTPOP, MVT::i16, Promote);
478 
479     setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
480 
481     setOperationAction(ISD::BR_CC, MVT::i16, Expand);
482 
483     setOperationAction(ISD::LOAD, MVT::i16, Custom);
484 
485     setTruncStoreAction(MVT::i64, MVT::i16, Expand);
486 
487     setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
488     AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
489     setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
490     AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);
491 
492     setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
493     setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
494 
495     // F16 - Constant Actions.
496     setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
497 
498     // F16 - Load/Store Actions.
499     setOperationAction(ISD::LOAD, MVT::f16, Promote);
500     AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
501     setOperationAction(ISD::STORE, MVT::f16, Promote);
502     AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);
503 
504     // F16 - VOP1 Actions.
505     setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
506     setOperationAction(ISD::FCOS, MVT::f16, Custom);
507     setOperationAction(ISD::FSIN, MVT::f16, Custom);
508 
509     setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
510     setOperationAction(ISD::UINT_TO_FP, MVT::i16, Custom);
511 
512     setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
513     setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
514     setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
515     setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
516     setOperationAction(ISD::FROUND, MVT::f16, Custom);
517 
518     // F16 - VOP2 Actions.
519     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
520     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
521 
522     setOperationAction(ISD::FDIV, MVT::f16, Custom);
523 
524     // F16 - VOP3 Actions.
525     setOperationAction(ISD::FMA, MVT::f16, Legal);
526     if (STI.hasMadF16())
527       setOperationAction(ISD::FMAD, MVT::f16, Legal);
528 
529     for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
530       for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
531         switch (Op) {
532         case ISD::LOAD:
533         case ISD::STORE:
534         case ISD::BUILD_VECTOR:
535         case ISD::BITCAST:
536         case ISD::EXTRACT_VECTOR_ELT:
537         case ISD::INSERT_VECTOR_ELT:
538         case ISD::INSERT_SUBVECTOR:
539         case ISD::EXTRACT_SUBVECTOR:
540         case ISD::SCALAR_TO_VECTOR:
541           break;
542         case ISD::CONCAT_VECTORS:
543           setOperationAction(Op, VT, Custom);
544           break;
545         default:
546           setOperationAction(Op, VT, Expand);
547           break;
548         }
549       }
550     }
551 
552     // v_perm_b32 can handle either of these.
553     setOperationAction(ISD::BSWAP, MVT::i16, Legal);
554     setOperationAction(ISD::BSWAP, MVT::v2i16, Legal);
555     setOperationAction(ISD::BSWAP, MVT::v4i16, Custom);
556 
557     // XXX - Do these do anything? Vector constants turn into build_vector.
558     setOperationAction(ISD::Constant, MVT::v2i16, Legal);
559     setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);
560 
561     setOperationAction(ISD::UNDEF, MVT::v2i16, Legal);
562     setOperationAction(ISD::UNDEF, MVT::v2f16, Legal);
563 
564     setOperationAction(ISD::STORE, MVT::v2i16, Promote);
565     AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
566     setOperationAction(ISD::STORE, MVT::v2f16, Promote);
567     AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);
568 
569     setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
570     AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
571     setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
572     AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);
573 
574     setOperationAction(ISD::AND, MVT::v2i16, Promote);
575     AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
576     setOperationAction(ISD::OR, MVT::v2i16, Promote);
577     AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
578     setOperationAction(ISD::XOR, MVT::v2i16, Promote);
579     AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
580 
581     setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
582     AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
583     setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
584     AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);
585 
586     setOperationAction(ISD::STORE, MVT::v4i16, Promote);
587     AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
588     setOperationAction(ISD::STORE, MVT::v4f16, Promote);
589     AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);
590 
591     setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
592     setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
593     setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
594     setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
595 
596     setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand);
597     setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
598     setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);
599 
600     if (!Subtarget->hasVOP3PInsts()) {
601       setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
602       setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
603     }
604 
605     setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
606     // This isn't really legal, but this avoids the legalizer unrolling it (and
607     // allows matching fneg (fabs x) patterns)
608     setOperationAction(ISD::FABS, MVT::v2f16, Legal);
609 
610     setOperationAction(ISD::FMAXNUM, MVT::f16, Custom);
611     setOperationAction(ISD::FMINNUM, MVT::f16, Custom);
612     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal);
613     setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal);
614 
615     setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom);
616     setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom);
617 
618     setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand);
619     setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand);
620   }
621 
622   if (Subtarget->hasVOP3PInsts()) {
623     setOperationAction(ISD::ADD, MVT::v2i16, Legal);
624     setOperationAction(ISD::SUB, MVT::v2i16, Legal);
625     setOperationAction(ISD::MUL, MVT::v2i16, Legal);
626     setOperationAction(ISD::SHL, MVT::v2i16, Legal);
627     setOperationAction(ISD::SRL, MVT::v2i16, Legal);
628     setOperationAction(ISD::SRA, MVT::v2i16, Legal);
629     setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
630     setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
631     setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
632     setOperationAction(ISD::UMAX, MVT::v2i16, Legal);
633 
634     setOperationAction(ISD::FADD, MVT::v2f16, Legal);
635     setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
636     setOperationAction(ISD::FMA, MVT::v2f16, Legal);
637 
638     setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal);
639     setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal);
640 
641     setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);
642 
643     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
644     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
645 
646     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f16, Custom);
647     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
648 
649     setOperationAction(ISD::SHL, MVT::v4i16, Custom);
650     setOperationAction(ISD::SRA, MVT::v4i16, Custom);
651     setOperationAction(ISD::SRL, MVT::v4i16, Custom);
652     setOperationAction(ISD::ADD, MVT::v4i16, Custom);
653     setOperationAction(ISD::SUB, MVT::v4i16, Custom);
654     setOperationAction(ISD::MUL, MVT::v4i16, Custom);
655 
656     setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
657     setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
658     setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
659     setOperationAction(ISD::UMAX, MVT::v4i16, Custom);
660 
661     setOperationAction(ISD::FADD, MVT::v4f16, Custom);
662     setOperationAction(ISD::FMUL, MVT::v4f16, Custom);
663     setOperationAction(ISD::FMA, MVT::v4f16, Custom);
664 
665     setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom);
666     setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom);
667 
668     setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
669     setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);
670     setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom);
671 
672     setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
673     setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
674     setOperationAction(ISD::SELECT, MVT::v4f16, Custom);
675   }
676 
677   setOperationAction(ISD::FNEG, MVT::v4f16, Custom);
678   setOperationAction(ISD::FABS, MVT::v4f16, Custom);
679 
680   if (Subtarget->has16BitInsts()) {
681     setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
682     AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
683     setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
684     AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
685   } else {
686     // Legalization hack.
687     setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
688     setOperationAction(ISD::SELECT, MVT::v2f16, Custom);
689 
690     setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
691     setOperationAction(ISD::FABS, MVT::v2f16, Custom);
692   }
693 
694   for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
695     setOperationAction(ISD::SELECT, VT, Custom);
696   }
697 
698   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
699   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
700   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
701   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
702   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom);
703   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
704   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);
705 
706   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
707   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2i16, Custom);
708   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
709   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4i16, Custom);
710   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom);
711   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
712   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::f16, Custom);
713   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
714   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
715 
716   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
717   setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
718   setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
719   setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);
720   setOperationAction(ISD::INTRINSIC_VOID, MVT::v4i16, Custom);
721   setOperationAction(ISD::INTRINSIC_VOID, MVT::f16, Custom);
722   setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
723   setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
724 
725   setTargetDAGCombine(ISD::ADD);
726   setTargetDAGCombine(ISD::ADDCARRY);
727   setTargetDAGCombine(ISD::SUB);
728   setTargetDAGCombine(ISD::SUBCARRY);
729   setTargetDAGCombine(ISD::FADD);
730   setTargetDAGCombine(ISD::FSUB);
731   setTargetDAGCombine(ISD::FMINNUM);
732   setTargetDAGCombine(ISD::FMAXNUM);
733   setTargetDAGCombine(ISD::FMINNUM_IEEE);
734   setTargetDAGCombine(ISD::FMAXNUM_IEEE);
735   setTargetDAGCombine(ISD::FMA);
736   setTargetDAGCombine(ISD::SMIN);
737   setTargetDAGCombine(ISD::SMAX);
738   setTargetDAGCombine(ISD::UMIN);
739   setTargetDAGCombine(ISD::UMAX);
740   setTargetDAGCombine(ISD::SETCC);
741   setTargetDAGCombine(ISD::AND);
742   setTargetDAGCombine(ISD::OR);
743   setTargetDAGCombine(ISD::XOR);
744   setTargetDAGCombine(ISD::SINT_TO_FP);
745   setTargetDAGCombine(ISD::UINT_TO_FP);
746   setTargetDAGCombine(ISD::FCANONICALIZE);
747   setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
748   setTargetDAGCombine(ISD::ZERO_EXTEND);
749   setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
750   setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
751   setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
752 
753   // All memory operations. Some folding on the pointer operand is done to help
754   // matching the constant offsets in the addressing modes.
755   setTargetDAGCombine(ISD::LOAD);
756   setTargetDAGCombine(ISD::STORE);
757   setTargetDAGCombine(ISD::ATOMIC_LOAD);
758   setTargetDAGCombine(ISD::ATOMIC_STORE);
759   setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
760   setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
761   setTargetDAGCombine(ISD::ATOMIC_SWAP);
762   setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
763   setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
764   setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
765   setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
766   setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
767   setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
768   setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
769   setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
770   setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
771   setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
772   setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD);
773 
774   setSchedulingPreference(Sched::RegPressure);
775 }
776 
777 const GCNSubtarget *SITargetLowering::getSubtarget() const {
778   return Subtarget;
779 }
780 
781 //===----------------------------------------------------------------------===//
782 // TargetLowering queries
783 //===----------------------------------------------------------------------===//
784 
785 // v_mad_mix* support a conversion from f16 to f32.
786 //
787 // There is one special case we don't currently handle: when fp32 denormals
788 // are enabled, this fold is not OK to use.
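// For example (illustrative), a node such as
//   (fma (fp_extend f16:$a), (fp_extend f16:$b), f32:$c)
// can stay a single f32 FMA and later be matched to v_fma_mix_f32 /
// v_mad_mix_f32, which perform the f16->f32 conversion on their inputs.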
789 bool SITargetLowering::isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
790                                        EVT DestVT, EVT SrcVT) const {
791   return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
792           (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
793     DestVT.getScalarType() == MVT::f32 &&
794     SrcVT.getScalarType() == MVT::f16 &&
795     // TODO: This probably only requires no input flushing?
796     !hasFP32Denormals(DAG.getMachineFunction());
797 }
798 
799 bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
800   // SI has some legal vector types, but no legal vector operations. Say no
801   // shuffles are legal in order to prefer scalarizing some vector operations.
802   return false;
803 }
804 
805 MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
806                                                     CallingConv::ID CC,
807                                                     EVT VT) const {
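  // A sketch of the outcomes below, for reference: with a non-kernel calling
  // convention, a v2i64 argument ends up in i32 registers, and with 16-bit
  // instructions a v3f16 argument is passed as v2f16 pieces (the matching
  // counts come from getNumRegistersForCallingConv below).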
808   if (CC == CallingConv::AMDGPU_KERNEL)
809     return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
810 
811   if (VT.isVector()) {
812     EVT ScalarVT = VT.getScalarType();
813     unsigned Size = ScalarVT.getSizeInBits();
814     if (Size == 32)
815       return ScalarVT.getSimpleVT();
816 
817     if (Size > 32)
818       return MVT::i32;
819 
820     if (Size == 16 && Subtarget->has16BitInsts())
821       return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
822   } else if (VT.getSizeInBits() > 32)
823     return MVT::i32;
824 
825   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
826 }
827 
828 unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
829                                                          CallingConv::ID CC,
830                                                          EVT VT) const {
831   if (CC == CallingConv::AMDGPU_KERNEL)
832     return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
833 
834   if (VT.isVector()) {
835     unsigned NumElts = VT.getVectorNumElements();
836     EVT ScalarVT = VT.getScalarType();
837     unsigned Size = ScalarVT.getSizeInBits();
838 
839     if (Size == 32)
840       return NumElts;
841 
842     if (Size > 32)
843       return NumElts * ((Size + 31) / 32);
844 
845     if (Size == 16 && Subtarget->has16BitInsts())
846       return (NumElts + 1) / 2;
847   } else if (VT.getSizeInBits() > 32)
848     return (VT.getSizeInBits() + 31) / 32;
849 
850   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
851 }
852 
853 unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
854   LLVMContext &Context, CallingConv::ID CC,
855   EVT VT, EVT &IntermediateVT,
856   unsigned &NumIntermediates, MVT &RegisterVT) const {
857   if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
858     unsigned NumElts = VT.getVectorNumElements();
859     EVT ScalarVT = VT.getScalarType();
860     unsigned Size = ScalarVT.getSizeInBits();
861     if (Size == 32) {
862       RegisterVT = ScalarVT.getSimpleVT();
863       IntermediateVT = RegisterVT;
864       NumIntermediates = NumElts;
865       return NumIntermediates;
866     }
867 
868     if (Size > 32) {
869       RegisterVT = MVT::i32;
870       IntermediateVT = RegisterVT;
871       NumIntermediates = NumElts * ((Size + 31) / 32);
872       return NumIntermediates;
873     }
874 
875     // FIXME: We should fix the ABI to be the same on targets without 16-bit
876     // support, but unless we can properly handle 3-vectors, it will still be
877     // inconsistent.
878     if (Size == 16 && Subtarget->has16BitInsts()) {
879       RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
880       IntermediateVT = RegisterVT;
881       NumIntermediates = (NumElts + 1) / 2;
882       return NumIntermediates;
883     }
884   }
885 
886   return TargetLowering::getVectorTypeBreakdownForCallingConv(
887     Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
888 }
889 
890 // Peek through TFE struct returns to only use the data size.
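// For example, an image load returning { <4 x float>, i32 } (data plus a TFE
// status dword) is treated as a v4f32 access for memory-operand purposes; this
// assumes the limited aggregate shape checked below.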
891 static EVT memVTFromImageReturn(Type *Ty) {
892   auto *ST = dyn_cast<StructType>(Ty);
893   if (!ST)
894     return EVT::getEVT(Ty, true);
895 
896   // Some intrinsics return an aggregate type - special case to work out the
897   // correct memVT.
898   //
899   // Only limited forms of aggregate type are currently expected.
900   if (ST->getNumContainedTypes() != 2 ||
901       !ST->getContainedType(1)->isIntegerTy(32))
902     return EVT();
903   return EVT::getEVT(ST->getContainedType(0));
904 }
905 
906 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
907                                           const CallInst &CI,
908                                           MachineFunction &MF,
909                                           unsigned IntrID) const {
910   if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
911           AMDGPU::lookupRsrcIntrinsic(IntrID)) {
912     AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
913                                                   (Intrinsic::ID)IntrID);
914     if (Attr.hasFnAttribute(Attribute::ReadNone))
915       return false;
916 
917     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
918 
919     if (RsrcIntr->IsImage) {
920       Info.ptrVal = MFI->getImagePSV(
921         *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
922         CI.getArgOperand(RsrcIntr->RsrcArg));
923       Info.align.reset();
924     } else {
925       Info.ptrVal = MFI->getBufferPSV(
926         *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
927         CI.getArgOperand(RsrcIntr->RsrcArg));
928     }
929 
930     Info.flags = MachineMemOperand::MODereferenceable;
931     if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
932       Info.opc = ISD::INTRINSIC_W_CHAIN;
933       // TODO: Account for dmask reducing loaded size.
934       Info.memVT = memVTFromImageReturn(CI.getType());
935       Info.flags |= MachineMemOperand::MOLoad;
936     } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
937       Info.opc = ISD::INTRINSIC_VOID;
938       Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
939       Info.flags |= MachineMemOperand::MOStore;
940     } else {
941       // Atomic
942       Info.opc = ISD::INTRINSIC_W_CHAIN;
943       Info.memVT = MVT::getVT(CI.getType());
944       Info.flags = MachineMemOperand::MOLoad |
945                    MachineMemOperand::MOStore |
946                    MachineMemOperand::MODereferenceable;
947 
948       // XXX - Should this be volatile without known ordering?
949       Info.flags |= MachineMemOperand::MOVolatile;
950     }
951     return true;
952   }
953 
954   switch (IntrID) {
955   case Intrinsic::amdgcn_atomic_inc:
956   case Intrinsic::amdgcn_atomic_dec:
957   case Intrinsic::amdgcn_ds_ordered_add:
958   case Intrinsic::amdgcn_ds_ordered_swap:
959   case Intrinsic::amdgcn_ds_fadd:
960   case Intrinsic::amdgcn_ds_fmin:
961   case Intrinsic::amdgcn_ds_fmax: {
962     Info.opc = ISD::INTRINSIC_W_CHAIN;
963     Info.memVT = MVT::getVT(CI.getType());
964     Info.ptrVal = CI.getOperand(0);
965     Info.align.reset();
966     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
967 
968     const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4));
969     if (!Vol->isZero())
970       Info.flags |= MachineMemOperand::MOVolatile;
971 
972     return true;
973   }
974   case Intrinsic::amdgcn_buffer_atomic_fadd: {
975     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
976 
977     Info.opc = ISD::INTRINSIC_VOID;
978     Info.memVT = MVT::getVT(CI.getOperand(0)->getType());
979     Info.ptrVal = MFI->getBufferPSV(
980       *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
981       CI.getArgOperand(1));
982     Info.align.reset();
983     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
984 
985     const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
986     if (!Vol || !Vol->isZero())
987       Info.flags |= MachineMemOperand::MOVolatile;
988 
989     return true;
990   }
991   case Intrinsic::amdgcn_global_atomic_fadd: {
992     Info.opc = ISD::INTRINSIC_VOID;
993     Info.memVT = MVT::getVT(CI.getOperand(0)->getType()
994                             ->getPointerElementType());
995     Info.ptrVal = CI.getOperand(0);
996     Info.align.reset();
997     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
998 
999     return true;
1000   }
1001   case Intrinsic::amdgcn_ds_append:
1002   case Intrinsic::amdgcn_ds_consume: {
1003     Info.opc = ISD::INTRINSIC_W_CHAIN;
1004     Info.memVT = MVT::getVT(CI.getType());
1005     Info.ptrVal = CI.getOperand(0);
1006     Info.align.reset();
1007     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1008 
1009     const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1));
1010     if (!Vol->isZero())
1011       Info.flags |= MachineMemOperand::MOVolatile;
1012 
1013     return true;
1014   }
1015   case Intrinsic::amdgcn_ds_gws_init:
1016   case Intrinsic::amdgcn_ds_gws_barrier:
1017   case Intrinsic::amdgcn_ds_gws_sema_v:
1018   case Intrinsic::amdgcn_ds_gws_sema_br:
1019   case Intrinsic::amdgcn_ds_gws_sema_p:
1020   case Intrinsic::amdgcn_ds_gws_sema_release_all: {
1021     Info.opc = ISD::INTRINSIC_VOID;
1022 
1023     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1024     Info.ptrVal =
1025         MFI->getGWSPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());
1026 
1027     // This is an abstract access, but we need to specify a type and size.
1028     Info.memVT = MVT::i32;
1029     Info.size = 4;
1030     Info.align = Align(4);
1031 
1032     Info.flags = MachineMemOperand::MOStore;
1033     if (IntrID == Intrinsic::amdgcn_ds_gws_barrier)
1034       Info.flags = MachineMemOperand::MOLoad;
1035     return true;
1036   }
1037   default:
1038     return false;
1039   }
1040 }
1041 
1042 bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
1043                                             SmallVectorImpl<Value*> &Ops,
1044                                             Type *&AccessTy) const {
1045   switch (II->getIntrinsicID()) {
1046   case Intrinsic::amdgcn_atomic_inc:
1047   case Intrinsic::amdgcn_atomic_dec:
1048   case Intrinsic::amdgcn_ds_ordered_add:
1049   case Intrinsic::amdgcn_ds_ordered_swap:
1050   case Intrinsic::amdgcn_ds_fadd:
1051   case Intrinsic::amdgcn_ds_fmin:
1052   case Intrinsic::amdgcn_ds_fmax: {
1053     Value *Ptr = II->getArgOperand(0);
1054     AccessTy = II->getType();
1055     Ops.push_back(Ptr);
1056     return true;
1057   }
1058   default:
1059     return false;
1060   }
1061 }
1062 
1063 bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
1064   if (!Subtarget->hasFlatInstOffsets()) {
1065     // Flat instructions do not have offsets, and only have the register
1066     // address.
1067     return AM.BaseOffs == 0 && AM.Scale == 0;
1068   }
1069 
1070   return AM.Scale == 0 &&
1071          (AM.BaseOffs == 0 || Subtarget->getInstrInfo()->isLegalFLATOffset(
1072                                   AM.BaseOffs, AMDGPUAS::FLAT_ADDRESS,
1073                                   /*Signed=*/false));
1074 }
1075 
1076 bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
1077   if (Subtarget->hasFlatGlobalInsts())
1078     return AM.Scale == 0 &&
1079            (AM.BaseOffs == 0 || Subtarget->getInstrInfo()->isLegalFLATOffset(
1080                                     AM.BaseOffs, AMDGPUAS::GLOBAL_ADDRESS,
1081                                     /*Signed=*/true));
1082 
1083   if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
1084       // Assume that we will use FLAT for all global memory accesses
1085       // on VI.
1086       // FIXME: This assumption is currently wrong.  On VI we still use
1087       // MUBUF instructions for the r + i addressing mode.  As currently
1088       // implemented, the MUBUF instructions only work on buffers < 4GB.
1089       // It may be possible to support > 4GB buffers with MUBUF instructions,
1090       // by setting the stride value in the resource descriptor which would
1091       // increase the size limit to (stride * 4GB).  However, this is risky,
1092       // because it has never been validated.
1093     return isLegalFlatAddressingMode(AM);
1094   }
1095 
1096   return isLegalMUBUFAddressingMode(AM);
1097 }
1098 
1099 bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
1100   // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
1101   // additionally can do r + r + i with addr64. 32-bit has more addressing
1102   // mode options. Depending on the resource constant, it can also do
1103   // (i64 r0) + (i32 r1) * (i14 i).
1104   //
1105   // Private arrays end up using a scratch buffer most of the time, so also
1106   // assume those use MUBUF instructions. Scratch loads / stores are currently
1107   // implemented as mubuf instructions with offen bit set, so slightly
1108   // different than the normal addr64.
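  // Rough mapping of the AddrMode fields checked below: BaseReg + BaseOffs with
  // a Scale of 0 or 1 corresponds to a MUBUF access using a register address
  // plus the 12-bit immediate, e.g. something like
  //   buffer_load_dword v0, v1, s[4:7], 0 offen offset:16
  // (illustrative assembly, not taken from this file).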
1109   if (!isUInt<12>(AM.BaseOffs))
1110     return false;
1111 
1112   // FIXME: Since we can split immediate into soffset and immediate offset,
1113   // would it make sense to allow any immediate?
1114 
1115   switch (AM.Scale) {
1116   case 0: // r + i or just i, depending on HasBaseReg.
1117     return true;
1118   case 1:
1119     return true; // We have r + r or r + i.
1120   case 2:
1121     if (AM.HasBaseReg) {
1122       // Reject 2 * r + r.
1123       return false;
1124     }
1125 
1126     // Allow 2 * r as r + r
1127     // Or  2 * r + i is allowed as r + r + i.
1128     return true;
1129   default: // Don't allow n * r
1130     return false;
1131   }
1132 }
1133 
1134 bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
1135                                              const AddrMode &AM, Type *Ty,
1136                                              unsigned AS, Instruction *I) const {
1137   // No global is ever allowed as a base.
1138   if (AM.BaseGV)
1139     return false;
1140 
1141   if (AS == AMDGPUAS::GLOBAL_ADDRESS)
1142     return isLegalGlobalAddressingMode(AM);
1143 
1144   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
1145       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
1146       AS == AMDGPUAS::BUFFER_FAT_POINTER) {
1147     // If the offset isn't a multiple of 4, it probably isn't going to be
1148     // correctly aligned.
1149     // FIXME: Can we get the real alignment here?
1150     if (AM.BaseOffs % 4 != 0)
1151       return isLegalMUBUFAddressingMode(AM);
1152 
1153     // There are no SMRD extloads, so if we have to do a small type access we
1154     // will use a MUBUF load.
1155     // FIXME?: We also need to do this if unaligned, but we don't know the
1156     // alignment here.
1157     if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
1158       return isLegalGlobalAddressingMode(AM);
1159 
1160     if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
1161       // SMRD instructions have an 8-bit, dword offset on SI.
1162       if (!isUInt<8>(AM.BaseOffs / 4))
1163         return false;
1164     } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
1165       // On CI+, this can also be a 32-bit literal constant offset. If it fits
1166       // in 8-bits, it can use a smaller encoding.
1167       if (!isUInt<32>(AM.BaseOffs / 4))
1168         return false;
1169     } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
1170       // On VI, these use the SMEM format and the offset is 20-bit in bytes.
1171       if (!isUInt<20>(AM.BaseOffs))
1172         return false;
1173     } else
1174       llvm_unreachable("unhandled generation");
1175 
1176     if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1177       return true;
1178 
1179     if (AM.Scale == 1 && AM.HasBaseReg)
1180       return true;
1181 
1182     return false;
1183 
1184   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
1185     return isLegalMUBUFAddressingMode(AM);
1186   } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
1187              AS == AMDGPUAS::REGION_ADDRESS) {
1188     // Basic, single offset DS instructions allow a 16-bit unsigned immediate
1189     // field.
1190     // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
1191     // an 8-bit dword offset but we don't know the alignment here.
1192     if (!isUInt<16>(AM.BaseOffs))
1193       return false;
1194 
1195     if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1196       return true;
1197 
1198     if (AM.Scale == 1 && AM.HasBaseReg)
1199       return true;
1200 
1201     return false;
1202   } else if (AS == AMDGPUAS::FLAT_ADDRESS ||
1203              AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) {
1204     // For an unknown address space, this usually means that this is for some
1205     // reason being used for pure arithmetic, and not based on some addressing
1206     // computation. We don't have instructions that compute pointers with any
1207     // addressing modes, so treat them as having no offset like flat
1208     // instructions.
1209     return isLegalFlatAddressingMode(AM);
1210   } else {
1211     llvm_unreachable("unhandled address space");
1212   }
1213 }
1214 
1215 bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
1216                                         const SelectionDAG &DAG) const {
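  // A sketch of the limits below: two adjacent <2 x i32> global stores may be
  // merged into one 128-bit store but not into a 256-bit one, and LDS/GDS
  // stores stop merging at 64 bits.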
1217   if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
1218     return (MemVT.getSizeInBits() <= 4 * 32);
1219   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
1220     unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
1221     return (MemVT.getSizeInBits() <= MaxPrivateBits);
1222   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
1223     return (MemVT.getSizeInBits() <= 2 * 32);
1224   }
1225   return true;
1226 }
1227 
1228 bool SITargetLowering::allowsMisalignedMemoryAccessesImpl(
1229     unsigned Size, unsigned AddrSpace, unsigned Align,
1230     MachineMemOperand::Flags Flags, bool *IsFast) const {
1231   if (IsFast)
1232     *IsFast = false;
1233 
1234   if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1235       AddrSpace == AMDGPUAS::REGION_ADDRESS) {
1236     // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
1237     // aligned, 8 byte access in a single operation using ds_read2/write2_b32
1238     // with adjacent offsets.
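    // Sketch: an 8-byte, 4-byte-aligned LDS access can be issued as something
    // like ds_read2_b32 v[0:1], v2 offset0:0 offset1:1 instead of a single
    // ds_read_b64, which would require 8-byte alignment.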
1239     bool AlignedBy4 = (Align % 4 == 0);
1240     if (IsFast)
1241       *IsFast = AlignedBy4;
1242 
1243     return AlignedBy4;
1244   }
1245 
1246   // FIXME: We have to be conservative here and assume that flat operations
1247   // will access scratch.  If we had access to the IR function, then we
1248   // could determine if any private memory was used in the function.
1249   if (!Subtarget->hasUnalignedScratchAccess() &&
1250       (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
1251        AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
1252     bool AlignedBy4 = Align >= 4;
1253     if (IsFast)
1254       *IsFast = AlignedBy4;
1255 
1256     return AlignedBy4;
1257   }
1258 
1259   if (Subtarget->hasUnalignedBufferAccess()) {
1260     // If we have a uniform constant load, it still requires using a slow
1261     // buffer instruction if unaligned.
1262     if (IsFast) {
1263       // Accesses can really be issued as 1-byte aligned or 4-byte aligned, so
1264       // 2-byte alignment is worse than 1 unless doing a 2-byte access.
1265       *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
1266                  AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
1267         Align >= 4 : Align != 2;
1268     }
1269 
1270     return true;
1271   }
1272 
1273   // Smaller than dword value must be aligned.
1274   if (Size < 32)
1275     return false;
1276 
1277   // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
1278   // byte-address are ignored, thus forcing Dword alignment.
1279   // This applies to private, global, and constant memory.
1280   if (IsFast)
1281     *IsFast = true;
1282 
1283   return Size >= 32 && Align >= 4;
1284 }
1285 
1286 bool SITargetLowering::allowsMisalignedMemoryAccesses(
1287     EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
1288     bool *IsFast) const {
1289   if (IsFast)
1290     *IsFast = false;
1291 
1292   // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
1293   // which isn't a simple VT.
1294   // Until MVT is extended to handle this, simply check for the size and
1295   // rely on the condition below: allow accesses if the size is a multiple of 4.
1296   if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
1297                            VT.getStoreSize() > 16)) {
1298     return false;
1299   }
1300 
1301   return allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AddrSpace,
1302                                             Align, Flags, IsFast);
1303 }
1304 
1305 EVT SITargetLowering::getOptimalMemOpType(
1306     const MemOp &Op, const AttributeList &FuncAttributes) const {
1307   // FIXME: Should account for address space here.
1308 
1309   // The default fallback uses the private pointer size as a guess for a type to
1310   // use. Make sure we switch these to 64-bit accesses.
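  // For example, a 16-byte memcpy whose destination is known to be 4-byte
  // aligned would be expanded with v4i32 (dwordx4-sized) accesses under the
  // checks below.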
1311 
1312   if (Op.size() >= 16 &&
1313       Op.isDstAligned(Align(4))) // XXX: Should only do for global
1314     return MVT::v4i32;
1315 
1316   if (Op.size() >= 8 && Op.isDstAligned(Align(4)))
1317     return MVT::v2i32;
1318 
1319   // Use the default.
1320   return MVT::Other;
1321 }
1322 
1323 bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1324                                            unsigned DestAS) const {
1325   return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
1326 }
1327 
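// Returns true if the underlying IR value of the memory operand is an
// instruction carrying "amdgpu.noclobber" metadata.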
1328 bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
1329   const MemSDNode *MemNode = cast<MemSDNode>(N);
1330   const Value *Ptr = MemNode->getMemOperand()->getValue();
1331   const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
1332   return I && I->getMetadata("amdgpu.noclobber");
1333 }
1334 
1335 bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS,
1336                                            unsigned DestAS) const {
1337   // Flat -> private/local is a simple truncate.
1338   // Flat -> global is no-op
1339   if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
1340     return true;
1341 
1342   return isNoopAddrSpaceCast(SrcAS, DestAS);
1343 }
1344 
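// Returns true if the address of this memory operation is known to be uniform
// across the wave.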
1345 bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
1346   const MemSDNode *MemNode = cast<MemSDNode>(N);
1347 
1348   return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
1349 }
1350 
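// For multi-element vectors with 16-bit or narrower elements, prefer splitting
// power-of-2 types and widening the rest instead of the default action.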
1351 TargetLoweringBase::LegalizeTypeAction
1352 SITargetLowering::getPreferredVectorAction(MVT VT) const {
1353   int NumElts = VT.getVectorNumElements();
1354   if (NumElts != 1 && VT.getScalarType().bitsLE(MVT::i16))
1355     return VT.isPow2VectorType() ? TypeSplitVector : TypeWidenVector;
1356   return TargetLoweringBase::getPreferredVectorAction(VT);
1357 }
1358 
1359 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
1360                                                          Type *Ty) const {
1361   // FIXME: Could be smarter if called for vector constants.
1362   return true;
1363 }
1364 
1365 bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
1366   if (Subtarget->has16BitInsts() && VT == MVT::i16) {
1367     switch (Op) {
1368     case ISD::LOAD:
1369     case ISD::STORE:
1370 
1371     // These operations are done with 32-bit instructions anyway.
1372     case ISD::AND:
1373     case ISD::OR:
1374     case ISD::XOR:
1375     case ISD::SELECT:
1376       // TODO: Extensions?
1377       return true;
1378     default:
1379       return false;
1380     }
1381   }
1382 
1383   // SimplifySetCC uses this function to determine whether or not it should
1384   // create setcc with i1 operands.  We don't have instructions for i1 setcc.
1385   if (VT == MVT::i1 && Op == ISD::SETCC)
1386     return false;
1387 
1388   return TargetLowering::isTypeDesirableForOp(Op, VT);
1389 }
1390 
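// Build a pointer into the kernarg segment: copy the preloaded kernarg segment
// pointer out of its SGPR and add the byte offset.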
1391 SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1392                                                    const SDLoc &SL,
1393                                                    SDValue Chain,
1394                                                    uint64_t Offset) const {
1395   const DataLayout &DL = DAG.getDataLayout();
1396   MachineFunction &MF = DAG.getMachineFunction();
1397   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1398 
1399   const ArgDescriptor *InputPtrReg;
1400   const TargetRegisterClass *RC;
1401 
1402   std::tie(InputPtrReg, RC)
1403     = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
1404 
1405   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
1406   MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
1407   SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
1408     MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1409 
1410   return DAG.getObjectPtrOffset(SL, BasePtr, Offset);
1411 }
1412 
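// The implicit arguments are laid out directly after the explicit kernel
// arguments in the kernarg segment.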
1413 SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1414                                             const SDLoc &SL) const {
1415   uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
1416                                                FIRST_IMPLICIT);
1417   return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1418 }
1419 
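// Convert a value loaded with memory type MemVT into the expected argument
// type VT: narrow widened vectors, then sign/zero-extend, FP-convert, or
// truncate as required.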
1420 SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1421                                          const SDLoc &SL, SDValue Val,
1422                                          bool Signed,
1423                                          const ISD::InputArg *Arg) const {
1424   // First, if it is a widened vector, narrow it.
1425   if (VT.isVector() &&
1426       VT.getVectorNumElements() != MemVT.getVectorNumElements()) {
1427     EVT NarrowedVT =
1428         EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(),
1429                          VT.getVectorNumElements());
1430     Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val,
1431                       DAG.getConstant(0, SL, MVT::i32));
1432   }
1433 
1434   // Then convert the vector elements or scalar value.
1435   if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1436       VT.bitsLT(MemVT)) {
1437     unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1438     Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1439   }
1440 
1441   if (MemVT.isFloatingPoint())
1442     Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
1443   else if (Signed)
1444     Val = DAG.getSExtOrTrunc(Val, SL, VT);
1445   else
1446     Val = DAG.getZExtOrTrunc(Val, SL, VT);
1447 
1448   return Val;
1449 }
1450 
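// Load a kernel argument of memory type MemVT from the kernarg segment at the
// given offset. Sub-dword values with less than dword alignment are extracted
// from a wider dword load so adjacent argument loads can be merged.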
1451 SDValue SITargetLowering::lowerKernargMemParameter(
1452   SelectionDAG &DAG, EVT VT, EVT MemVT,
1453   const SDLoc &SL, SDValue Chain,
1454   uint64_t Offset, unsigned Align, bool Signed,
1455   const ISD::InputArg *Arg) const {
1456   MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
1457 
1458   // Try to avoid using an extload by loading earlier than the argument address,
1459   // and extracting the relevant bits. The load should hopefully be merged with
1460   // the previous argument.
1461   if (MemVT.getStoreSize() < 4 && Align < 4) {
1462     // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
1463     int64_t AlignDownOffset = alignDown(Offset, 4);
1464     int64_t OffsetDiff = Offset - AlignDownOffset;
1465 
1466     EVT IntVT = MemVT.changeTypeToInteger();
1467 
1468     // TODO: If we passed in the base kernel offset we could have a better
1469     // alignment than 4, but we don't really need it.
1470     SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
1471     SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4,
1472                                MachineMemOperand::MODereferenceable |
1473                                MachineMemOperand::MOInvariant);
1474 
1475     SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
1476     SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
1477 
1478     SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
1479     ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
    ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);

1483     return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
1484   }
1485 
1486   SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1487   SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
1488                              MachineMemOperand::MODereferenceable |
1489                              MachineMemOperand::MOInvariant);
1490 
1491   SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
1492   return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
1493 }
1494 
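// Lower an incoming argument assigned to a stack location: create a fixed
// frame object at its offset and load from it (byval arguments just return the
// frame index).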
1495 SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1496                                               const SDLoc &SL, SDValue Chain,
1497                                               const ISD::InputArg &Arg) const {
1498   MachineFunction &MF = DAG.getMachineFunction();
1499   MachineFrameInfo &MFI = MF.getFrameInfo();
1500 
1501   if (Arg.Flags.isByVal()) {
1502     unsigned Size = Arg.Flags.getByValSize();
1503     int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1504     return DAG.getFrameIndex(FrameIdx, MVT::i32);
1505   }
1506 
1507   unsigned ArgOffset = VA.getLocMemOffset();
1508   unsigned ArgSize = VA.getValVT().getStoreSize();
1509 
1510   int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1511 
1512   // Create load nodes to retrieve arguments from the stack.
1513   SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1514   SDValue ArgValue;
1515 
  // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
1517   ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
1518   MVT MemVT = VA.getValVT();
1519 
1520   switch (VA.getLocInfo()) {
1521   default:
1522     break;
1523   case CCValAssign::BCvt:
1524     MemVT = VA.getLocVT();
1525     break;
1526   case CCValAssign::SExt:
1527     ExtType = ISD::SEXTLOAD;
1528     break;
1529   case CCValAssign::ZExt:
1530     ExtType = ISD::ZEXTLOAD;
1531     break;
1532   case CCValAssign::AExt:
1533     ExtType = ISD::EXTLOAD;
1534     break;
1535   }
1536 
1537   ArgValue = DAG.getExtLoad(
1538     ExtType, SL, VA.getLocVT(), Chain, FIN,
1539     MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
1540     MemVT);
1541   return ArgValue;
1542 }
1543 
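// Create a live-in virtual register for the preloaded value described by
// \p PVID and return a copy of it with type VT.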
1544 SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1545   const SIMachineFunctionInfo &MFI,
1546   EVT VT,
1547   AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
1548   const ArgDescriptor *Reg;
1549   const TargetRegisterClass *RC;
1550 
1551   std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1552   return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1553 }
1554 
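// Split the incoming shader arguments, tracking which PS inputs are actually
// used so that unused, unallocated inputs can be skipped.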
1555 static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
1556                                    CallingConv::ID CallConv,
1557                                    ArrayRef<ISD::InputArg> Ins,
1558                                    BitVector &Skipped,
1559                                    FunctionType *FType,
1560                                    SIMachineFunctionInfo *Info) {
1561   for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
1562     const ISD::InputArg *Arg = &Ins[I];
1563 
1564     assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
1565            "vector type argument should have been split");
1566 
1567     // First check if it's a PS input addr.
1568     if (CallConv == CallingConv::AMDGPU_PS &&
1569         !Arg->Flags.isInReg() && PSInputNum <= 15) {
1570       bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);
1571 
1572       // Inconveniently only the first part of the split is marked as isSplit,
1573       // so skip to the end. We only want to increment PSInputNum once for the
1574       // entire split argument.
1575       if (Arg->Flags.isSplit()) {
1576         while (!Arg->Flags.isSplitEnd()) {
1577           assert((!Arg->VT.isVector() ||
1578                   Arg->VT.getScalarSizeInBits() == 16) &&
1579                  "unexpected vector split in ps argument type");
1580           if (!SkipArg)
1581             Splits.push_back(*Arg);
1582           Arg = &Ins[++I];
1583         }
1584       }
1585 
1586       if (SkipArg) {
1587         // We can safely skip PS inputs.
1588         Skipped.set(Arg->getOrigArgIndex());
1589         ++PSInputNum;
1590         continue;
1591       }
1592 
1593       Info->markPSInputAllocated(PSInputNum);
1594       if (Arg->Used)
1595         Info->markPSInputEnabled(PSInputNum);
1596 
1597       ++PSInputNum;
1598     }
1599 
1600     Splits.push_back(*Arg);
1601   }
1602 }
1603 
1604 // Allocate special inputs passed in VGPRs.
1605 void SITargetLowering::allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1606                                                       MachineFunction &MF,
1607                                                       const SIRegisterInfo &TRI,
1608                                                       SIMachineFunctionInfo &Info) const {
1609   const LLT S32 = LLT::scalar(32);
1610   MachineRegisterInfo &MRI = MF.getRegInfo();
1611 
1612   if (Info.hasWorkItemIDX()) {
1613     Register Reg = AMDGPU::VGPR0;
1614     MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1615 
1616     CCInfo.AllocateReg(Reg);
1617     Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
1618   }
1619 
1620   if (Info.hasWorkItemIDY()) {
1621     Register Reg = AMDGPU::VGPR1;
1622     MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1623 
1624     CCInfo.AllocateReg(Reg);
1625     Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
1626   }
1627 
1628   if (Info.hasWorkItemIDZ()) {
1629     Register Reg = AMDGPU::VGPR2;
1630     MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1631 
1632     CCInfo.AllocateReg(Reg);
1633     Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
1634   }
1635 }
1636 
// Try to allocate a VGPR at the end of the argument list, or if no argument
// VGPRs are left, allocate a stack slot instead.
// If \p Mask is given, it indicates the bitfield position in the register.
// If \p Arg is given, use it with the new \p Mask instead of allocating a new
// register.
1641 static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u,
1642                                          ArgDescriptor Arg = ArgDescriptor()) {
1643   if (Arg.isSet())
1644     return ArgDescriptor::createArg(Arg, Mask);
1645 
1646   ArrayRef<MCPhysReg> ArgVGPRs
1647     = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1648   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1649   if (RegIdx == ArgVGPRs.size()) {
1650     // Spill to stack required.
1651     int64_t Offset = CCInfo.AllocateStack(4, 4);
1652 
1653     return ArgDescriptor::createStack(Offset, Mask);
1654   }
1655 
1656   unsigned Reg = ArgVGPRs[RegIdx];
1657   Reg = CCInfo.AllocateReg(Reg);
1658   assert(Reg != AMDGPU::NoRegister);
1659 
1660   MachineFunction &MF = CCInfo.getMachineFunction();
1661   Register LiveInVReg = MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1662   MF.getRegInfo().setType(LiveInVReg, LLT::scalar(32));
1663   return ArgDescriptor::createRegister(Reg, Mask);
1664 }
1665 
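// Allocate the first unallocated register from the given SGPR class for an
// implicit input, reporting a fatal error if the argument registers are
// exhausted.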
1666 static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
1667                                              const TargetRegisterClass *RC,
1668                                              unsigned NumArgRegs) {
1669   ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
1670   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1671   if (RegIdx == ArgSGPRs.size())
1672     report_fatal_error("ran out of SGPRs for arguments");
1673 
1674   unsigned Reg = ArgSGPRs[RegIdx];
1675   Reg = CCInfo.AllocateReg(Reg);
1676   assert(Reg != AMDGPU::NoRegister);
1677 
1678   MachineFunction &MF = CCInfo.getMachineFunction();
1679   MF.addLiveIn(Reg, RC);
1680   return ArgDescriptor::createRegister(Reg);
1681 }
1682 
1683 static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
1684   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1685 }
1686 
1687 static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
1688   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1689 }
1690 
1691 /// Allocate implicit function VGPR arguments at the end of allocated user
1692 /// arguments.
1693 void SITargetLowering::allocateSpecialInputVGPRs(
1694   CCState &CCInfo, MachineFunction &MF,
1695   const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const {
1696   const unsigned Mask = 0x3ff;
1697   ArgDescriptor Arg;
1698 
1699   if (Info.hasWorkItemIDX()) {
1700     Arg = allocateVGPR32Input(CCInfo, Mask);
1701     Info.setWorkItemIDX(Arg);
1702   }
1703 
1704   if (Info.hasWorkItemIDY()) {
1705     Arg = allocateVGPR32Input(CCInfo, Mask << 10, Arg);
1706     Info.setWorkItemIDY(Arg);
1707   }
1708 
1709   if (Info.hasWorkItemIDZ())
1710     Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo, Mask << 20, Arg));
1711 }
1712 
1713 /// Allocate implicit function VGPR arguments in fixed registers.
1714 void SITargetLowering::allocateSpecialInputVGPRsFixed(
1715   CCState &CCInfo, MachineFunction &MF,
1716   const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const {
1717   Register Reg = CCInfo.AllocateReg(AMDGPU::VGPR31);
1718   if (!Reg)
1719     report_fatal_error("failed to allocated VGPR for implicit arguments");
1720 
1721   const unsigned Mask = 0x3ff;
1722   Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg, Mask));
1723   Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg, Mask << 10));
1724   Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg, Mask << 20));
1725 }
1726 
1727 void SITargetLowering::allocateSpecialInputSGPRs(
1728   CCState &CCInfo,
1729   MachineFunction &MF,
1730   const SIRegisterInfo &TRI,
1731   SIMachineFunctionInfo &Info) const {
1732   auto &ArgInfo = Info.getArgInfo();
1733 
1734   // TODO: Unify handling with private memory pointers.
1735 
1736   if (Info.hasDispatchPtr())
1737     ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1738 
1739   if (Info.hasQueuePtr())
1740     ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1741 
1742   // Implicit arg ptr takes the place of the kernarg segment pointer. This is a
1743   // constant offset from the kernarg segment.
1744   if (Info.hasImplicitArgPtr())
1745     ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
1746 
1747   if (Info.hasDispatchID())
1748     ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1749 
1750   // flat_scratch_init is not applicable for non-kernel functions.
1751 
1752   if (Info.hasWorkGroupIDX())
1753     ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1754 
1755   if (Info.hasWorkGroupIDY())
1756     ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1757 
1758   if (Info.hasWorkGroupIDZ())
1759     ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
1760 }
1761 
1762 // Allocate special inputs passed in user SGPRs.
1763 void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo,
1764                                             MachineFunction &MF,
1765                                             const SIRegisterInfo &TRI,
1766                                             SIMachineFunctionInfo &Info) const {
1767   if (Info.hasImplicitBufferPtr()) {
1768     unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1769     MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1770     CCInfo.AllocateReg(ImplicitBufferPtrReg);
1771   }
1772 
1773   // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1774   if (Info.hasPrivateSegmentBuffer()) {
1775     unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1776     MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1777     CCInfo.AllocateReg(PrivateSegmentBufferReg);
1778   }
1779 
1780   if (Info.hasDispatchPtr()) {
1781     unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1782     MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1783     CCInfo.AllocateReg(DispatchPtrReg);
1784   }
1785 
1786   if (Info.hasQueuePtr()) {
1787     unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1788     MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1789     CCInfo.AllocateReg(QueuePtrReg);
1790   }
1791 
1792   if (Info.hasKernargSegmentPtr()) {
1793     MachineRegisterInfo &MRI = MF.getRegInfo();
1794     Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
1795     CCInfo.AllocateReg(InputPtrReg);
1796 
1797     Register VReg = MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1798     MRI.setType(VReg, LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
1799   }
1800 
1801   if (Info.hasDispatchID()) {
1802     unsigned DispatchIDReg = Info.addDispatchID(TRI);
1803     MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1804     CCInfo.AllocateReg(DispatchIDReg);
1805   }
1806 
1807   if (Info.hasFlatScratchInit()) {
1808     unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1809     MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1810     CCInfo.AllocateReg(FlatScratchInitReg);
1811   }
1812 
1813   // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1814   // these from the dispatch pointer.
1815 }
1816 
1817 // Allocate special input registers that are initialized per-wave.
1818 void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo,
1819                                            MachineFunction &MF,
1820                                            SIMachineFunctionInfo &Info,
1821                                            CallingConv::ID CallConv,
1822                                            bool IsShader) const {
1823   if (Info.hasWorkGroupIDX()) {
1824     unsigned Reg = Info.addWorkGroupIDX();
1825     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
1826     CCInfo.AllocateReg(Reg);
1827   }
1828 
1829   if (Info.hasWorkGroupIDY()) {
1830     unsigned Reg = Info.addWorkGroupIDY();
1831     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
1832     CCInfo.AllocateReg(Reg);
1833   }
1834 
1835   if (Info.hasWorkGroupIDZ()) {
1836     unsigned Reg = Info.addWorkGroupIDZ();
1837     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
1838     CCInfo.AllocateReg(Reg);
1839   }
1840 
1841   if (Info.hasWorkGroupInfo()) {
1842     unsigned Reg = Info.addWorkGroupInfo();
1843     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
1844     CCInfo.AllocateReg(Reg);
1845   }
1846 
1847   if (Info.hasPrivateSegmentWaveByteOffset()) {
1848     // Scratch wave offset passed in system SGPR.
1849     unsigned PrivateSegmentWaveByteOffsetReg;
1850 
1851     if (IsShader) {
1852       PrivateSegmentWaveByteOffsetReg =
1853         Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
1854 
1855       // This is true if the scratch wave byte offset doesn't have a fixed
1856       // location.
1857       if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1858         PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1859         Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1860       }
1861     } else
1862       PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1863 
1864     MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1865     CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1866   }
1867 }
1868 
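// Decide which physical registers hold the scratch resource descriptor, stack
// pointer, and scratch wave / frame offsets, based on whether the function
// needs stack access.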
1869 static void reservePrivateMemoryRegs(const TargetMachine &TM,
1870                                      MachineFunction &MF,
1871                                      const SIRegisterInfo &TRI,
1872                                      SIMachineFunctionInfo &Info) {
  // Now that we've figured out where the scratch register inputs are, see if
  // we should reserve the arguments and use them directly.
1875   MachineFrameInfo &MFI = MF.getFrameInfo();
1876   bool HasStackObjects = MFI.hasStackObjects();
1877   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1878 
1879   // Record that we know we have non-spill stack objects so we don't need to
1880   // check all stack objects later.
1881   if (HasStackObjects)
1882     Info.setHasNonSpillStackObjects(true);
1883 
1884   // Everything live out of a block is spilled with fast regalloc, so it's
1885   // almost certain that spilling will be required.
1886   if (TM.getOptLevel() == CodeGenOpt::None)
1887     HasStackObjects = true;
1888 
1889   // For now assume stack access is needed in any callee functions, so we need
1890   // the scratch registers to pass in.
1891   bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1892 
1893   if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) {
1894     // If we have stack objects, we unquestionably need the private buffer
1895     // resource. For the Code Object V2 ABI, this will be the first 4 user
1896     // SGPR inputs. We can reserve those and use them directly.
1897 
1898     Register PrivateSegmentBufferReg =
1899         Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
1900     Info.setScratchRSrcReg(PrivateSegmentBufferReg);
1901   } else {
1902     unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
    // We tentatively reserve the last available registers (skipping those
    // which may contain VCC, FLAT_SCR, and XNACK). After register allocation,
    // we'll replace these with the registers immediately after those which
    // were really allocated. In the prologue, copies will be inserted from the
    // argument to these reserved registers.
1908 
1909     // Without HSA, relocations are used for the scratch pointer and the
1910     // buffer resource setup is always inserted in the prologue. Scratch wave
1911     // offset is still in an input SGPR.
1912     Info.setScratchRSrcReg(ReservedBufferReg);
1913   }
1914 
1915   // hasFP should be accurate for kernels even before the frame is finalized.
1916   if (ST.getFrameLowering()->hasFP(MF)) {
1917     MachineRegisterInfo &MRI = MF.getRegInfo();
1918 
1919     // Try to use s32 as the SP, but move it if it would interfere with input
1920     // arguments. This won't work with calls though.
1921     //
1922     // FIXME: Move SP to avoid any possible inputs, or find a way to spill input
1923     // registers.
1924     if (!MRI.isLiveIn(AMDGPU::SGPR32)) {
1925       Info.setStackPtrOffsetReg(AMDGPU::SGPR32);
1926     } else {
1927       assert(AMDGPU::isShader(MF.getFunction().getCallingConv()));
1928 
1929       if (MFI.hasCalls())
1930         report_fatal_error("call in graphics shader with too many input SGPRs");
1931 
1932       for (unsigned Reg : AMDGPU::SGPR_32RegClass) {
1933         if (!MRI.isLiveIn(Reg)) {
1934           Info.setStackPtrOffsetReg(Reg);
1935           break;
1936         }
1937       }
1938 
1939       if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG)
1940         report_fatal_error("failed to find register for SP");
1941     }
1942 
1943     if (MFI.hasCalls()) {
1944       Info.setScratchWaveOffsetReg(AMDGPU::SGPR33);
1945       Info.setFrameOffsetReg(AMDGPU::SGPR33);
1946     } else {
1947       unsigned ReservedOffsetReg =
1948         TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1949       Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1950       Info.setFrameOffsetReg(ReservedOffsetReg);
1951     }
1952   } else if (RequiresStackAccess) {
1953     assert(!MFI.hasCalls());
1954     // We know there are accesses and they will be done relative to SP, so just
1955     // pin it to the input.
1956     //
1957     // FIXME: Should not do this if inline asm is reading/writing these
1958     // registers.
1959     Register PreloadedSP = Info.getPreloadedReg(
1960         AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
1961 
1962     Info.setStackPtrOffsetReg(PreloadedSP);
1963     Info.setScratchWaveOffsetReg(PreloadedSP);
1964     Info.setFrameOffsetReg(PreloadedSP);
1965   } else {
1966     assert(!MFI.hasCalls());
1967 
1968     // There may not be stack access at all. There may still be spills, or
1969     // access of a constant pointer (in which cases an extra copy will be
1970     // emitted in the prolog).
1971     unsigned ReservedOffsetReg
1972       = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1973     Info.setStackPtrOffsetReg(ReservedOffsetReg);
1974     Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1975     Info.setFrameOffsetReg(ReservedOffsetReg);
1976   }
1977 }
1978 
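// Only callable (non-entry) functions use the split CSR mechanism.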
1979 bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
1980   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1981   return !Info->isEntryFunction();
1982 }
1983 
1984 void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
1985 
1986 }
1987 
1988 void SITargetLowering::insertCopiesSplitCSR(
1989   MachineBasicBlock *Entry,
1990   const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1991   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1992 
1993   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1994   if (!IStart)
1995     return;
1996 
1997   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1998   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1999   MachineBasicBlock::iterator MBBI = Entry->begin();
2000   for (const MCPhysReg *I = IStart; *I; ++I) {
2001     const TargetRegisterClass *RC = nullptr;
2002     if (AMDGPU::SReg_64RegClass.contains(*I))
2003       RC = &AMDGPU::SGPR_64RegClass;
2004     else if (AMDGPU::SReg_32RegClass.contains(*I))
2005       RC = &AMDGPU::SGPR_32RegClass;
2006     else
2007       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2008 
2009     Register NewVR = MRI->createVirtualRegister(RC);
2010     // Create copy from CSR to a virtual register.
2011     Entry->addLiveIn(*I);
2012     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
2013       .addReg(*I);
2014 
2015     // Insert the copy-back instructions right before the terminator.
2016     for (auto *Exit : Exits)
2017       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
2018               TII->get(TargetOpcode::COPY), *I)
2019         .addReg(NewVR);
2020   }
2021 }
2022 
2023 SDValue SITargetLowering::LowerFormalArguments(
2024     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
2025     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2026     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2027   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2028 
2029   MachineFunction &MF = DAG.getMachineFunction();
2030   const Function &Fn = MF.getFunction();
2031   FunctionType *FType = MF.getFunction().getFunctionType();
2032   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2033 
2034   if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
2035     DiagnosticInfoUnsupported NoGraphicsHSA(
2036         Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
2037     DAG.getContext()->diagnose(NoGraphicsHSA);
2038     return DAG.getEntryNode();
2039   }
2040 
2041   SmallVector<ISD::InputArg, 16> Splits;
2042   SmallVector<CCValAssign, 16> ArgLocs;
2043   BitVector Skipped(Ins.size());
2044   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
2045                  *DAG.getContext());
2046 
2047   bool IsShader = AMDGPU::isShader(CallConv);
2048   bool IsKernel = AMDGPU::isKernel(CallConv);
2049   bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
2050 
2051   if (IsShader) {
2052     processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
2053 
2054     // At least one interpolation mode must be enabled or else the GPU will
2055     // hang.
2056     //
    // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
    // set PSInputAddr, the user wants to enable some bits after the compilation
    // based on run-time states. Since we can't know what the final PSInputEna
    // will look like, we shouldn't do anything here and the user should take
    // responsibility for the correct programming.
2062     //
2063     // Otherwise, the following restrictions apply:
2064     // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
2065     // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
2066     //   enabled too.
2067     if (CallConv == CallingConv::AMDGPU_PS) {
      if ((Info->getPSInputAddr() & 0x7F) == 0 ||
          ((Info->getPSInputAddr() & 0xF) == 0 &&
           Info->isPSInputAllocated(11))) {
2071         CCInfo.AllocateReg(AMDGPU::VGPR0);
2072         CCInfo.AllocateReg(AMDGPU::VGPR1);
2073         Info->markPSInputAllocated(0);
2074         Info->markPSInputEnabled(0);
2075       }
2076       if (Subtarget->isAmdPalOS()) {
2077         // For isAmdPalOS, the user does not enable some bits after compilation
2078         // based on run-time states; the register values being generated here are
2079         // the final ones set in hardware. Therefore we need to apply the
2080         // workaround to PSInputAddr and PSInputEnable together.  (The case where
2081         // a bit is set in PSInputAddr but not PSInputEnable is where the
2082         // frontend set up an input arg for a particular interpolation mode, but
2083         // nothing uses that input arg. Really we should have an earlier pass
2084         // that removes such an arg.)
2085         unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
2086         if ((PsInputBits & 0x7F) == 0 ||
2087             ((PsInputBits & 0xF) == 0 &&
2088              (PsInputBits >> 11 & 1)))
2089           Info->markPSInputEnabled(
2090               countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
2091       }
2092     }
2093 
2094     assert(!Info->hasDispatchPtr() &&
2095            !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
2096            !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
2097            !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
2098            !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
2099            !Info->hasWorkItemIDZ());
2100   } else if (IsKernel) {
2101     assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
2102   } else {
2103     Splits.append(Ins.begin(), Ins.end());
2104   }
2105 
2106   if (IsEntryFunc) {
2107     allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
2108     allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
2109   } else {
2110     // For the fixed ABI, pass workitem IDs in the last argument register.
2111     if (AMDGPUTargetMachine::EnableFixedFunctionABI)
2112       allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);
2113   }
2114 
2115   if (IsKernel) {
2116     analyzeFormalArgumentsCompute(CCInfo, Ins);
2117   } else {
2118     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
2119     CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
2120   }
2121 
2122   SmallVector<SDValue, 16> Chains;
2123 
2124   // FIXME: This is the minimum kernel argument alignment. We should improve
2125   // this to the maximum alignment of the arguments.
2126   //
2127   // FIXME: Alignment of explicit arguments totally broken with non-0 explicit
2128   // kern arg offset.
2129   const unsigned KernelArgBaseAlign = 16;
2130 
  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
2132     const ISD::InputArg &Arg = Ins[i];
2133     if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
2134       InVals.push_back(DAG.getUNDEF(Arg.VT));
2135       continue;
2136     }
2137 
2138     CCValAssign &VA = ArgLocs[ArgIdx++];
2139     MVT VT = VA.getLocVT();
2140 
2141     if (IsEntryFunc && VA.isMemLoc()) {
2142       VT = Ins[i].VT;
2143       EVT MemVT = VA.getLocVT();
2144 
2145       const uint64_t Offset = VA.getLocMemOffset();
2146       unsigned Align = MinAlign(KernelArgBaseAlign, Offset);
2147 
2148       SDValue Arg = lowerKernargMemParameter(
2149         DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]);
2150       Chains.push_back(Arg.getValue(1));
2151 
2152       auto *ParamTy =
2153         dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
2154       if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
2155           ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
2156                       ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) {
2157         // On SI local pointers are just offsets into LDS, so they are always
2158         // less than 16-bits.  On CI and newer they could potentially be
2159         // real pointers, so we can't guarantee their size.
2160         Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
2161                           DAG.getValueType(MVT::i16));
2162       }
2163 
2164       InVals.push_back(Arg);
2165       continue;
2166     } else if (!IsEntryFunc && VA.isMemLoc()) {
2167       SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
2168       InVals.push_back(Val);
2169       if (!Arg.Flags.isByVal())
2170         Chains.push_back(Val.getValue(1));
2171       continue;
2172     }
2173 
2174     assert(VA.isRegLoc() && "Parameter must be in a register!");
2175 
2176     Register Reg = VA.getLocReg();
2177     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2178     EVT ValVT = VA.getValVT();
2179 
2180     Reg = MF.addLiveIn(Reg, RC);
2181     SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
2182 
2183     if (Arg.Flags.isSRet()) {
2184       // The return object should be reasonably addressable.
2185 
      // FIXME: This helps when the return is a real sret. If it is an
      // automatically inserted sret (i.e. CanLowerReturn returns false), an
      // extra copy is inserted in SelectionDAGBuilder which obscures this.
2189       unsigned NumBits
2190         = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex();
2191       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2192         DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
2193     }
2194 
2195     // If this is an 8 or 16-bit value, it is really passed promoted
2196     // to 32 bits. Insert an assert[sz]ext to capture this, then
2197     // truncate to the right size.
2198     switch (VA.getLocInfo()) {
2199     case CCValAssign::Full:
2200       break;
2201     case CCValAssign::BCvt:
2202       Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
2203       break;
2204     case CCValAssign::SExt:
2205       Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
2206                         DAG.getValueType(ValVT));
2207       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2208       break;
2209     case CCValAssign::ZExt:
2210       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2211                         DAG.getValueType(ValVT));
2212       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2213       break;
2214     case CCValAssign::AExt:
2215       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2216       break;
2217     default:
2218       llvm_unreachable("Unknown loc info!");
2219     }
2220 
2221     InVals.push_back(Val);
2222   }
2223 
2224   if (!IsEntryFunc && !AMDGPUTargetMachine::EnableFixedFunctionABI) {
2225     // Special inputs come after user arguments.
2226     allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
2227   }
2228 
2229   // Start adding system SGPRs.
2230   if (IsEntryFunc) {
2231     allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
2232   } else {
2233     CCInfo.AllocateReg(Info->getScratchRSrcReg());
2234     CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
2235     CCInfo.AllocateReg(Info->getFrameOffsetReg());
2236     allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
2237   }
2238 
2239   auto &ArgUsageInfo =
2240     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2241   ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
2242 
2243   unsigned StackArgSize = CCInfo.getNextStackOffset();
2244   Info->setBytesInStackArgArea(StackArgSize);
2245 
2246   return Chains.empty() ? Chain :
2247     DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
2248 }
2249 
2250 // TODO: If return values can't fit in registers, we should return as many as
2251 // possible in registers before passing on stack.
2252 bool SITargetLowering::CanLowerReturn(
2253   CallingConv::ID CallConv,
2254   MachineFunction &MF, bool IsVarArg,
2255   const SmallVectorImpl<ISD::OutputArg> &Outs,
2256   LLVMContext &Context) const {
2257   // Replacing returns with sret/stack usage doesn't make sense for shaders.
2258   // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
2259   // for shaders. Vector types should be explicitly handled by CC.
2260   if (AMDGPU::isEntryFunctionCC(CallConv))
2261     return true;
2262 
2263   SmallVector<CCValAssign, 16> RVLocs;
2264   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2265   return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
2266 }
2267 
2268 SDValue
2269 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2270                               bool isVarArg,
2271                               const SmallVectorImpl<ISD::OutputArg> &Outs,
2272                               const SmallVectorImpl<SDValue> &OutVals,
2273                               const SDLoc &DL, SelectionDAG &DAG) const {
2274   MachineFunction &MF = DAG.getMachineFunction();
2275   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2276 
2277   if (AMDGPU::isKernel(CallConv)) {
2278     return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
2279                                              OutVals, DL, DAG);
2280   }
2281 
2282   bool IsShader = AMDGPU::isShader(CallConv);
2283 
2284   Info->setIfReturnsVoid(Outs.empty());
2285   bool IsWaveEnd = Info->returnsVoid() && IsShader;
2286 
  // CCValAssign - represents the assignment of the return value to a
  // location.
2288   SmallVector<CCValAssign, 48> RVLocs;
2289   SmallVector<ISD::OutputArg, 48> Splits;
2290 
2291   // CCState - Info about the registers and stack slots.
2292   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2293                  *DAG.getContext());
2294 
2295   // Analyze outgoing return values.
2296   CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2297 
2298   SDValue Flag;
2299   SmallVector<SDValue, 48> RetOps;
2300   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2301 
2302   // Add return address for callable functions.
2303   if (!Info->isEntryFunction()) {
2304     const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2305     SDValue ReturnAddrReg = CreateLiveInRegister(
2306       DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2307 
2308     SDValue ReturnAddrVirtualReg = DAG.getRegister(
2309         MF.getRegInfo().createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass),
2310         MVT::i64);
2311     Chain =
2312         DAG.getCopyToReg(Chain, DL, ReturnAddrVirtualReg, ReturnAddrReg, Flag);
2313     Flag = Chain.getValue(1);
2314     RetOps.push_back(ReturnAddrVirtualReg);
2315   }
2316 
2317   // Copy the result values into the output registers.
2318   for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
2319        ++I, ++RealRVLocIdx) {
2320     CCValAssign &VA = RVLocs[I];
2321     assert(VA.isRegLoc() && "Can only return in registers!");
2322     // TODO: Partially return in registers if return values don't fit.
2323     SDValue Arg = OutVals[RealRVLocIdx];
2324 
2325     // Copied from other backends.
2326     switch (VA.getLocInfo()) {
2327     case CCValAssign::Full:
2328       break;
2329     case CCValAssign::BCvt:
2330       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2331       break;
2332     case CCValAssign::SExt:
2333       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2334       break;
2335     case CCValAssign::ZExt:
2336       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2337       break;
2338     case CCValAssign::AExt:
2339       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2340       break;
2341     default:
2342       llvm_unreachable("Unknown loc info!");
2343     }
2344 
2345     Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2346     Flag = Chain.getValue(1);
2347     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2348   }
2349 
2350   // FIXME: Does sret work properly?
2351   if (!Info->isEntryFunction()) {
2352     const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2353     const MCPhysReg *I =
2354       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2355     if (I) {
2356       for (; *I; ++I) {
2357         if (AMDGPU::SReg_64RegClass.contains(*I))
2358           RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2359         else if (AMDGPU::SReg_32RegClass.contains(*I))
2360           RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2361         else
2362           llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2363       }
2364     }
2365   }
2366 
2367   // Update chain and glue.
2368   RetOps[0] = Chain;
2369   if (Flag.getNode())
2370     RetOps.push_back(Flag);
2371 
2372   unsigned Opc = AMDGPUISD::ENDPGM;
2373   if (!IsWaveEnd)
2374     Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
2375   return DAG.getNode(Opc, DL, MVT::Other, RetOps);
2376 }
2377 
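// Extract the values returned by a call from the locations assigned by the
// calling convention, applying any required assert-ext/truncate or bitcast.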
2378 SDValue SITargetLowering::LowerCallResult(
2379     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2380     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2381     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2382     SDValue ThisVal) const {
2383   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2384 
2385   // Assign locations to each value returned by this call.
2386   SmallVector<CCValAssign, 16> RVLocs;
2387   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2388                  *DAG.getContext());
2389   CCInfo.AnalyzeCallResult(Ins, RetCC);
2390 
2391   // Copy all of the result registers out of their specified physreg.
2392   for (unsigned i = 0; i != RVLocs.size(); ++i) {
2393     CCValAssign VA = RVLocs[i];
2394     SDValue Val;
2395 
2396     if (VA.isRegLoc()) {
2397       Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2398       Chain = Val.getValue(1);
2399       InFlag = Val.getValue(2);
2400     } else if (VA.isMemLoc()) {
2401       report_fatal_error("TODO: return values in memory");
2402     } else
2403       llvm_unreachable("unknown argument location type");
2404 
2405     switch (VA.getLocInfo()) {
2406     case CCValAssign::Full:
2407       break;
2408     case CCValAssign::BCvt:
2409       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2410       break;
2411     case CCValAssign::ZExt:
2412       Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2413                         DAG.getValueType(VA.getValVT()));
2414       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2415       break;
2416     case CCValAssign::SExt:
2417       Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2418                         DAG.getValueType(VA.getValVT()));
2419       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2420       break;
2421     case CCValAssign::AExt:
2422       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2423       break;
2424     default:
2425       llvm_unreachable("Unknown loc info!");
2426     }
2427 
2428     InVals.push_back(Val);
2429   }
2430 
2431   return Chain;
2432 }
2433 
// Add code to pass the special inputs required by the used features, separate
// from the explicit user arguments present in the IR.
2436 void SITargetLowering::passSpecialInputs(
2437     CallLoweringInfo &CLI,
2438     CCState &CCInfo,
2439     const SIMachineFunctionInfo &Info,
2440     SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2441     SmallVectorImpl<SDValue> &MemOpChains,
2442     SDValue Chain) const {
2443   // If we don't have a call site, this was a call inserted by
2444   // legalization. These can never use special inputs.
2445   if (!CLI.CS)
2446     return;
2447 
2448   const Function *CalleeFunc = CLI.CS.getCalledFunction();
2449   assert(CalleeFunc);
2450 
2451   SelectionDAG &DAG = CLI.DAG;
2452   const SDLoc &DL = CLI.DL;
2453 
2454   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2455 
2456   auto &ArgUsageInfo =
2457     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2458   const AMDGPUFunctionArgInfo &CalleeArgInfo
2459     = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2460 
2461   const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2462 
2463   // TODO: Unify with private memory register handling. This is complicated by
2464   // the fact that at least in kernels, the input argument is not necessarily
2465   // in the same location as the input.
2466   AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
2467     AMDGPUFunctionArgInfo::DISPATCH_PTR,
2468     AMDGPUFunctionArgInfo::QUEUE_PTR,
2469     AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR,
2470     AMDGPUFunctionArgInfo::DISPATCH_ID,
2471     AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
2472     AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
2473     AMDGPUFunctionArgInfo::WORKGROUP_ID_Z
2474   };
2475 
2476   for (auto InputID : InputRegs) {
2477     const ArgDescriptor *OutgoingArg;
2478     const TargetRegisterClass *ArgRC;
2479 
2480     std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2481     if (!OutgoingArg)
2482       continue;
2483 
2484     const ArgDescriptor *IncomingArg;
2485     const TargetRegisterClass *IncomingArgRC;
2486     std::tie(IncomingArg, IncomingArgRC)
2487       = CallerArgInfo.getPreloadedValue(InputID);
2488     assert(IncomingArgRC == ArgRC);
2489 
2490     // All special arguments are ints for now.
2491     EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
2492     SDValue InputReg;
2493 
2494     if (IncomingArg) {
2495       InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2496     } else {
2497       // The implicit arg ptr is special because it doesn't have a corresponding
2498       // input for kernels, and is computed from the kernarg segment pointer.
2499       assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2500       InputReg = getImplicitArgPtr(DAG, DL);
2501     }
2502 
2503     if (OutgoingArg->isRegister()) {
2504       RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2505       if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
2506         report_fatal_error("failed to allocate implicit input argument");
2507     } else {
2508       unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4);
2509       SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2510                                               SpecialArgOffset);
2511       MemOpChains.push_back(ArgStore);
2512     }
2513   }
2514 
  // Pack workitem IDs into a single register, or pass them as-is if they are
  // already packed.
2517   const ArgDescriptor *OutgoingArg;
2518   const TargetRegisterClass *ArgRC;
2519 
2520   std::tie(OutgoingArg, ArgRC) =
2521     CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
2522   if (!OutgoingArg)
2523     std::tie(OutgoingArg, ArgRC) =
2524       CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
2525   if (!OutgoingArg)
2526     std::tie(OutgoingArg, ArgRC) =
2527       CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
2528   if (!OutgoingArg)
2529     return;
2530 
2531   const ArgDescriptor *IncomingArgX
2532     = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X).first;
2533   const ArgDescriptor *IncomingArgY
2534     = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y).first;
2535   const ArgDescriptor *IncomingArgZ
2536     = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z).first;
2537 
2538   SDValue InputReg;
2539   SDLoc SL;
2540 
2541   // If incoming ids are not packed we need to pack them.
2542   if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo.WorkItemIDX)
2543     InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX);
2544 
2545   if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo.WorkItemIDY) {
2546     SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY);
2547     Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y,
2548                     DAG.getShiftAmountConstant(10, MVT::i32, SL));
2549     InputReg = InputReg.getNode() ?
2550                  DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Y) : Y;
2551   }
2552 
2553   if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo.WorkItemIDZ) {
2554     SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ);
2555     Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z,
2556                     DAG.getShiftAmountConstant(20, MVT::i32, SL));
2557     InputReg = InputReg.getNode() ?
2558                  DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Z) : Z;
2559   }
2560 
2561   if (!InputReg.getNode()) {
    // Workitem IDs are already packed; any of the present incoming arguments
    // will carry all required fields.
2564     ArgDescriptor IncomingArg = ArgDescriptor::createArg(
2565       IncomingArgX ? *IncomingArgX :
2566       IncomingArgY ? *IncomingArgY :
2567                      *IncomingArgZ, ~0u);
2568     InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg);
2569   }
2570 
2571   if (OutgoingArg->isRegister()) {
2572     RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2573     CCInfo.AllocateReg(OutgoingArg->getRegister());
2574   } else {
2575     unsigned SpecialArgOffset = CCInfo.AllocateStack(4, 4);
2576     SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2577                                             SpecialArgOffset);
2578     MemOpChains.push_back(ArgStore);
2579   }
2580 }
2581 
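// Tail call optimization can only be guaranteed for the 'fast' calling
// convention.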
2582 static bool canGuaranteeTCO(CallingConv::ID CC) {
2583   return CC == CallingConv::Fast;
2584 }
2585 
2586 /// Return true if we might ever do TCO for calls with this calling convention.
2587 static bool mayTailCallThisCC(CallingConv::ID CC) {
2588   switch (CC) {
2589   case CallingConv::C:
2590     return true;
2591   default:
2592     return canGuaranteeTCO(CC);
2593   }
2594 }
2595 
2596 bool SITargetLowering::isEligibleForTailCallOptimization(
2597     SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2598     const SmallVectorImpl<ISD::OutputArg> &Outs,
2599     const SmallVectorImpl<SDValue> &OutVals,
2600     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2601   if (!mayTailCallThisCC(CalleeCC))
2602     return false;
2603 
2604   MachineFunction &MF = DAG.getMachineFunction();
2605   const Function &CallerF = MF.getFunction();
2606   CallingConv::ID CallerCC = CallerF.getCallingConv();
2607   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2608   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2609 
  // Kernels aren't callable, and don't have a live-in return address, so it
  // doesn't make sense to do a tail call with entry functions.
2612   if (!CallerPreserved)
2613     return false;
2614 
2615   bool CCMatch = CallerCC == CalleeCC;
2616 
2617   if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
2618     if (canGuaranteeTCO(CalleeCC) && CCMatch)
2619       return true;
2620     return false;
2621   }
2622 
2623   // TODO: Can we handle var args?
2624   if (IsVarArg)
2625     return false;
2626 
2627   for (const Argument &Arg : CallerF.args()) {
2628     if (Arg.hasByValAttr())
2629       return false;
2630   }
2631 
2632   LLVMContext &Ctx = *DAG.getContext();
2633 
2634   // Check that the call results are passed in the same way.
2635   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2636                                   CCAssignFnForCall(CalleeCC, IsVarArg),
2637                                   CCAssignFnForCall(CallerCC, IsVarArg)))
2638     return false;
2639 
2640   // The callee has to preserve all registers the caller needs to preserve.
2641   if (!CCMatch) {
2642     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2643     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2644       return false;
2645   }
2646 
2647   // Nothing more to check if the callee is taking no arguments.
2648   if (Outs.empty())
2649     return true;
2650 
2651   SmallVector<CCValAssign, 16> ArgLocs;
2652   CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2653 
2654   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2655 
2656   const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2657   // If the stack arguments for this call do not fit into our own save area then
2658   // the call cannot be made tail.
2659   // TODO: Is this really necessary?
2660   if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2661     return false;
2662 
2663   const MachineRegisterInfo &MRI = MF.getRegInfo();
2664   return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2665 }
2666 
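// A call marked as a tail call can only be emitted as one if the caller is not
// an entry function.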
2667 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2668   if (!CI->isTailCall())
2669     return false;
2670 
2671   const Function *ParentFn = CI->getParent()->getParent();
2672   if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2673     return false;
2674   return true;
2675 }
2676 
2677 // The wave scratch offset register is used as the global base pointer.
2678 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2679                                     SmallVectorImpl<SDValue> &InVals) const {
2680   SelectionDAG &DAG = CLI.DAG;
2681   const SDLoc &DL = CLI.DL;
2682   SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2683   SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2684   SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2685   SDValue Chain = CLI.Chain;
2686   SDValue Callee = CLI.Callee;
2687   bool &IsTailCall = CLI.IsTailCall;
2688   CallingConv::ID CallConv = CLI.CallConv;
2689   bool IsVarArg = CLI.IsVarArg;
2690   bool IsSibCall = false;
2691   bool IsThisReturn = false;
2692   MachineFunction &MF = DAG.getMachineFunction();
2693 
2694   if (Callee.isUndef() || isNullConstant(Callee)) {
2695     if (!CLI.IsTailCall) {
2696       for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
2697         InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
2698     }
2699 
2700     return Chain;
2701   }
2702 
2703   if (IsVarArg) {
2704     return lowerUnhandledCall(CLI, InVals,
2705                               "unsupported call to variadic function ");
2706   }
2707 
2708   if (!CLI.CS.getInstruction())
2709     report_fatal_error("unsupported libcall legalization");
2710 
2711   if (!CLI.CS.getCalledFunction()) {
2712     return lowerUnhandledCall(CLI, InVals,
2713                               "unsupported indirect call to function ");
2714   }
2715 
2716   if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2717     return lowerUnhandledCall(CLI, InVals,
2718                               "unsupported required tail call to function ");
2719   }
2720 
2721   if (AMDGPU::isShader(MF.getFunction().getCallingConv())) {
2722     // Note the issue is with the CC of the calling function, not of the call
2723     // itself.
2724     return lowerUnhandledCall(CLI, InVals,
2725                           "unsupported call from graphics shader of function ");
2726   }
2727 
2728   if (IsTailCall) {
2729     IsTailCall = isEligibleForTailCallOptimization(
2730       Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2731     if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2732       report_fatal_error("failed to perform tail call elimination on a call "
2733                          "site marked musttail");
2734     }
2735 
2736     bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2737 
2738     // A sibling call is one where we're under the usual C ABI and not planning
2739     // to change that but can still do a tail call:
2740     if (!TailCallOpt && IsTailCall)
2741       IsSibCall = true;
2742 
2743     if (IsTailCall)
2744       ++NumTailCalls;
2745   }
2746 
2747   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2748   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2749   SmallVector<SDValue, 8> MemOpChains;
2750 
2751   // Analyze operands of the call, assigning locations to each operand.
2752   SmallVector<CCValAssign, 16> ArgLocs;
2753   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2754   CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
2755 
2756   if (AMDGPUTargetMachine::EnableFixedFunctionABI) {
2757     // With a fixed ABI, allocate fixed registers before user arguments.
2758     passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain);
2759   }
2760 
2761   CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2762 
2763   // Get a count of how many bytes are to be pushed on the stack.
2764   unsigned NumBytes = CCInfo.getNextStackOffset();
2765 
2766   if (IsSibCall) {
2767     // Since we're not changing the ABI to make this a tail call, the memory
2768     // operands are already available in the caller's incoming argument space.
2769     NumBytes = 0;
2770   }
2771 
2772   // FPDiff is the byte offset of the call's argument area from the callee's.
2773   // Stores to callee stack arguments will be placed in FixedStackSlots offset
2774   // by this amount for a tail call. In a sibling call it must be 0 because the
2775   // caller will deallocate the entire stack and the callee still expects its
2776   // arguments to begin at SP+0. Completely unused for non-tail calls.
2777   int32_t FPDiff = 0;
2778   MachineFrameInfo &MFI = MF.getFrameInfo();
2779 
2780   // Adjust the stack pointer for the new arguments...
2781   // These operations are automatically eliminated by the prolog/epilog pass.
2782   if (!IsSibCall) {
2783     Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2784 
2785     SmallVector<SDValue, 4> CopyFromChains;
2786 
2787     // In the HSA case, this should be an identity copy.
2788     SDValue ScratchRSrcReg
2789       = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2790     RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
2791     CopyFromChains.push_back(ScratchRSrcReg.getValue(1));
2792     Chain = DAG.getTokenFactor(DL, CopyFromChains);
2793   }
2794 
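       // Addresses into the scratch (private) stack are 32 bits wide.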
2795   MVT PtrVT = MVT::i32;
2796 
2797   // Walk the register/memloc assignments, inserting copies/loads.
2798   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2799     CCValAssign &VA = ArgLocs[i];
2800     SDValue Arg = OutVals[i];
2801 
2802     // Promote the value if needed.
2803     switch (VA.getLocInfo()) {
2804     case CCValAssign::Full:
2805       break;
2806     case CCValAssign::BCvt:
2807       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2808       break;
2809     case CCValAssign::ZExt:
2810       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2811       break;
2812     case CCValAssign::SExt:
2813       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2814       break;
2815     case CCValAssign::AExt:
2816       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2817       break;
2818     case CCValAssign::FPExt:
2819       Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2820       break;
2821     default:
2822       llvm_unreachable("Unknown loc info!");
2823     }
2824 
2825     if (VA.isRegLoc()) {
2826       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2827     } else {
2828       assert(VA.isMemLoc());
2829 
2830       SDValue DstAddr;
2831       MachinePointerInfo DstInfo;
2832 
2833       unsigned LocMemOffset = VA.getLocMemOffset();
2834       int32_t Offset = LocMemOffset;
2835 
2836       SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT);
2837       MaybeAlign Alignment;
2838 
2839       if (IsTailCall) {
2840         ISD::ArgFlagsTy Flags = Outs[i].Flags;
2841         unsigned OpSize = Flags.isByVal() ?
2842           Flags.getByValSize() : VA.getValVT().getStoreSize();
2843 
2844         // FIXME: We can have better than the minimum byval required alignment.
2845         Alignment =
2846             Flags.isByVal()
2847                 ? Flags.getNonZeroByValAlign()
2848                 : commonAlignment(Subtarget->getStackAlignment(), Offset);
2849 
2850         Offset = Offset + FPDiff;
2851         int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2852 
2853         DstAddr = DAG.getFrameIndex(FI, PtrVT);
2854         DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2855 
2856         // Make sure any stack arguments overlapping with where we're storing
2857         // are loaded before this eventual operation. Otherwise they'll be
2858         // clobbered.
2859 
2860         // FIXME: Why is this really necessary? This seems to just result in a
2861         // lot of code to copy the stack arguments and write them back to the
2862         // same locations, which are supposed to be immutable.
2863         Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2864       } else {
2865         DstAddr = PtrOff;
2866         DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
2867         Alignment =
2868             commonAlignment(Subtarget->getStackAlignment(), LocMemOffset);
2869       }
2870 
2871       if (Outs[i].Flags.isByVal()) {
2872         SDValue SizeNode =
2873             DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2874         SDValue Cpy =
2875             DAG.getMemcpy(Chain, DL, DstAddr, Arg, SizeNode,
2876                           Outs[i].Flags.getNonZeroByValAlign(),
2877                           /*isVol = */ false, /*AlwaysInline = */ true,
2878                           /*isTailCall = */ false, DstInfo,
2879                           MachinePointerInfo(AMDGPUAS::PRIVATE_ADDRESS));
2880 
2881         MemOpChains.push_back(Cpy);
2882       } else {
2883         SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo,
2884                                      Alignment ? Alignment->value() : 0);
2885         MemOpChains.push_back(Store);
2886       }
2887     }
2888   }
2889 
2890   if (!AMDGPUTargetMachine::EnableFixedFunctionABI) {
2891     // Copy special input registers after user input arguments.
2892     passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain);
2893   }
2894 
2895   if (!MemOpChains.empty())
2896     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2897 
2898   // Build a sequence of copy-to-reg nodes chained together with token chain
2899   // and flag operands which copy the outgoing args into the appropriate regs.
2900   SDValue InFlag;
2901   for (auto &RegToPass : RegsToPass) {
2902     Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2903                              RegToPass.second, InFlag);
2904     InFlag = Chain.getValue(1);
2905   }
2906 
2908   SDValue PhysReturnAddrReg;
2909   if (IsTailCall) {
2910     // Since the return is being combined with the call, we need to pass on the
2911     // return address.
2912 
2913     const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2914     SDValue ReturnAddrReg = CreateLiveInRegister(
2915       DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2916 
2917     PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2918                                         MVT::i64);
2919     Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2920     InFlag = Chain.getValue(1);
2921   }
2922 
2923   // We don't usually want to end the call-sequence here because we would tidy
2924   // the frame up *after* the call, however in the ABI-changing tail-call case
2925   // we've carefully laid out the parameters so that when sp is reset they'll be
2926   // in the correct location.
2927   if (IsTailCall && !IsSibCall) {
2928     Chain = DAG.getCALLSEQ_END(Chain,
2929                                DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2930                                DAG.getTargetConstant(0, DL, MVT::i32),
2931                                InFlag, DL);
2932     InFlag = Chain.getValue(1);
2933   }
2934 
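       // Build the operand list for the call / tail-call node.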
2935   std::vector<SDValue> Ops;
2936   Ops.push_back(Chain);
2937   Ops.push_back(Callee);
2938   // Add a redundant copy of the callee global which will not be legalized, as
2939   // we need direct access to the callee later.
2940   GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Callee);
2941   const GlobalValue *GV = GSD->getGlobal();
2942   Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64));
2943 
2944   if (IsTailCall) {
2945     // Each tail call may have to adjust the stack by a different amount, so
2946     // this information must travel along with the operation for eventual
2947     // consumption by emitEpilogue.
2948     Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
2949 
2950     Ops.push_back(PhysReturnAddrReg);
2951   }
2952 
2953   // Add argument registers to the end of the list so that they are known live
2954   // into the call.
2955   for (auto &RegToPass : RegsToPass) {
2956     Ops.push_back(DAG.getRegister(RegToPass.first,
2957                                   RegToPass.second.getValueType()));
2958   }
2959 
2960   // Add a register mask operand representing the call-preserved registers.
2961 
2962   auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
2963   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2964   assert(Mask && "Missing call preserved mask for calling convention");
2965   Ops.push_back(DAG.getRegisterMask(Mask));
2966 
2967   if (InFlag.getNode())
2968     Ops.push_back(InFlag);
2969 
2970   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2971 
2972   // If we're doing a tail call, use a TC_RETURN here rather than an
2973   // actual call instruction.
2974   if (IsTailCall) {
2975     MFI.setHasTailCall();
2976     return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
2977   }
2978 
2979   // Returns a chain and a flag for retval copy to use.
2980   SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2981   Chain = Call.getValue(0);
2982   InFlag = Call.getValue(1);
2983 
2984   uint64_t CalleePopBytes = NumBytes;
2985   Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
2986                              DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2987                              InFlag, DL);
2988   if (!Ins.empty())
2989     InFlag = Chain.getValue(1);
2990 
2991   // Handle result values, copying them out of physregs into vregs that we
2992   // return.
2993   return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2994                          InVals, IsThisReturn,
2995                          IsThisReturn ? OutVals[0] : SDValue());
2996 }
2997 
2998 Register SITargetLowering::getRegisterByName(const char* RegName, LLT VT,
2999                                              const MachineFunction &MF) const {
3000   Register Reg = StringSwitch<Register>(RegName)
3001     .Case("m0", AMDGPU::M0)
3002     .Case("exec", AMDGPU::EXEC)
3003     .Case("exec_lo", AMDGPU::EXEC_LO)
3004     .Case("exec_hi", AMDGPU::EXEC_HI)
3005     .Case("flat_scratch", AMDGPU::FLAT_SCR)
3006     .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
3007     .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
3008     .Default(Register());
3009 
3010   if (Reg == AMDGPU::NoRegister) {
3011     report_fatal_error(Twine("invalid register name \""
3012                              + StringRef(RegName) + "\"."));
3013   }
3015 
3016   if (!Subtarget->hasFlatScrRegister() &&
3017        Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
3018     report_fatal_error(Twine("invalid register \""
3019                              + StringRef(RegName) + "\" for subtarget."));
3020   }
3021 
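       // Make sure the width of the requested type matches the named register.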
3022   switch (Reg) {
3023   case AMDGPU::M0:
3024   case AMDGPU::EXEC_LO:
3025   case AMDGPU::EXEC_HI:
3026   case AMDGPU::FLAT_SCR_LO:
3027   case AMDGPU::FLAT_SCR_HI:
3028     if (VT.getSizeInBits() == 32)
3029       return Reg;
3030     break;
3031   case AMDGPU::EXEC:
3032   case AMDGPU::FLAT_SCR:
3033     if (VT.getSizeInBits() == 64)
3034       return Reg;
3035     break;
3036   default:
3037     llvm_unreachable("missing register type checking");
3038   }
3039 
3040   report_fatal_error(Twine("invalid type for register \""
3041                            + StringRef(RegName) + "\"."));
3042 }
3043 
3044 // If kill is not the last instruction, split the block so kill is always a
3045 // proper terminator.
3046 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
3047                                                     MachineBasicBlock *BB) const {
3048   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3049 
3050   MachineBasicBlock::iterator SplitPoint(&MI);
3051   ++SplitPoint;
3052 
3053   if (SplitPoint == BB->end()) {
3054     // Don't bother with a new block.
3055     MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
3056     return BB;
3057   }
3058 
3059   MachineFunction *MF = BB->getParent();
3060   MachineBasicBlock *SplitBB
3061     = MF->CreateMachineBasicBlock(BB->getBasicBlock());
3062 
3063   MF->insert(++MachineFunction::iterator(BB), SplitBB);
3064   SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
3065 
3066   SplitBB->transferSuccessorsAndUpdatePHIs(BB);
3067   BB->addSuccessor(SplitBB);
3068 
3069   MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
3070   return SplitBB;
3071 }
3072 
3073 /// Split block \p MBB at \p MI, so as to insert a loop. If \p InstInLoop is
3074 /// true, \p MI will be the only instruction in the loop body block. Otherwise,
3075 /// it will be the first instruction in the remainder block.
3076 ///
3077 /// \returns { LoopBody, Remainder }
3078 static std::pair<MachineBasicBlock *, MachineBasicBlock *>
3079 splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) {
3080   MachineFunction *MF = MBB.getParent();
3081   MachineBasicBlock::iterator I(&MI);
3082 
3083   // To insert the loop we need to split the block. Move everything after this
3084   // point to a new block, and insert a new empty block between the two.
3085   MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
3086   MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
3087   MachineFunction::iterator MBBI(MBB);
3088   ++MBBI;
3089 
3090   MF->insert(MBBI, LoopBB);
3091   MF->insert(MBBI, RemainderBB);
3092 
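       // The loop block branches back to itself and also exits to the remainder
       // block.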
3093   LoopBB->addSuccessor(LoopBB);
3094   LoopBB->addSuccessor(RemainderBB);
3095 
3096   // Move the rest of the block into a new block.
3097   RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
3098 
3099   if (InstInLoop) {
3100     auto Next = std::next(I);
3101 
3102     // Move instruction to loop body.
3103     LoopBB->splice(LoopBB->begin(), &MBB, I, Next);
3104 
3105     // Move the rest of the block.
3106     RemainderBB->splice(RemainderBB->begin(), &MBB, Next, MBB.end());
3107   } else {
3108     RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
3109   }
3110 
3111   MBB.addSuccessor(LoopBB);
3112 
3113   return std::make_pair(LoopBB, RemainderBB);
3114 }
3115 
3116 /// Insert \p MI into a BUNDLE with an S_WAITCNT 0 immediately following it.
3117 void SITargetLowering::bundleInstWithWaitcnt(MachineInstr &MI) const {
3118   MachineBasicBlock *MBB = MI.getParent();
3119   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3120   auto I = MI.getIterator();
3121   auto E = std::next(I);
3122 
3123   BuildMI(*MBB, E, MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
3124     .addImm(0);
3125 
3126   MIBundleBuilder Bundler(*MBB, I, E);
3127   finalizeBundle(*MBB, Bundler.begin());
3128 }
3129 
3130 MachineBasicBlock *
3131 SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI,
3132                                          MachineBasicBlock *BB) const {
3133   const DebugLoc &DL = MI.getDebugLoc();
3134 
3135   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3136 
3137   MachineBasicBlock *LoopBB;
3138   MachineBasicBlock *RemainderBB;
3139   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3140 
3141   // Apparently kill flags are only valid if the def is in the same block?
3142   if (MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0))
3143     Src->setIsKill(false);
3144 
3145   std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, *BB, true);
3146 
3147   MachineBasicBlock::iterator I = LoopBB->end();
3148 
3149   const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg(
3150     AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1);
3151 
3152   // Clear TRAP_STS.MEM_VIOL
3153   BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::S_SETREG_IMM32_B32))
3154     .addImm(0)
3155     .addImm(EncodedReg);
3156 
3157   bundleInstWithWaitcnt(MI);
3158 
3159   Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3160 
3161   // Load and check TRAP_STS.MEM_VIOL
3162   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg)
3163     .addImm(EncodedReg);
3164 
3165   // FIXME: Do we need to use an isel pseudo that may clobber scc?
3166   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32))
3167     .addReg(Reg, RegState::Kill)
3168     .addImm(0);
3169   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3170     .addMBB(LoopBB);
3171 
3172   return RemainderBB;
3173 }
3174 
3175 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
3176 // wavefront. If the value is uniform and just happens to be in a VGPR, this
3177 // will only do one iteration. In the worst case, this will loop 64 times.
3178 //
3179 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
3180 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
3181   const SIInstrInfo *TII,
3182   MachineRegisterInfo &MRI,
3183   MachineBasicBlock &OrigBB,
3184   MachineBasicBlock &LoopBB,
3185   const DebugLoc &DL,
3186   const MachineOperand &IdxReg,
3187   unsigned InitReg,
3188   unsigned ResultReg,
3189   unsigned PhiReg,
3190   unsigned InitSaveExecReg,
3191   int Offset,
3192   bool UseGPRIdxMode,
3193   bool IsIndirectSrc) {
3194   MachineFunction *MF = OrigBB.getParent();
3195   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3196   const SIRegisterInfo *TRI = ST.getRegisterInfo();
3197   MachineBasicBlock::iterator I = LoopBB.begin();
3198 
3199   const TargetRegisterClass *BoolRC = TRI->getBoolRC();
3200   Register PhiExec = MRI.createVirtualRegister(BoolRC);
3201   Register NewExec = MRI.createVirtualRegister(BoolRC);
3202   Register CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3203   Register CondReg = MRI.createVirtualRegister(BoolRC);
3204 
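       // PHIs carry the in-progress result and the exec mask saved on the
       // previous iteration into the next one.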
3205   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
3206     .addReg(InitReg)
3207     .addMBB(&OrigBB)
3208     .addReg(ResultReg)
3209     .addMBB(&LoopBB);
3210 
3211   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
3212     .addReg(InitSaveExecReg)
3213     .addMBB(&OrigBB)
3214     .addReg(NewExec)
3215     .addMBB(&LoopBB);
3216 
3217   // Read the next variant <- also loop target.
3218   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
3219     .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
3220 
3221   // Compare the just read M0 value to all possible Idx values.
3222   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
3223     .addReg(CurrentIdxReg)
3224     .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
3225 
3226   // Update EXEC, save the original EXEC value to VCC.
3227   BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32
3228                                                 : AMDGPU::S_AND_SAVEEXEC_B64),
3229           NewExec)
3230     .addReg(CondReg, RegState::Kill);
3231 
3232   MRI.setSimpleHint(NewExec, CondReg);
3233 
3234   if (UseGPRIdxMode) {
3235     unsigned IdxReg;
3236     if (Offset == 0) {
3237       IdxReg = CurrentIdxReg;
3238     } else {
3239       IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3240       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
3241         .addReg(CurrentIdxReg, RegState::Kill)
3242         .addImm(Offset);
3243     }
3244     unsigned IdxMode = IsIndirectSrc ?
3245       AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
3246     MachineInstr *SetOn =
3247       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3248       .addReg(IdxReg, RegState::Kill)
3249       .addImm(IdxMode);
3250     SetOn->getOperand(3).setIsUndef();
3251   } else {
3252     // Move index from VCC into M0
3253     if (Offset == 0) {
3254       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3255         .addReg(CurrentIdxReg, RegState::Kill);
3256     } else {
3257       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
3258         .addReg(CurrentIdxReg, RegState::Kill)
3259         .addImm(Offset);
3260     }
3261   }
3262 
3263   // Update EXEC, switch all done bits to 0 and all todo bits to 1.
3264   unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3265   MachineInstr *InsertPt =
3266     BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term
3267                                                   : AMDGPU::S_XOR_B64_term), Exec)
3268       .addReg(Exec)
3269       .addReg(NewExec);
3270 
3271   // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
3272   // s_cbranch_scc0?
3273 
3274   // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
3275   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
3276     .addMBB(&LoopBB);
3277 
3278   return InsertPt->getIterator();
3279 }
3280 
3281 // This has slightly sub-optimal regalloc when the source vector is killed by
3282 // the read. The register allocator does not understand that the kill is
3283 // per-workitem, so the source is kept alive for the whole loop and we end up
3284 // not re-using a subregister from it, using one more VGPR than necessary. This
3285 // extra VGPR was saved when this was expanded after register allocation.
3286 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
3287                                                   MachineBasicBlock &MBB,
3288                                                   MachineInstr &MI,
3289                                                   unsigned InitResultReg,
3290                                                   unsigned PhiReg,
3291                                                   int Offset,
3292                                                   bool UseGPRIdxMode,
3293                                                   bool IsIndirectSrc) {
3294   MachineFunction *MF = MBB.getParent();
3295   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3296   const SIRegisterInfo *TRI = ST.getRegisterInfo();
3297   MachineRegisterInfo &MRI = MF->getRegInfo();
3298   const DebugLoc &DL = MI.getDebugLoc();
3299   MachineBasicBlock::iterator I(&MI);
3300 
3301   const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
3302   Register DstReg = MI.getOperand(0).getReg();
3303   Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
3304   Register TmpExec = MRI.createVirtualRegister(BoolXExecRC);
3305   unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3306   unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
3307 
3308   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
3309 
3310   // Save the EXEC mask
3311   BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec)
3312     .addReg(Exec);
3313 
3314   MachineBasicBlock *LoopBB;
3315   MachineBasicBlock *RemainderBB;
3316   std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, MBB, false);
3317 
3318   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3319 
3320   auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
3321                                       InitResultReg, DstReg, PhiReg, TmpExec,
3322                                       Offset, UseGPRIdxMode, IsIndirectSrc);
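       // Insert a landing pad between the loop and the remainder block so the
       // saved EXEC mask is restored once the loop exits.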
3323   MachineBasicBlock* LandingPad = MF->CreateMachineBasicBlock();
3324   MachineFunction::iterator MBBI(LoopBB);
3325   ++MBBI;
3326   MF->insert(MBBI, LandingPad);
3327   LoopBB->removeSuccessor(RemainderBB);
3328   LandingPad->addSuccessor(RemainderBB);
3329   LoopBB->addSuccessor(LandingPad);
3330   MachineBasicBlock::iterator First = LandingPad->begin();
3331   BuildMI(*LandingPad, First, DL, TII->get(MovExecOpc), Exec)
3332     .addReg(SaveExec);
3333 
3334   return InsPt;
3335 }
3336 
3337 // Returns subreg index, offset
3338 static std::pair<unsigned, int>
3339 computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
3340                             const TargetRegisterClass *SuperRC,
3341                             unsigned VecReg,
3342                             int Offset) {
3343   int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
3344 
3345   // Skip out of bounds offsets, or else we would end up using an undefined
3346   // register.
3347   if (Offset >= NumElts || Offset < 0)
3348     return std::make_pair(AMDGPU::sub0, Offset);
3349 
3350   return std::make_pair(SIRegisterInfo::getSubRegFromChannel(Offset), 0);
3351 }
3352 
3353 // Return true if the index is an SGPR and was set.
3354 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
3355                                  MachineRegisterInfo &MRI,
3356                                  MachineInstr &MI,
3357                                  int Offset,
3358                                  bool UseGPRIdxMode,
3359                                  bool IsIndirectSrc) {
3360   MachineBasicBlock *MBB = MI.getParent();
3361   const DebugLoc &DL = MI.getDebugLoc();
3362   MachineBasicBlock::iterator I(&MI);
3363 
3364   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3365   const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
3366 
3367   assert(Idx->getReg() != AMDGPU::NoRegister);
3368 
3369   if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
3370     return false;
3371 
3372   if (UseGPRIdxMode) {
3373     unsigned IdxMode = IsIndirectSrc ?
3374       AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
3375     if (Offset == 0) {
3376       MachineInstr *SetOn =
3377           BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3378               .add(*Idx)
3379               .addImm(IdxMode);
3380 
3381       SetOn->getOperand(3).setIsUndef();
3382     } else {
3383       Register Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3384       BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
3385           .add(*Idx)
3386           .addImm(Offset);
3387       MachineInstr *SetOn =
3388         BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3389         .addReg(Tmp, RegState::Kill)
3390         .addImm(IdxMode);
3391 
3392       SetOn->getOperand(3).setIsUndef();
3393     }
3394 
3395     return true;
3396   }
3397 
3398   if (Offset == 0) {
3399     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3400       .add(*Idx);
3401   } else {
3402     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
3403       .add(*Idx)
3404       .addImm(Offset);
3405   }
3406 
3407   return true;
3408 }
3409 
3410 // Control flow needs to be inserted if indexing with a VGPR.
3411 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
3412                                           MachineBasicBlock &MBB,
3413                                           const GCNSubtarget &ST) {
3414   const SIInstrInfo *TII = ST.getInstrInfo();
3415   const SIRegisterInfo &TRI = TII->getRegisterInfo();
3416   MachineFunction *MF = MBB.getParent();
3417   MachineRegisterInfo &MRI = MF->getRegInfo();
3418 
3419   Register Dst = MI.getOperand(0).getReg();
3420   Register SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
3421   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3422 
3423   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
3424 
3425   unsigned SubReg;
3426   std::tie(SubReg, Offset)
3427     = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
3428 
3429   const bool UseGPRIdxMode = ST.useVGPRIndexMode();
3430 
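       // If the index is uniform (already in an SGPR) we can write it to M0 (or
       // use GPR index mode) directly and skip the waterfall loop below.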
3431   if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
3432     MachineBasicBlock::iterator I(&MI);
3433     const DebugLoc &DL = MI.getDebugLoc();
3434 
3435     if (UseGPRIdxMode) {
3436       // TODO: Look at the uses to avoid the copy. This may require rescheduling
3437       // to avoid interfering with other uses, so probably requires a new
3438       // optimization pass.
3439       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3440         .addReg(SrcReg, RegState::Undef, SubReg)
3441         .addReg(SrcReg, RegState::Implicit)
3442         .addReg(AMDGPU::M0, RegState::Implicit);
3443       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3444     } else {
3445       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3446         .addReg(SrcReg, RegState::Undef, SubReg)
3447         .addReg(SrcReg, RegState::Implicit);
3448     }
3449 
3450     MI.eraseFromParent();
3451 
3452     return &MBB;
3453   }
3454 
3455   const DebugLoc &DL = MI.getDebugLoc();
3456   MachineBasicBlock::iterator I(&MI);
3457 
3458   Register PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3459   Register InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3460 
3461   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
3462 
3463   auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
3464                               Offset, UseGPRIdxMode, true);
3465   MachineBasicBlock *LoopBB = InsPt->getParent();
3466 
3467   if (UseGPRIdxMode) {
3468     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3469       .addReg(SrcReg, RegState::Undef, SubReg)
3470       .addReg(SrcReg, RegState::Implicit)
3471       .addReg(AMDGPU::M0, RegState::Implicit);
3472     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3473   } else {
3474     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3475       .addReg(SrcReg, RegState::Undef, SubReg)
3476       .addReg(SrcReg, RegState::Implicit);
3477   }
3478 
3479   MI.eraseFromParent();
3480 
3481   return LoopBB;
3482 }
3483 
3484 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3485                                           MachineBasicBlock &MBB,
3486                                           const GCNSubtarget &ST) {
3487   const SIInstrInfo *TII = ST.getInstrInfo();
3488   const SIRegisterInfo &TRI = TII->getRegisterInfo();
3489   MachineFunction *MF = MBB.getParent();
3490   MachineRegisterInfo &MRI = MF->getRegInfo();
3491 
3492   Register Dst = MI.getOperand(0).getReg();
3493   const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3494   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3495   const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3496   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3497   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3498 
3499   // This can be an immediate, but will be folded later.
3500   assert(Val->getReg());
3501 
3502   unsigned SubReg;
3503   std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3504                                                          SrcVec->getReg(),
3505                                                          Offset);
3506   const bool UseGPRIdxMode = ST.useVGPRIndexMode();
3507 
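       // With no index register, the constant offset was already folded into
       // SubReg above, so a plain INSERT_SUBREG is enough.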
3508   if (Idx->getReg() == AMDGPU::NoRegister) {
3509     MachineBasicBlock::iterator I(&MI);
3510     const DebugLoc &DL = MI.getDebugLoc();
3511 
3512     assert(Offset == 0);
3513 
3514     BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
3515         .add(*SrcVec)
3516         .add(*Val)
3517         .addImm(SubReg);
3518 
3519     MI.eraseFromParent();
3520     return &MBB;
3521   }
3522 
3523   const MCInstrDesc &MovRelDesc
3524     = TII->getIndirectRegWritePseudo(TRI.getRegSizeInBits(*VecRC), 32, false);
3525 
3526   if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
3527     MachineBasicBlock::iterator I(&MI);
3528     const DebugLoc &DL = MI.getDebugLoc();
3529     BuildMI(MBB, I, DL, MovRelDesc, Dst)
3530       .addReg(SrcVec->getReg())
3531       .add(*Val)
3532       .addImm(SubReg);
3533     if (UseGPRIdxMode)
3534       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3535 
3536     MI.eraseFromParent();
3537     return &MBB;
3538   }
3539 
3540   if (Val->isReg())
3541     MRI.clearKillFlags(Val->getReg());
3542 
3543   const DebugLoc &DL = MI.getDebugLoc();
3544 
3545   Register PhiReg = MRI.createVirtualRegister(VecRC);
3546 
3547   auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
3548                               Offset, UseGPRIdxMode, false);
3549   MachineBasicBlock *LoopBB = InsPt->getParent();
3550 
3551   BuildMI(*LoopBB, InsPt, DL, MovRelDesc, Dst)
3552     .addReg(PhiReg)
3553     .add(*Val)
3554     .addImm(AMDGPU::sub0);
3555   if (UseGPRIdxMode)
3556     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3557 
3558   MI.eraseFromParent();
3559   return LoopBB;
3560 }
3561 
3562 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3563   MachineInstr &MI, MachineBasicBlock *BB) const {
3564 
3565   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3566   MachineFunction *MF = BB->getParent();
3567   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3568 
3569   if (TII->isMIMG(MI)) {
3570     if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3571       report_fatal_error("missing mem operand from MIMG instruction");
3572     }
3573     // Add a memoperand for MIMG instructions so that they aren't assumed to
3574     // be ordered memory instructions.
3575 
3576     return BB;
3577   }
3578 
3579   switch (MI.getOpcode()) {
3580   case AMDGPU::S_ADD_U64_PSEUDO:
3581   case AMDGPU::S_SUB_U64_PSEUDO: {
3582     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3583     const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3584     const SIRegisterInfo *TRI = ST.getRegisterInfo();
3585     const TargetRegisterClass *BoolRC = TRI->getBoolRC();
3586     const DebugLoc &DL = MI.getDebugLoc();
3587 
3588     MachineOperand &Dest = MI.getOperand(0);
3589     MachineOperand &Src0 = MI.getOperand(1);
3590     MachineOperand &Src1 = MI.getOperand(2);
3591 
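         // Expand the 64-bit operation into a low 32-bit op followed by a
         // carry-consuming high 32-bit op, then recombine the halves with a
         // REG_SEQUENCE.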
3592     Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3593     Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3594 
3595     MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3596      Src0, BoolRC, AMDGPU::sub0,
3597      &AMDGPU::SReg_32RegClass);
3598     MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3599       Src0, BoolRC, AMDGPU::sub1,
3600       &AMDGPU::SReg_32RegClass);
3601 
3602     MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3603       Src1, BoolRC, AMDGPU::sub0,
3604       &AMDGPU::SReg_32RegClass);
3605     MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3606       Src1, BoolRC, AMDGPU::sub1,
3607       &AMDGPU::SReg_32RegClass);
3608 
3609     bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3610 
3611     unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3612     unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3613     BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3614       .add(Src0Sub0)
3615       .add(Src1Sub0);
3616     BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3617       .add(Src0Sub1)
3618       .add(Src1Sub1);
3619     BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3620       .addReg(DestSub0)
3621       .addImm(AMDGPU::sub0)
3622       .addReg(DestSub1)
3623       .addImm(AMDGPU::sub1);
3624     MI.eraseFromParent();
3625     return BB;
3626   }
3627   case AMDGPU::SI_INIT_M0: {
3628     BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
3629             TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3630         .add(MI.getOperand(0));
3631     MI.eraseFromParent();
3632     return BB;
3633   }
3634   case AMDGPU::SI_INIT_EXEC:
3635     // This should be before all vector instructions.
3636     BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3637             AMDGPU::EXEC)
3638         .addImm(MI.getOperand(0).getImm());
3639     MI.eraseFromParent();
3640     return BB;
3641 
3642   case AMDGPU::SI_INIT_EXEC_LO:
3643     // This should be before all vector instructions.
3644     BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B32),
3645             AMDGPU::EXEC_LO)
3646         .addImm(MI.getOperand(0).getImm());
3647     MI.eraseFromParent();
3648     return BB;
3649 
3650   case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3651     // Extract the thread count from an SGPR input and set EXEC accordingly.
3652     // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3653     //
3654     // S_BFE_U32 count, input, {shift, 7}
3655     // S_BFM_B64 exec, count, 0
3656     // S_CMP_EQ_U32 count, 64
3657     // S_CMOV_B64 exec, -1
3658     MachineInstr *FirstMI = &*BB->begin();
3659     MachineRegisterInfo &MRI = MF->getRegInfo();
3660     Register InputReg = MI.getOperand(0).getReg();
3661     Register CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3662     bool Found = false;
3663 
3664     // Move the COPY of the input reg to the beginning, so that we can use it.
3665     for (auto I = BB->begin(); I != &MI; I++) {
3666       if (I->getOpcode() != TargetOpcode::COPY ||
3667           I->getOperand(0).getReg() != InputReg)
3668         continue;
3669 
3670       if (I == FirstMI) {
3671         FirstMI = &*++BB->begin();
3672       } else {
3673         I->removeFromParent();
3674         BB->insert(FirstMI, &*I);
3675       }
3676       Found = true;
3677       break;
3678     }
3679     assert(Found);
3680     (void)Found;
3681 
3682     // This should be before all vector instructions.
3683     unsigned Mask = (getSubtarget()->getWavefrontSize() << 1) - 1;
3684     bool isWave32 = getSubtarget()->isWave32();
3685     unsigned Exec = isWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3686     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3687         .addReg(InputReg)
3688         .addImm((MI.getOperand(1).getImm() & Mask) | 0x70000);
3689     BuildMI(*BB, FirstMI, DebugLoc(),
3690             TII->get(isWave32 ? AMDGPU::S_BFM_B32 : AMDGPU::S_BFM_B64),
3691             Exec)
3692         .addReg(CountReg)
3693         .addImm(0);
3694     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3695         .addReg(CountReg, RegState::Kill)
3696         .addImm(getSubtarget()->getWavefrontSize());
3697     BuildMI(*BB, FirstMI, DebugLoc(),
3698             TII->get(isWave32 ? AMDGPU::S_CMOV_B32 : AMDGPU::S_CMOV_B64),
3699             Exec)
3700         .addImm(-1);
3701     MI.eraseFromParent();
3702     return BB;
3703   }
3704 
3705   case AMDGPU::GET_GROUPSTATICSIZE: {
3706     assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA ||
3707            getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL);
3708     DebugLoc DL = MI.getDebugLoc();
3709     BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
3710         .add(MI.getOperand(0))
3711         .addImm(MFI->getLDSSize());
3712     MI.eraseFromParent();
3713     return BB;
3714   }
3715   case AMDGPU::SI_INDIRECT_SRC_V1:
3716   case AMDGPU::SI_INDIRECT_SRC_V2:
3717   case AMDGPU::SI_INDIRECT_SRC_V4:
3718   case AMDGPU::SI_INDIRECT_SRC_V8:
3719   case AMDGPU::SI_INDIRECT_SRC_V16:
3720     return emitIndirectSrc(MI, *BB, *getSubtarget());
3721   case AMDGPU::SI_INDIRECT_DST_V1:
3722   case AMDGPU::SI_INDIRECT_DST_V2:
3723   case AMDGPU::SI_INDIRECT_DST_V4:
3724   case AMDGPU::SI_INDIRECT_DST_V8:
3725   case AMDGPU::SI_INDIRECT_DST_V16:
3726     return emitIndirectDst(MI, *BB, *getSubtarget());
3727   case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3728   case AMDGPU::SI_KILL_I1_PSEUDO:
3729     return splitKillBlock(MI, BB);
3730   case AMDGPU::V_CNDMASK_B64_PSEUDO: {
3731     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3732     const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3733     const SIRegisterInfo *TRI = ST.getRegisterInfo();
3734 
3735     Register Dst = MI.getOperand(0).getReg();
3736     Register Src0 = MI.getOperand(1).getReg();
3737     Register Src1 = MI.getOperand(2).getReg();
3738     const DebugLoc &DL = MI.getDebugLoc();
3739     Register SrcCond = MI.getOperand(3).getReg();
3740 
3741     Register DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3742     Register DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3743     const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
3744     Register SrcCondCopy = MRI.createVirtualRegister(CondRC);
3745 
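         // Select each 32-bit half with V_CNDMASK_B32 and recombine the halves
         // with a REG_SEQUENCE.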
3746     BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3747       .addReg(SrcCond);
3748     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3749       .addImm(0)
3750       .addReg(Src0, 0, AMDGPU::sub0)
3751       .addImm(0)
3752       .addReg(Src1, 0, AMDGPU::sub0)
3753       .addReg(SrcCondCopy);
3754     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3755       .addImm(0)
3756       .addReg(Src0, 0, AMDGPU::sub1)
3757       .addImm(0)
3758       .addReg(Src1, 0, AMDGPU::sub1)
3759       .addReg(SrcCondCopy);
3760 
3761     BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3762       .addReg(DstLo)
3763       .addImm(AMDGPU::sub0)
3764       .addReg(DstHi)
3765       .addImm(AMDGPU::sub1);
3766     MI.eraseFromParent();
3767     return BB;
3768   }
3769   case AMDGPU::SI_BR_UNDEF: {
3770     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3771     const DebugLoc &DL = MI.getDebugLoc();
3772     MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3773                            .add(MI.getOperand(0));
3774     Br->getOperand(1).setIsUndef(true); // read undef SCC
3775     MI.eraseFromParent();
3776     return BB;
3777   }
3778   case AMDGPU::ADJCALLSTACKUP:
3779   case AMDGPU::ADJCALLSTACKDOWN: {
3780     const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3781     MachineInstrBuilder MIB(*MF, &MI);
3782 
3783     // Add an implicit use of the frame offset reg to prevent the restore copy
3784     // inserted after the call from being reordered after stack operations in
3785     // the caller's frame.
3786     MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
3787         .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit)
3788         .addReg(Info->getFrameOffsetReg(), RegState::Implicit);
3789     return BB;
3790   }
3791   case AMDGPU::SI_CALL_ISEL: {
3792     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3793     const DebugLoc &DL = MI.getDebugLoc();
3794 
3795     unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
3796 
3797     MachineInstrBuilder MIB;
3798     MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg);
3799 
3800     for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
3801       MIB.add(MI.getOperand(I));
3802 
3803     MIB.cloneMemRefs(MI);
3804     MI.eraseFromParent();
3805     return BB;
3806   }
3807   case AMDGPU::V_ADD_I32_e32:
3808   case AMDGPU::V_SUB_I32_e32:
3809   case AMDGPU::V_SUBREV_I32_e32: {
3810     // TODO: Define distinct V_*_I32_Pseudo instructions instead.
3811     const DebugLoc &DL = MI.getDebugLoc();
3812     unsigned Opc = MI.getOpcode();
3813 
3814     bool NeedClampOperand = false;
3815     if (TII->pseudoToMCOpcode(Opc) == -1) {
3816       Opc = AMDGPU::getVOPe64(Opc);
3817       NeedClampOperand = true;
3818     }
3819 
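         // Rebuild the instruction; the VOP3 (e64) form additionally defines a
         // carry-out in VCC and takes a clamp operand.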
3820     auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg());
3821     if (TII->isVOP3(*I)) {
3822       const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3823       const SIRegisterInfo *TRI = ST.getRegisterInfo();
3824       I.addReg(TRI->getVCC(), RegState::Define);
3825     }
3826     I.add(MI.getOperand(1))
3827      .add(MI.getOperand(2));
3828     if (NeedClampOperand)
3829       I.addImm(0); // clamp bit for e64 encoding
3830 
3831     TII->legalizeOperands(*I);
3832 
3833     MI.eraseFromParent();
3834     return BB;
3835   }
3836   case AMDGPU::DS_GWS_INIT:
3837   case AMDGPU::DS_GWS_SEMA_V:
3838   case AMDGPU::DS_GWS_SEMA_BR:
3839   case AMDGPU::DS_GWS_SEMA_P:
3840   case AMDGPU::DS_GWS_SEMA_RELEASE_ALL:
3841   case AMDGPU::DS_GWS_BARRIER:
3842     // An s_waitcnt 0 is required to be the instruction immediately following.
3843     if (getSubtarget()->hasGWSAutoReplay()) {
3844       bundleInstWithWaitcnt(MI);
3845       return BB;
3846     }
3847 
3848     return emitGWSMemViolTestLoop(MI, BB);
3849   default:
3850     return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
3851   }
3852 }
3853 
3854 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3855   return isTypeLegal(VT.getScalarType());
3856 }
3857 
3858 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3859   // This currently forces unfolding various combinations of fsub into fma with
3860   // free fneg'd operands. As long as we have fast FMA (controlled by
3861   // isFMAFasterThanFMulAndFAdd), we should perform these.
3862 
3863   // When fma is quarter rate, for f64 (where add / sub are at best half rate)
3864   // most of these combines appear to be cycle neutral but save on instruction
3865   // count / code size.
3866   return true;
3867 }
3868 
3869 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3870                                          EVT VT) const {
3871   if (!VT.isVector()) {
3872     return MVT::i1;
3873   }
3874   return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
3875 }
3876 
3877 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3878   // TODO: Should i16 be used always if legal? For now it would force VALU
3879   // shifts.
3880   return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
3881 }
3882 
3883 // Answering this is somewhat tricky and depends on the specific device, since
3884 // devices have different rates for fma and for f64 operations in general.
3885 //
3886 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3887 // regardless of which device (although the number of cycles differs between
3888 // devices), so it is always profitable for f64.
3889 //
3890 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3891 // only on full rate devices. Normally, we should prefer selecting v_mad_f32
3892 // which we can always do even without fused FP ops since it returns the same
3893 // result as the separate operations and since it is always full
3894 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
3895 // however does not support denormals, so we do report fma as faster if we have
3896 // a fast fma device and require denormals.
3897 //
3898 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3899                                                   EVT VT) const {
3900   VT = VT.getScalarType();
3901 
3902   switch (VT.getSimpleVT().SimpleTy) {
3903   case MVT::f32: {
3904     // This is as fast on some subtargets. However, we always have full rate f32
3905     // mad available which returns the same result as the separate operations
3906     // which we should prefer over fma. We can't use this if we want to support
3907     // denormals, so only report this in these cases.
3908     if (hasFP32Denormals(MF))
3909       return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
3910 
3911     // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
3912     return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
3913   }
3914   case MVT::f64:
3915     return true;
3916   case MVT::f16:
3917     return Subtarget->has16BitInsts() && hasFP64FP16Denormals(MF);
3918   default:
3919     break;
3920   }
3921 
3922   return false;
3923 }
3924 
3925 bool SITargetLowering::isFMADLegalForFAddFSub(const SelectionDAG &DAG,
3926                                               const SDNode *N) const {
3927   // TODO: Check future ftz flag
3928   // v_mad_f32/v_mac_f32 do not support denormals.
3929   EVT VT = N->getValueType(0);
3930   if (VT == MVT::f32)
3931     return !hasFP32Denormals(DAG.getMachineFunction());
3932   if (VT == MVT::f16) {
3933     return Subtarget->hasMadF16() &&
3934            !hasFP64FP16Denormals(DAG.getMachineFunction());
3935   }
3936 
3937   return false;
3938 }
3939 
3940 //===----------------------------------------------------------------------===//
3941 // Custom DAG Lowering Operations
3942 //===----------------------------------------------------------------------===//
3943 
3944 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3945 // wider vector type is legal.
3946 SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
3947                                              SelectionDAG &DAG) const {
3948   unsigned Opc = Op.getOpcode();
3949   EVT VT = Op.getValueType();
3950   assert(VT == MVT::v4f16 || VT == MVT::v4i16);
3951 
3952   SDValue Lo, Hi;
3953   std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3954 
3955   SDLoc SL(Op);
3956   SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
3957                              Op->getFlags());
3958   SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
3959                              Op->getFlags());
3960 
3961   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3962 }
3963 
3964 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3965 // wider vector type is legal.
3966 SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
3967                                               SelectionDAG &DAG) const {
3968   unsigned Opc = Op.getOpcode();
3969   EVT VT = Op.getValueType();
3970   assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3971 
3972   SDValue Lo0, Hi0;
3973   std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3974   SDValue Lo1, Hi1;
3975   std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3976 
3977   SDLoc SL(Op);
3978 
3979   SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
3980                              Op->getFlags());
3981   SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
3982                              Op->getFlags());
3983 
3984   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3985 }
3986 
3987 SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op,
3988                                               SelectionDAG &DAG) const {
3989   unsigned Opc = Op.getOpcode();
3990   EVT VT = Op.getValueType();
3991   assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3992 
3993   SDValue Lo0, Hi0;
3994   std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3995   SDValue Lo1, Hi1;
3996   std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3997   SDValue Lo2, Hi2;
3998   std::tie(Lo2, Hi2) = DAG.SplitVectorOperand(Op.getNode(), 2);
3999 
4000   SDLoc SL(Op);
4001 
4002   SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, Lo2,
4003                              Op->getFlags());
4004   SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, Hi2,
4005                              Op->getFlags());
4006 
4007   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
4008 }
4009 
4011 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
4012   switch (Op.getOpcode()) {
4013   default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
4014   case ISD::BRCOND: return LowerBRCOND(Op, DAG);
4015   case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
4016   case ISD::LOAD: {
4017     SDValue Result = LowerLOAD(Op, DAG);
4018     assert((!Result.getNode() ||
4019             Result.getNode()->getNumValues() == 2) &&
4020            "Load should return a value and a chain");
4021     return Result;
4022   }
4023 
4024   case ISD::FSIN:
4025   case ISD::FCOS:
4026     return LowerTrig(Op, DAG);
4027   case ISD::SELECT: return LowerSELECT(Op, DAG);
4028   case ISD::FDIV: return LowerFDIV(Op, DAG);
4029   case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
4030   case ISD::STORE: return LowerSTORE(Op, DAG);
4031   case ISD::GlobalAddress: {
4032     MachineFunction &MF = DAG.getMachineFunction();
4033     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
4034     return LowerGlobalAddress(MFI, Op, DAG);
4035   }
4036   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
4037   case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
4038   case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
4039   case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
4040   case ISD::INSERT_SUBVECTOR:
4041     return lowerINSERT_SUBVECTOR(Op, DAG);
4042   case ISD::INSERT_VECTOR_ELT:
4043     return lowerINSERT_VECTOR_ELT(Op, DAG);
4044   case ISD::EXTRACT_VECTOR_ELT:
4045     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
4046   case ISD::VECTOR_SHUFFLE:
4047     return lowerVECTOR_SHUFFLE(Op, DAG);
4048   case ISD::BUILD_VECTOR:
4049     return lowerBUILD_VECTOR(Op, DAG);
4050   case ISD::FP_ROUND:
4051     return lowerFP_ROUND(Op, DAG);
4052   case ISD::TRAP:
4053     return lowerTRAP(Op, DAG);
4054   case ISD::DEBUGTRAP:
4055     return lowerDEBUGTRAP(Op, DAG);
4056   case ISD::FABS:
4057   case ISD::FNEG:
4058   case ISD::FCANONICALIZE:
4059   case ISD::BSWAP:
4060     return splitUnaryVectorOp(Op, DAG);
4061   case ISD::FMINNUM:
4062   case ISD::FMAXNUM:
4063     return lowerFMINNUM_FMAXNUM(Op, DAG);
4064   case ISD::FMA:
4065     return splitTernaryVectorOp(Op, DAG);
4066   case ISD::SHL:
4067   case ISD::SRA:
4068   case ISD::SRL:
4069   case ISD::ADD:
4070   case ISD::SUB:
4071   case ISD::MUL:
4072   case ISD::SMIN:
4073   case ISD::SMAX:
4074   case ISD::UMIN:
4075   case ISD::UMAX:
4076   case ISD::FADD:
4077   case ISD::FMUL:
4078   case ISD::FMINNUM_IEEE:
4079   case ISD::FMAXNUM_IEEE:
4080     return splitBinaryVectorOp(Op, DAG);
4081   }
4082   return SDValue();
4083 }
4084 
4085 static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
4086                                        const SDLoc &DL,
4087                                        SelectionDAG &DAG, bool Unpacked) {
4088   if (!LoadVT.isVector())
4089     return Result;
4090 
4091   if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
4092     // Truncate to v2i16/v4i16.
4093     EVT IntLoadVT = LoadVT.changeTypeToInteger();
4094 
    // Work around the legalizer not scalarizing truncate after vector op
    // legalization by not creating an intermediate vector trunc.
4097     SmallVector<SDValue, 4> Elts;
4098     DAG.ExtractVectorElements(Result, Elts);
4099     for (SDValue &Elt : Elts)
4100       Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
4101 
4102     Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
4103 
4104     // Bitcast to original type (v2f16/v4f16).
4105     return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
4106   }
4107 
4108   // Cast back to the original packed type.
4109   return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
4110 }
4111 
4112 SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
4113                                               MemSDNode *M,
4114                                               SelectionDAG &DAG,
4115                                               ArrayRef<SDValue> Ops,
4116                                               bool IsIntrinsic) const {
4117   SDLoc DL(M);
4118 
4119   bool Unpacked = Subtarget->hasUnpackedD16VMem();
4120   EVT LoadVT = M->getValueType(0);
4121 
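  // On subtargets with unpacked D16 memory instructions, each 16-bit component
  // of the result comes back in its own dword, so the in-register type is a
  // vector of i32 with the same element count as the requested f16 vector.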
  EVT EquivLoadVT = LoadVT;
  if (Unpacked && LoadVT.isVector()) {
    EquivLoadVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                   LoadVT.getVectorNumElements());
  }
4128 
4129   // Change from v4f16/v2f16 to EquivLoadVT.
4130   SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
4131 
4132   SDValue Load
4133     = DAG.getMemIntrinsicNode(
4134       IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
4135       VTList, Ops, M->getMemoryVT(),
4136       M->getMemOperand());
4137   if (!Unpacked) // Just adjusted the opcode.
4138     return Load;
4139 
4140   SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);
4141 
4142   return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
4143 }
4144 
4145 SDValue SITargetLowering::lowerIntrinsicLoad(MemSDNode *M, bool IsFormat,
4146                                              SelectionDAG &DAG,
4147                                              ArrayRef<SDValue> Ops) const {
4148   SDLoc DL(M);
4149   EVT LoadVT = M->getValueType(0);
4150   EVT EltType = LoadVT.getScalarType();
4151   EVT IntVT = LoadVT.changeTypeToInteger();
4152 
4153   bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
4154 
4155   unsigned Opc =
4156       IsFormat ? AMDGPUISD::BUFFER_LOAD_FORMAT : AMDGPUISD::BUFFER_LOAD;
4157 
4158   if (IsD16) {
4159     return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, M, DAG, Ops);
4160   }
4161 
4162   // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
4163   if (!IsD16 && !LoadVT.isVector() && EltType.getSizeInBits() < 32)
4164     return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
4165 
4166   if (isTypeLegal(LoadVT)) {
4167     return getMemIntrinsicNode(Opc, DL, M->getVTList(), Ops, IntVT,
4168                                M->getMemOperand(), DAG);
4169   }
4170 
4171   EVT CastVT = getEquivalentMemType(*DAG.getContext(), LoadVT);
4172   SDVTList VTList = DAG.getVTList(CastVT, MVT::Other);
4173   SDValue MemNode = getMemIntrinsicNode(Opc, DL, VTList, Ops, CastVT,
4174                                         M->getMemOperand(), DAG);
4175   return DAG.getMergeValues(
4176       {DAG.getNode(ISD::BITCAST, DL, LoadVT, MemNode), MemNode.getValue(1)},
4177       DL);
4178 }
4179 
4180 static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
4181                                   SDNode *N, SelectionDAG &DAG) {
4182   EVT VT = N->getValueType(0);
4183   const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
4184   int CondCode = CD->getSExtValue();
4185   if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
4186       CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
4187     return DAG.getUNDEF(VT);
4188 
4189   ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
4190 
4191   SDValue LHS = N->getOperand(1);
4192   SDValue RHS = N->getOperand(2);
4193 
4194   SDLoc DL(N);
4195 
4196   EVT CmpVT = LHS.getValueType();
4197   if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
4198     unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
4199       ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
4200     LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
4201     RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
4202   }
4203 
4204   ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
4205 
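  // AMDGPUISD::SETCC produces a lane mask with one bit per lane, so the result
  // type is an integer as wide as the wavefront.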
4206   unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
4207   EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
4208 
4209   SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS,
4210                               DAG.getCondCode(CCOpcode));
4211   if (VT.bitsEq(CCVT))
4212     return SetCC;
4213   return DAG.getZExtOrTrunc(SetCC, DL, VT);
4214 }
4215 
4216 static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
4217                                   SDNode *N, SelectionDAG &DAG) {
4218   EVT VT = N->getValueType(0);
4219   const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
4220 
4221   int CondCode = CD->getSExtValue();
4222   if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
4223       CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) {
4224     return DAG.getUNDEF(VT);
4225   }
4226 
4227   SDValue Src0 = N->getOperand(1);
4228   SDValue Src1 = N->getOperand(2);
4229   EVT CmpVT = Src0.getValueType();
4230   SDLoc SL(N);
4231 
4232   if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
4233     Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
4234     Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
4235   }
4236 
4237   FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
4238   ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
4239   unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
4240   EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
4241   SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0,
4242                               Src1, DAG.getCondCode(CCOpcode));
4243   if (VT.bitsEq(CCVT))
4244     return SetCC;
4245   return DAG.getZExtOrTrunc(SetCC, SL, VT);
4246 }
4247 
4248 void SITargetLowering::ReplaceNodeResults(SDNode *N,
4249                                           SmallVectorImpl<SDValue> &Results,
4250                                           SelectionDAG &DAG) const {
4251   switch (N->getOpcode()) {
4252   case ISD::INSERT_VECTOR_ELT: {
4253     if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
4254       Results.push_back(Res);
4255     return;
4256   }
4257   case ISD::EXTRACT_VECTOR_ELT: {
4258     if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
4259       Results.push_back(Res);
4260     return;
4261   }
4262   case ISD::INTRINSIC_WO_CHAIN: {
4263     unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
4264     switch (IID) {
4265     case Intrinsic::amdgcn_cvt_pkrtz: {
4266       SDValue Src0 = N->getOperand(1);
4267       SDValue Src1 = N->getOperand(2);
4268       SDLoc SL(N);
4269       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
4270                                 Src0, Src1);
4271       Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
4272       return;
4273     }
4274     case Intrinsic::amdgcn_cvt_pknorm_i16:
4275     case Intrinsic::amdgcn_cvt_pknorm_u16:
4276     case Intrinsic::amdgcn_cvt_pk_i16:
4277     case Intrinsic::amdgcn_cvt_pk_u16: {
4278       SDValue Src0 = N->getOperand(1);
4279       SDValue Src1 = N->getOperand(2);
4280       SDLoc SL(N);
4281       unsigned Opcode;
4282 
4283       if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
4284         Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
4285       else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
4286         Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
4287       else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
4288         Opcode = AMDGPUISD::CVT_PK_I16_I32;
4289       else
4290         Opcode = AMDGPUISD::CVT_PK_U16_U32;
4291 
4292       EVT VT = N->getValueType(0);
4293       if (isTypeLegal(VT))
4294         Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
4295       else {
4296         SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
4297         Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
4298       }
4299       return;
4300     }
4301     }
4302     break;
4303   }
4304   case ISD::INTRINSIC_W_CHAIN: {
4305     if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
4306       if (Res.getOpcode() == ISD::MERGE_VALUES) {
4307         // FIXME: Hacky
4308         Results.push_back(Res.getOperand(0));
4309         Results.push_back(Res.getOperand(1));
4310       } else {
4311         Results.push_back(Res);
4312         Results.push_back(Res.getValue(1));
4313       }
4314       return;
4315     }
4316 
4317     break;
4318   }
4319   case ISD::SELECT: {
4320     SDLoc SL(N);
4321     EVT VT = N->getValueType(0);
4322     EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
4323     SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
4324     SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
4325 
4326     EVT SelectVT = NewVT;
4327     if (NewVT.bitsLT(MVT::i32)) {
4328       LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
4329       RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
4330       SelectVT = MVT::i32;
4331     }
4332 
4333     SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
4334                                     N->getOperand(0), LHS, RHS);
4335 
4336     if (NewVT != SelectVT)
4337       NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
4338     Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
4339     return;
4340   }
4341   case ISD::FNEG: {
4342     if (N->getValueType(0) != MVT::v2f16)
4343       break;
4344 
4345     SDLoc SL(N);
4346     SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
4347 
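    // Flip the sign bit of both f16 halves with a single 32-bit XOR;
    // 0x80008000 has only the sign bit of each 16-bit lane set.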
4348     SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
4349                              BC,
4350                              DAG.getConstant(0x80008000, SL, MVT::i32));
4351     Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
4352     return;
4353   }
4354   case ISD::FABS: {
4355     if (N->getValueType(0) != MVT::v2f16)
4356       break;
4357 
4358     SDLoc SL(N);
4359     SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
4360 
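    // Clear the sign bit of both f16 halves with a single 32-bit AND;
    // 0x7fff7fff keeps every bit except the sign bit of each 16-bit lane.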
4361     SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
4362                              BC,
4363                              DAG.getConstant(0x7fff7fff, SL, MVT::i32));
4364     Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
4365     return;
4366   }
4367   default:
4368     break;
4369   }
4370 }
4371 
/// Helper function for LowerBRCOND: find the first user of \p Value whose
/// opcode is \p Opcode, or return null if there is none.
static SDNode *findUser(SDValue Value, unsigned Opcode) {
  SDNode *Parent = Value.getNode();
  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
       I != E; ++I) {
4379     if (I.getUse().get() != Value)
4380       continue;
4381 
4382     if (I->getOpcode() == Opcode)
4383       return *I;
4384   }
4385   return nullptr;
4386 }
4387 
4388 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
4389   if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
4390     switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
4391     case Intrinsic::amdgcn_if:
4392       return AMDGPUISD::IF;
4393     case Intrinsic::amdgcn_else:
4394       return AMDGPUISD::ELSE;
4395     case Intrinsic::amdgcn_loop:
4396       return AMDGPUISD::LOOP;
4397     case Intrinsic::amdgcn_end_cf:
4398       llvm_unreachable("should not occur");
4399     default:
4400       return 0;
4401     }
4402   }
4403 
4404   // break, if_break, else_break are all only used as inputs to loop, not
4405   // directly as branch conditions.
4406   return 0;
4407 }
4408 
4409 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
4410   const Triple &TT = getTargetMachine().getTargetTriple();
4411   return (GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4412           GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4413          AMDGPU::shouldEmitConstantsToTextSection(TT);
4414 }
4415 
4416 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
4417   // FIXME: Either avoid relying on address space here or change the default
4418   // address space for functions to avoid the explicit check.
4419   return (GV->getValueType()->isFunctionTy() ||
4420           !isNonGlobalAddrSpace(GV->getAddressSpace())) &&
4421          !shouldEmitFixup(GV) &&
4422          !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
4423 }
4424 
4425 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
4426   return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
4427 }
4428 
4429 bool SITargetLowering::shouldUseLDSConstAddress(const GlobalValue *GV) const {
4430   if (!GV->hasExternalLinkage())
4431     return true;
4432 
4433   const auto OS = getTargetMachine().getTargetTriple().getOS();
4434   return OS == Triple::AMDHSA || OS == Triple::AMDPAL;
4435 }
4436 
/// This transforms the control flow intrinsics to get the branch destination
/// as the last parameter; it also switches the branch target with BR if the
/// need arises.
4439 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
4440                                       SelectionDAG &DAG) const {
4441   SDLoc DL(BRCOND);
4442 
4443   SDNode *Intr = BRCOND.getOperand(1).getNode();
4444   SDValue Target = BRCOND.getOperand(2);
4445   SDNode *BR = nullptr;
4446   SDNode *SetCC = nullptr;
4447 
4448   if (Intr->getOpcode() == ISD::SETCC) {
4449     // As long as we negate the condition everything is fine
4450     SetCC = Intr;
4451     Intr = SetCC->getOperand(0).getNode();
4452 
4453   } else {
4454     // Get the target from BR if we don't negate the condition
4455     BR = findUser(BRCOND, ISD::BR);
4456     Target = BR->getOperand(1);
4457   }
4458 
4459   // FIXME: This changes the types of the intrinsics instead of introducing new
4460   // nodes with the correct types.
4461   // e.g. llvm.amdgcn.loop
4462 
4463   // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
4464   // =>     t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
4465 
4466   unsigned CFNode = isCFIntrinsic(Intr);
4467   if (CFNode == 0) {
4468     // This is a uniform branch so we don't need to legalize.
4469     return BRCOND;
4470   }
4471 
4472   bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
4473                    Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
4474 
4475   assert(!SetCC ||
4476         (SetCC->getConstantOperandVal(1) == 1 &&
4477          cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
4478                                                              ISD::SETNE));
4479 
4480   // operands of the new intrinsic call
4481   SmallVector<SDValue, 4> Ops;
4482   if (HaveChain)
4483     Ops.push_back(BRCOND.getOperand(0));
4484 
  Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
4486   Ops.push_back(Target);
4487 
4488   ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
4489 
4490   // build the new intrinsic call
4491   SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
4492 
4493   if (!HaveChain) {
4494     SDValue Ops[] =  {
4495       SDValue(Result, 0),
4496       BRCOND.getOperand(0)
4497     };
4498 
4499     Result = DAG.getMergeValues(Ops, DL).getNode();
4500   }
4501 
4502   if (BR) {
4503     // Give the branch instruction our target
4504     SDValue Ops[] = {
4505       BR->getOperand(0),
4506       BRCOND.getOperand(2)
4507     };
4508     SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
4509     DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
4510     BR = NewBR.getNode();
4511   }
4512 
4513   SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
4514 
4515   // Copy the intrinsic results to registers
4516   for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
4517     SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
4518     if (!CopyToReg)
4519       continue;
4520 
4521     Chain = DAG.getCopyToReg(
4522       Chain, DL,
4523       CopyToReg->getOperand(1),
4524       SDValue(Result, i - 1),
4525       SDValue());
4526 
4527     DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
4528   }
4529 
4530   // Remove the old intrinsic from the chain
4531   DAG.ReplaceAllUsesOfValueWith(
4532     SDValue(Intr, Intr->getNumValues() - 1),
4533     Intr->getOperand(0));
4534 
4535   return Chain;
4536 }
4537 
4538 SDValue SITargetLowering::LowerRETURNADDR(SDValue Op,
4539                                           SelectionDAG &DAG) const {
4540   MVT VT = Op.getSimpleValueType();
4541   SDLoc DL(Op);
4542   // Checking the depth
4543   if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0)
4544     return DAG.getConstant(0, DL, VT);
4545 
4546   MachineFunction &MF = DAG.getMachineFunction();
4547   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4548   // Check for kernel and shader functions
4549   if (Info->isEntryFunction())
4550     return DAG.getConstant(0, DL, VT);
4551 
4552   MachineFrameInfo &MFI = MF.getFrameInfo();
4553   // There is a call to @llvm.returnaddress in this function
4554   MFI.setReturnAddressIsTaken(true);
4555 
4556   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
4557   // Get the return address reg and mark it as an implicit live-in
  unsigned Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                              getRegClassFor(VT, Op.getNode()->isDivergent()));
4559 
4560   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
4561 }
4562 
4563 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
4564                                             SDValue Op,
4565                                             const SDLoc &DL,
4566                                             EVT VT) const {
  return Op.getValueType().bitsLE(VT) ?
      DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
      DAG.getNode(ISD::FP_ROUND, DL, VT, Op,
                  DAG.getTargetConstant(0, DL, MVT::i32));
4570 }
4571 
4572 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
4573   assert(Op.getValueType() == MVT::f16 &&
4574          "Do not know how to custom lower FP_ROUND for non-f16 type");
4575 
4576   SDValue Src = Op.getOperand(0);
4577   EVT SrcVT = Src.getValueType();
4578   if (SrcVT != MVT::f64)
4579     return Op;
4580 
4581   SDLoc DL(Op);
4582 
4583   SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
4584   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
4585   return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
4586 }
4587 
4588 SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
4589                                                SelectionDAG &DAG) const {
4590   EVT VT = Op.getValueType();
4591   const MachineFunction &MF = DAG.getMachineFunction();
4592   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4593   bool IsIEEEMode = Info->getMode().IEEE;
4594 
  // FIXME: Assert during selection that this is only selected for
  // ieee_mode. Currently a combine can produce the ieee version for non-ieee
  // mode functions, but this happens to be OK since it's only done in cases
  // where it is known there are no sNaNs.
4599   if (IsIEEEMode)
4600     return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);
4601 
4602   if (VT == MVT::v4f16)
4603     return splitBinaryVectorOp(Op, DAG);
4604   return Op;
4605 }
4606 
4607 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
4608   SDLoc SL(Op);
4609   SDValue Chain = Op.getOperand(0);
4610 
4611   if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4612       !Subtarget->isTrapHandlerEnabled())
4613     return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
4614 
4615   MachineFunction &MF = DAG.getMachineFunction();
4616   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4617   unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4618   assert(UserSGPR != AMDGPU::NoRegister);
4619   SDValue QueuePtr = CreateLiveInRegister(
4620     DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
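  // The HSA trap handler expects the queue pointer in SGPR0/1, so copy it
  // there and keep the pair live by threading the copy's glue into the TRAP
  // node.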
4621   SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
4622   SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
4623                                    QueuePtr, SDValue());
4624   SDValue Ops[] = {
4625     ToReg,
4626     DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16),
4627     SGPR01,
4628     ToReg.getValue(1)
4629   };
4630   return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4631 }
4632 
4633 SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
4634   SDLoc SL(Op);
4635   SDValue Chain = Op.getOperand(0);
4636   MachineFunction &MF = DAG.getMachineFunction();
4637 
4638   if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4639       !Subtarget->isTrapHandlerEnabled()) {
4640     DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
4641                                      "debugtrap handler not supported",
4642                                      Op.getDebugLoc(),
4643                                      DS_Warning);
4644     LLVMContext &Ctx = MF.getFunction().getContext();
4645     Ctx.diagnose(NoTrap);
4646     return Chain;
4647   }
4648 
4649   SDValue Ops[] = {
4650     Chain,
4651     DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16)
4652   };
4653   return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4654 }
4655 
4656 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
4657                                              SelectionDAG &DAG) const {
4658   // FIXME: Use inline constants (src_{shared, private}_base) instead.
4659   if (Subtarget->hasApertureRegs()) {
4660     unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
4661         AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
4662         AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
4663     unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
4664         AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
4665         AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
4666     unsigned Encoding =
4667         AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
4668         Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
4669         WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
4670 
4671     SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
4672     SDValue ApertureReg = SDValue(
4673         DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
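    // S_GETREG_B32 returns the aperture field in the low bits of the result;
    // shift it up by the field width to form the high 32 bits of the aperture
    // base address.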
4674     SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
4675     return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
4676   }
4677 
4678   MachineFunction &MF = DAG.getMachineFunction();
4679   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4680   unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4681   assert(UserSGPR != AMDGPU::NoRegister);
4682 
4683   SDValue QueuePtr = CreateLiveInRegister(
4684     DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4685 
4686   // Offset into amd_queue_t for group_segment_aperture_base_hi /
4687   // private_segment_aperture_base_hi.
4688   uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
4689 
4690   SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
4691 
4692   // TODO: Use custom target PseudoSourceValue.
4693   // TODO: We should use the value from the IR intrinsic call, but it might not
4694   // be available and how do we get it?
4695   MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
4696   return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
4697                      MinAlign(64, StructOffset),
4698                      MachineMemOperand::MODereferenceable |
4699                          MachineMemOperand::MOInvariant);
4700 }
4701 
4702 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
4703                                              SelectionDAG &DAG) const {
4704   SDLoc SL(Op);
4705   const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
4706 
4707   SDValue Src = ASC->getOperand(0);
4708   SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
4709 
4710   const AMDGPUTargetMachine &TM =
4711     static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
4712 
4713   // flat -> local/private
4714   if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
4715     unsigned DestAS = ASC->getDestAddressSpace();
4716 
4717     if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
4718         DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
4719       unsigned NullVal = TM.getNullPointerValue(DestAS);
4720       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4721       SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
4722       SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
4723 
4724       return DAG.getNode(ISD::SELECT, SL, MVT::i32,
4725                          NonNull, Ptr, SegmentNullPtr);
4726     }
4727   }
4728 
4729   // local/private -> flat
4730   if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
4731     unsigned SrcAS = ASC->getSrcAddressSpace();
4732 
4733     if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
4734         SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
4735       unsigned NullVal = TM.getNullPointerValue(SrcAS);
4736       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4737 
4738       SDValue NonNull
4739         = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
4740 
4741       SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
4742       SDValue CvtPtr
4743         = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
4744 
4745       return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
4746                          DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
4747                          FlatNullPtr);
4748     }
4749   }
4750 
4751   // global <-> flat are no-ops and never emitted.
4752 
4753   const MachineFunction &MF = DAG.getMachineFunction();
4754   DiagnosticInfoUnsupported InvalidAddrSpaceCast(
4755     MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
4756   DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4757 
4758   return DAG.getUNDEF(ASC->getValueType(0));
4759 }
4760 
4761 // This lowers an INSERT_SUBVECTOR by extracting the individual elements from
4762 // the small vector and inserting them into the big vector. That is better than
4763 // the default expansion of doing it via a stack slot. Even though the use of
4764 // the stack slot would be optimized away afterwards, the stack slot itself
4765 // remains.
4766 SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
4767                                                 SelectionDAG &DAG) const {
4768   SDValue Vec = Op.getOperand(0);
4769   SDValue Ins = Op.getOperand(1);
4770   SDValue Idx = Op.getOperand(2);
4771   EVT VecVT = Vec.getValueType();
4772   EVT InsVT = Ins.getValueType();
4773   EVT EltVT = VecVT.getVectorElementType();
4774   unsigned InsNumElts = InsVT.getVectorNumElements();
4775   unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
4776   SDLoc SL(Op);
4777 
4778   for (unsigned I = 0; I != InsNumElts; ++I) {
4779     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins,
4780                               DAG.getConstant(I, SL, MVT::i32));
4781     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt,
4782                       DAG.getConstant(IdxVal + I, SL, MVT::i32));
4783   }
4784   return Vec;
4785 }
4786 
4787 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4788                                                  SelectionDAG &DAG) const {
4789   SDValue Vec = Op.getOperand(0);
4790   SDValue InsVal = Op.getOperand(1);
4791   SDValue Idx = Op.getOperand(2);
4792   EVT VecVT = Vec.getValueType();
4793   EVT EltVT = VecVT.getVectorElementType();
4794   unsigned VecSize = VecVT.getSizeInBits();
4795   unsigned EltSize = EltVT.getSizeInBits();

  assert(VecSize <= 64);
4799 
4800   unsigned NumElts = VecVT.getVectorNumElements();
4801   SDLoc SL(Op);
4802   auto KIdx = dyn_cast<ConstantSDNode>(Idx);
4803 
4804   if (NumElts == 4 && EltSize == 16 && KIdx) {
4805     SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
4806 
4807     SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4808                                  DAG.getConstant(0, SL, MVT::i32));
4809     SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4810                                  DAG.getConstant(1, SL, MVT::i32));
4811 
4812     SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
4813     SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
4814 
4815     unsigned Idx = KIdx->getZExtValue();
4816     bool InsertLo = Idx < 2;
4817     SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
4818       InsertLo ? LoVec : HiVec,
4819       DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
4820       DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
4821 
4822     InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
4823 
4824     SDValue Concat = InsertLo ?
4825       DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
4826       DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
4827 
4828     return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
4829   }
4830 
4831   if (isa<ConstantSDNode>(Idx))
4832     return SDValue();
4833 
4834   MVT IntVT = MVT::getIntegerVT(VecSize);
4835 
4836   // Avoid stack access for dynamic indexing.
4837   // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
4838 
4839   // Create a congruent vector with the target value in each element so that
4840   // the required element can be masked and ORed into the target vector.
4841   SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT,
4842                                DAG.getSplatBuildVector(VecVT, SL, InsVal));
4843 
4844   assert(isPowerOf2_32(EltSize));
4845   SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4846 
4847   // Convert vector index to bit-index.
4848   SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
4849 
4850   SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4851   SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
4852                             DAG.getConstant(0xffff, SL, IntVT),
4853                             ScaledIdx);
4854 
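  // LHS holds the inserted value in the selected lane, RHS the original vector
  // with that lane cleared; OR-ing them implements the BFI pattern above.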
4855   SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);
4856   SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT,
4857                             DAG.getNOT(SL, BFM, IntVT), BCVec);
4858 
4859   SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS);
4860   return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI);
4861 }
4862 
4863 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4864                                                   SelectionDAG &DAG) const {
4865   SDLoc SL(Op);
4866 
4867   EVT ResultVT = Op.getValueType();
4868   SDValue Vec = Op.getOperand(0);
4869   SDValue Idx = Op.getOperand(1);
4870   EVT VecVT = Vec.getValueType();
4871   unsigned VecSize = VecVT.getSizeInBits();
4872   EVT EltVT = VecVT.getVectorElementType();
4873   assert(VecSize <= 64);
4874 
4875   DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
4876 
4877   // Make sure we do any optimizations that will make it easier to fold
4878   // source modifiers before obscuring it with bit operations.
4879 
4880   // XXX - Why doesn't this get called when vector_shuffle is expanded?
4881   if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
4882     return Combined;
4883 
4884   unsigned EltSize = EltVT.getSizeInBits();
4885   assert(isPowerOf2_32(EltSize));
4886 
4887   MVT IntVT = MVT::getIntegerVT(VecSize);
4888   SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4889 
4890   // Convert vector index to bit-index (* EltSize)
4891   SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
4892 
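  // Shift the desired element down to bit 0 of the integer-typed vector, then
  // truncate or extend it to the result type below.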
4893   SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4894   SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx);
4895 
4896   if (ResultVT == MVT::f16) {
4897     SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt);
4898     return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4899   }
4900 
4901   return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT);
4902 }
4903 
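// Check whether the pair of shuffle mask elements starting at \p Elt reads two
// consecutive source elements beginning at an even index, so the pair can be
// copied with a single packed subvector extract.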
4904 static bool elementPairIsContiguous(ArrayRef<int> Mask, int Elt) {
4905   assert(Elt % 2 == 0);
4906   return Mask[Elt + 1] == Mask[Elt] + 1 && (Mask[Elt] % 2 == 0);
4907 }
4908 
4909 SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
4910                                               SelectionDAG &DAG) const {
4911   SDLoc SL(Op);
4912   EVT ResultVT = Op.getValueType();
4913   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
4914 
4915   EVT PackVT = ResultVT.isInteger() ? MVT::v2i16 : MVT::v2f16;
4916   EVT EltVT = PackVT.getVectorElementType();
4917   int SrcNumElts = Op.getOperand(0).getValueType().getVectorNumElements();
4918 
4919   // vector_shuffle <0,1,6,7> lhs, rhs
4920   // -> concat_vectors (extract_subvector lhs, 0), (extract_subvector rhs, 2)
4921   //
4922   // vector_shuffle <6,7,2,3> lhs, rhs
4923   // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 2)
4924   //
4925   // vector_shuffle <6,7,0,1> lhs, rhs
4926   // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 0)
4927 
4928   // Avoid scalarizing when both halves are reading from consecutive elements.
4929   SmallVector<SDValue, 4> Pieces;
4930   for (int I = 0, N = ResultVT.getVectorNumElements(); I != N; I += 2) {
4931     if (elementPairIsContiguous(SVN->getMask(), I)) {
4932       const int Idx = SVN->getMaskElt(I);
4933       int VecIdx = Idx < SrcNumElts ? 0 : 1;
4934       int EltIdx = Idx < SrcNumElts ? Idx : Idx - SrcNumElts;
4935       SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL,
4936                                     PackVT, SVN->getOperand(VecIdx),
4937                                     DAG.getConstant(EltIdx, SL, MVT::i32));
4938       Pieces.push_back(SubVec);
4939     } else {
4940       const int Idx0 = SVN->getMaskElt(I);
4941       const int Idx1 = SVN->getMaskElt(I + 1);
4942       int VecIdx0 = Idx0 < SrcNumElts ? 0 : 1;
4943       int VecIdx1 = Idx1 < SrcNumElts ? 0 : 1;
4944       int EltIdx0 = Idx0 < SrcNumElts ? Idx0 : Idx0 - SrcNumElts;
4945       int EltIdx1 = Idx1 < SrcNumElts ? Idx1 : Idx1 - SrcNumElts;
4946 
4947       SDValue Vec0 = SVN->getOperand(VecIdx0);
4948       SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
4949                                  Vec0, DAG.getConstant(EltIdx0, SL, MVT::i32));
4950 
4951       SDValue Vec1 = SVN->getOperand(VecIdx1);
4952       SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
4953                                  Vec1, DAG.getConstant(EltIdx1, SL, MVT::i32));
4954       Pieces.push_back(DAG.getBuildVector(PackVT, SL, { Elt0, Elt1 }));
4955     }
4956   }
4957 
4958   return DAG.getNode(ISD::CONCAT_VECTORS, SL, ResultVT, Pieces);
4959 }
4960 
4961 SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
4962                                             SelectionDAG &DAG) const {
4963   SDLoc SL(Op);
4964   EVT VT = Op.getValueType();
4965 
4966   if (VT == MVT::v4i16 || VT == MVT::v4f16) {
4967     EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2);
4968 
4969     // Turn into pair of packed build_vectors.
4970     // TODO: Special case for constants that can be materialized with s_mov_b64.
4971     SDValue Lo = DAG.getBuildVector(HalfVT, SL,
4972                                     { Op.getOperand(0), Op.getOperand(1) });
4973     SDValue Hi = DAG.getBuildVector(HalfVT, SL,
4974                                     { Op.getOperand(2), Op.getOperand(3) });
4975 
4976     SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo);
4977     SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi);
4978 
4979     SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi });
4980     return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
4981   }
4982 
4983   assert(VT == MVT::v2f16 || VT == MVT::v2i16);
4984   assert(!Subtarget->hasVOP3PInsts() && "this should be legal");
4985 
4986   SDValue Lo = Op.getOperand(0);
4987   SDValue Hi = Op.getOperand(1);
4988 
4989   // Avoid adding defined bits with the zero_extend.
4990   if (Hi.isUndef()) {
4991     Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4992     SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo);
4993     return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo);
4994   }
4995 
4996   Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi);
4997   Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi);
4998 
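  // Pack the two halves into an i32: Hi occupies the upper 16 bits and Lo the
  // lower 16 bits.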
4999   SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi,
5000                               DAG.getConstant(16, SL, MVT::i32));
5001   if (Lo.isUndef())
5002     return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi);
5003 
5004   Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
5005   Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo);
5006 
5007   SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi);
5008   return DAG.getNode(ISD::BITCAST, SL, VT, Or);
5009 }
5010 
5011 bool
5012 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
5013   // We can fold offsets for anything that doesn't require a GOT relocation.
5014   return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
5015           GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
5016           GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
5017          !shouldEmitGOTReloc(GA->getGlobal());
5018 }
5019 
5020 static SDValue
5021 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
5022                         const SDLoc &DL, unsigned Offset, EVT PtrVT,
5023                         unsigned GAFlags = SIInstrInfo::MO_NONE) {
5024   // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
5025   // lowered to the following code sequence:
5026   //
5027   // For constant address space:
5028   //   s_getpc_b64 s[0:1]
5029   //   s_add_u32 s0, s0, $symbol
5030   //   s_addc_u32 s1, s1, 0
5031   //
5032   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
5033   //   a fixup or relocation is emitted to replace $symbol with a literal
5034   //   constant, which is a pc-relative offset from the encoding of the $symbol
5035   //   operand to the global variable.
5036   //
5037   // For global address space:
5038   //   s_getpc_b64 s[0:1]
5039   //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
5040   //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
5041   //
5042   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
5043   //   fixups or relocations are emitted to replace $symbol@*@lo and
5044   //   $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
5045   //   which is a 64-bit pc-relative offset from the encoding of the $symbol
5046   //   operand to the global variable.
5047   //
5048   // What we want here is an offset from the value returned by s_getpc
5049   // (which is the address of the s_add_u32 instruction) to the global
5050   // variable, but since the encoding of $symbol starts 4 bytes after the start
5051   // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
5052   // small. This requires us to add 4 to the global variable offset in order to
5053   // compute the correct address.
5054   SDValue PtrLo =
5055       DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags);
5056   SDValue PtrHi;
5057   if (GAFlags == SIInstrInfo::MO_NONE) {
5058     PtrHi = DAG.getTargetConstant(0, DL, MVT::i32);
5059   } else {
5060     PtrHi =
5061         DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags + 1);
5062   }
5063   return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
5064 }
5065 
5066 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
5067                                              SDValue Op,
5068                                              SelectionDAG &DAG) const {
5069   GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
5070   const GlobalValue *GV = GSD->getGlobal();
5071   if ((GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
5072        shouldUseLDSConstAddress(GV)) ||
5073       GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS ||
5074       GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS)
5075     return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
5076 
5077   SDLoc DL(GSD);
5078   EVT PtrVT = Op.getValueType();
5079 
5080   if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
5081     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, GSD->getOffset(),
5082                                             SIInstrInfo::MO_ABS32_LO);
5083     return DAG.getNode(AMDGPUISD::LDS, DL, MVT::i32, GA);
5084   }
5085 
5086   if (shouldEmitFixup(GV))
5087     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
5088   else if (shouldEmitPCReloc(GV))
5089     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
5090                                    SIInstrInfo::MO_REL32);
5091 
5092   SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
5093                                             SIInstrInfo::MO_GOTPCREL32);
5094 
5095   Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
5096   PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
5097   const DataLayout &DataLayout = DAG.getDataLayout();
5098   unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
5099   MachinePointerInfo PtrInfo
5100     = MachinePointerInfo::getGOT(DAG.getMachineFunction());
5101 
5102   return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
5103                      MachineMemOperand::MODereferenceable |
5104                          MachineMemOperand::MOInvariant);
5105 }
5106 
5107 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
5108                                    const SDLoc &DL, SDValue V) const {
5109   // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
5110   // the destination register.
5111   //
5112   // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
5113   // so we will end up with redundant moves to m0.
5114   //
5115   // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
5116 
5117   // A Null SDValue creates a glue result.
5118   SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
5119                                   V, Chain);
5120   return SDValue(M0, 0);
5121 }
5122 
5123 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
5124                                                  SDValue Op,
5125                                                  MVT VT,
5126                                                  unsigned Offset) const {
5127   SDLoc SL(Op);
5128   SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
5129                                            DAG.getEntryNode(), Offset, 4, false);
5130   // The local size values will have the hi 16-bits as zero.
5131   return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
5132                      DAG.getValueType(VT));
5133 }
5134 
5135 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
5136                                         EVT VT) {
5137   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
5138                                       "non-hsa intrinsic with hsa target",
5139                                       DL.getDebugLoc());
5140   DAG.getContext()->diagnose(BadIntrin);
5141   return DAG.getUNDEF(VT);
5142 }
5143 
5144 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
5145                                          EVT VT) {
5146   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
5147                                       "intrinsic not supported on subtarget",
5148                                       DL.getDebugLoc());
5149   DAG.getContext()->diagnose(BadIntrin);
5150   return DAG.getUNDEF(VT);
5151 }
5152 
5153 static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
5154                                     ArrayRef<SDValue> Elts) {
5155   assert(!Elts.empty());
5156   MVT Type;
5157   unsigned NumElts;
5158 
5159   if (Elts.size() == 1) {
5160     Type = MVT::f32;
5161     NumElts = 1;
5162   } else if (Elts.size() == 2) {
5163     Type = MVT::v2f32;
5164     NumElts = 2;
5165   } else if (Elts.size() == 3) {
5166     Type = MVT::v3f32;
5167     NumElts = 3;
5168   } else if (Elts.size() <= 4) {
5169     Type = MVT::v4f32;
5170     NumElts = 4;
5171   } else if (Elts.size() <= 8) {
5172     Type = MVT::v8f32;
5173     NumElts = 8;
5174   } else {
5175     assert(Elts.size() <= 16);
5176     Type = MVT::v16f32;
5177     NumElts = 16;
5178   }
5179 
5180   SmallVector<SDValue, 16> VecElts(NumElts);
5181   for (unsigned i = 0; i < Elts.size(); ++i) {
5182     SDValue Elt = Elts[i];
5183     if (Elt.getValueType() != MVT::f32)
5184       Elt = DAG.getBitcast(MVT::f32, Elt);
5185     VecElts[i] = Elt;
5186   }
5187   for (unsigned i = Elts.size(); i < NumElts; ++i)
5188     VecElts[i] = DAG.getUNDEF(MVT::f32);
5189 
5190   if (NumElts == 1)
5191     return VecElts[0];
5192   return DAG.getBuildVector(Type, DL, VecElts);
5193 }
5194 
5195 static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG,
5196                              SDValue *GLC, SDValue *SLC, SDValue *DLC) {
5197   auto CachePolicyConst = cast<ConstantSDNode>(CachePolicy.getNode());
5198 
5199   uint64_t Value = CachePolicyConst->getZExtValue();
5200   SDLoc DL(CachePolicy);
5201   if (GLC) {
5202     *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
5203     Value &= ~(uint64_t)0x1;
5204   }
5205   if (SLC) {
5206     *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
5207     Value &= ~(uint64_t)0x2;
5208   }
5209   if (DLC) {
5210     *DLC = DAG.getTargetConstant((Value & 0x4) ? 1 : 0, DL, MVT::i32);
5211     Value &= ~(uint64_t)0x4;
5212   }
5213 
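  // All supported bits have been cleared above; any value left over means the
  // cache policy requested an unsupported bit.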
5214   return Value == 0;
5215 }
5216 
5217 static SDValue padEltsToUndef(SelectionDAG &DAG, const SDLoc &DL, EVT CastVT,
5218                               SDValue Src, int ExtraElts) {
5219   EVT SrcVT = Src.getValueType();
5220 
5221   SmallVector<SDValue, 8> Elts;
5222 
5223   if (SrcVT.isVector())
5224     DAG.ExtractVectorElements(Src, Elts);
5225   else
5226     Elts.push_back(Src);
5227 
5228   SDValue Undef = DAG.getUNDEF(SrcVT.getScalarType());
5229   while (ExtraElts--)
5230     Elts.push_back(Undef);
5231 
5232   return DAG.getBuildVector(CastVT, DL, Elts);
5233 }
5234 
// Re-construct the required return value for an image load intrinsic.
// This is more complicated due to the optional use of TexFailCtrl, which means
// the required return type is an aggregate.
5238 static SDValue constructRetValue(SelectionDAG &DAG,
5239                                  MachineSDNode *Result,
5240                                  ArrayRef<EVT> ResultTypes,
5241                                  bool IsTexFail, bool Unpacked, bool IsD16,
5242                                  int DMaskPop, int NumVDataDwords,
5243                                  const SDLoc &DL, LLVMContext &Context) {
  // Determine the required return type. This is the same regardless of the
  // IsTexFail flag.
5245   EVT ReqRetVT = ResultTypes[0];
5246   int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1;
  int NumDataDwords = (!IsD16 || Unpacked) ?
    ReqRetNumElts : (ReqRetNumElts + 1) / 2;

  int MaskPopDwords = (!IsD16 || Unpacked) ?
    DMaskPop : (DMaskPop + 1) / 2;
5252 
5253   MVT DataDwordVT = NumDataDwords == 1 ?
5254     MVT::i32 : MVT::getVectorVT(MVT::i32, NumDataDwords);
5255 
5256   MVT MaskPopVT = MaskPopDwords == 1 ?
5257     MVT::i32 : MVT::getVectorVT(MVT::i32, MaskPopDwords);
5258 
5259   SDValue Data(Result, 0);
5260   SDValue TexFail;
5261 
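  // When TFE/LWE is enabled the instruction returns one extra dword holding
  // the texture-fail status; split it off from the data dwords here.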
5262   if (IsTexFail) {
5263     SDValue ZeroIdx = DAG.getConstant(0, DL, MVT::i32);
5264     if (MaskPopVT.isVector()) {
5265       Data = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MaskPopVT,
5266                          SDValue(Result, 0), ZeroIdx);
5267     } else {
5268       Data = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MaskPopVT,
5269                          SDValue(Result, 0), ZeroIdx);
5270     }
5271 
5272     TexFail = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
5273                           SDValue(Result, 0),
5274                           DAG.getConstant(MaskPopDwords, DL, MVT::i32));
5275   }
5276 
5277   if (DataDwordVT.isVector())
5278     Data = padEltsToUndef(DAG, DL, DataDwordVT, Data,
5279                           NumDataDwords - MaskPopDwords);
5280 
5281   if (IsD16)
5282     Data = adjustLoadValueTypeImpl(Data, ReqRetVT, DL, DAG, Unpacked);
5283 
5284   if (!ReqRetVT.isVector())
5285     Data = DAG.getNode(ISD::TRUNCATE, DL, ReqRetVT.changeTypeToInteger(), Data);
5286 
5287   Data = DAG.getNode(ISD::BITCAST, DL, ReqRetVT, Data);
5288 
5289   if (TexFail)
5290     return DAG.getMergeValues({Data, TexFail, SDValue(Result, 1)}, DL);
5291 
5292   if (Result->getNumValues() == 1)
5293     return Data;
5294 
5295   return DAG.getMergeValues({Data, SDValue(Result, 1)}, DL);
5296 }
5297 
5298 static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE,
5299                          SDValue *LWE, bool &IsTexFail) {
5300   auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode());
5301 
5302   uint64_t Value = TexFailCtrlConst->getZExtValue();
5303   if (Value) {
5304     IsTexFail = true;
5305   }
5306 
5307   SDLoc DL(TexFailCtrlConst);
5308   *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
5309   Value &= ~(uint64_t)0x1;
5310   *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
5311   Value &= ~(uint64_t)0x2;
5312 
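  // As with the cache policy, any remaining bits indicate an unsupported
  // texfailctrl value.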
5313   return Value == 0;
5314 }
5315 
5316 SDValue SITargetLowering::lowerImage(SDValue Op,
5317                                      const AMDGPU::ImageDimIntrinsicInfo *Intr,
5318                                      SelectionDAG &DAG) const {
5319   SDLoc DL(Op);
5320   MachineFunction &MF = DAG.getMachineFunction();
5321   const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>();
5322   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
5323       AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
5324   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
5325   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
5326       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
5327   const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
5328       AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
5329   unsigned IntrOpcode = Intr->BaseOpcode;
5330   bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10;
5331 
5332   SmallVector<EVT, 3> ResultTypes(Op->value_begin(), Op->value_end());
5333   SmallVector<EVT, 3> OrigResultTypes(Op->value_begin(), Op->value_end());
5334   bool IsD16 = false;
5335   bool IsA16 = false;
5336   SDValue VData;
5337   int NumVDataDwords;
5338   bool AdjustRetType = false;
5339 
5340   unsigned AddrIdx; // Index of first address argument
5341   unsigned DMask;
5342   unsigned DMaskLanes = 0;
5343 
5344   if (BaseOpcode->Atomic) {
5345     VData = Op.getOperand(2);
5346 
5347     bool Is64Bit = VData.getValueType() == MVT::i64;
5348     if (BaseOpcode->AtomicX2) {
5349       SDValue VData2 = Op.getOperand(3);
5350       VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL,
5351                                  {VData, VData2});
5352       if (Is64Bit)
5353         VData = DAG.getBitcast(MVT::v4i32, VData);
5354 
5355       ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32;
5356       DMask = Is64Bit ? 0xf : 0x3;
5357       NumVDataDwords = Is64Bit ? 4 : 2;
5358       AddrIdx = 4;
5359     } else {
5360       DMask = Is64Bit ? 0x3 : 0x1;
5361       NumVDataDwords = Is64Bit ? 2 : 1;
5362       AddrIdx = 3;
5363     }
5364   } else {
5365     unsigned DMaskIdx = BaseOpcode->Store ? 3 : isa<MemSDNode>(Op) ? 2 : 1;
5366     auto DMaskConst = cast<ConstantSDNode>(Op.getOperand(DMaskIdx));
5367     DMask = DMaskConst->getZExtValue();
5368     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
5369 
5370     if (BaseOpcode->Store) {
5371       VData = Op.getOperand(2);
5372 
5373       MVT StoreVT = VData.getSimpleValueType();
5374       if (StoreVT.getScalarType() == MVT::f16) {
5375         if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
5376           return Op; // D16 is unsupported for this instruction
5377 
5378         IsD16 = true;
5379         VData = handleD16VData(VData, DAG);
5380       }
5381 
5382       NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32;
5383     } else {
5384       // Work out the num dwords based on the dmask popcount and underlying type
5385       // and whether packing is supported.
5386       MVT LoadVT = ResultTypes[0].getSimpleVT();
5387       if (LoadVT.getScalarType() == MVT::f16) {
5388         if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
5389           return Op; // D16 is unsupported for this instruction
5390 
5391         IsD16 = true;
5392       }
5393 
5394       // Confirm that the return type is large enough for the dmask specified
5395       if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) ||
5396           (!LoadVT.isVector() && DMaskLanes > 1))
5397           return Op;
5398 
5399       if (IsD16 && !Subtarget->hasUnpackedD16VMem())
5400         NumVDataDwords = (DMaskLanes + 1) / 2;
5401       else
5402         NumVDataDwords = DMaskLanes;
5403 
5404       AdjustRetType = true;
5405     }
5406 
5407     AddrIdx = DMaskIdx + 1;
5408   }
5409 
5410   unsigned NumGradients = BaseOpcode->Gradients ? DimInfo->NumGradients : 0;
5411   unsigned NumCoords = BaseOpcode->Coordinates ? DimInfo->NumCoords : 0;
5412   unsigned NumLCM = BaseOpcode->LodOrClampOrMip ? 1 : 0;
5413   unsigned NumVAddrs = BaseOpcode->NumExtraArgs + NumGradients +
5414                        NumCoords + NumLCM;
5415   unsigned NumMIVAddrs = NumVAddrs;
5416 
5417   SmallVector<SDValue, 4> VAddrs;
5418 
5419   // Optimize _L to _LZ when _L is zero
5420   if (LZMappingInfo) {
5421     if (auto ConstantLod =
5422          dyn_cast<ConstantFPSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
5423       if (ConstantLod->isZero() || ConstantLod->isNegative()) {
5424         IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
5425         NumMIVAddrs--;               // remove 'lod'
5426       }
5427     }
5428   }
5429 
  // Optimize _mip away when 'lod' is zero.
5431   if (MIPMappingInfo) {
5432     if (auto ConstantLod =
5433          dyn_cast<ConstantSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
5434       if (ConstantLod->isNullValue()) {
5435         IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
5436         NumMIVAddrs--;               // remove 'lod'
5437       }
5438     }
5439   }
5440 
5441   // Check for 16 bit addresses and pack if true.
5442   unsigned DimIdx = AddrIdx + BaseOpcode->NumExtraArgs;
5443   MVT VAddrVT = Op.getOperand(DimIdx).getSimpleValueType();
5444   const MVT VAddrScalarVT = VAddrVT.getScalarType();
5445   if (((VAddrScalarVT == MVT::f16) || (VAddrScalarVT == MVT::i16))) {
5446     // Illegal to use a16 images
    if (!ST->hasFeature(AMDGPU::FeatureR128A16) &&
        !ST->hasFeature(AMDGPU::FeatureGFX10A16))
      return Op;
5449 
5450     IsA16 = true;
5451     const MVT VectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16;
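    // Packing sketch: 16-bit address components are combined two per 32-bit
    // register via build_vector; components that must stay unpaired (the last
    // odd coordinate, or the trailing gradient of an odd-sized gradient half)
    // are any-extended to 32 bits instead. Either way the result is bitcast
    // to f32 so all packed address operands share one register type.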
5452     for (unsigned i = AddrIdx; i < (AddrIdx + NumMIVAddrs); ++i) {
5453       SDValue AddrLo;
5454       // Push back extra arguments.
5455       if (i < DimIdx) {
5456         AddrLo = Op.getOperand(i);
5457       } else {
5458         // Dz/dh, dz/dv and the last odd coord are packed with undef. Also,
5459         // in 1D, derivatives dx/dh and dx/dv are packed with undef.
5460         if (((i + 1) >= (AddrIdx + NumMIVAddrs)) ||
5461             ((NumGradients / 2) % 2 == 1 &&
5462             (i == DimIdx + (NumGradients / 2) - 1 ||
5463              i == DimIdx + NumGradients - 1))) {
5464           AddrLo = Op.getOperand(i);
5465           if (AddrLo.getValueType() != MVT::i16)
5466             AddrLo = DAG.getBitcast(MVT::i16, Op.getOperand(i));
5467           AddrLo = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, AddrLo);
5468         } else {
5469           AddrLo = DAG.getBuildVector(VectorVT, DL,
5470                                       {Op.getOperand(i), Op.getOperand(i + 1)});
5471           i++;
5472         }
5473         AddrLo = DAG.getBitcast(MVT::f32, AddrLo);
5474       }
5475       VAddrs.push_back(AddrLo);
5476     }
5477   } else {
5478     for (unsigned i = 0; i < NumMIVAddrs; ++i)
5479       VAddrs.push_back(Op.getOperand(AddrIdx + i));
5480   }
5481 
5482   // If the register allocator cannot place the address registers contiguously
5483   // without introducing moves, then using the non-sequential address encoding
5484   // is always preferable, since it saves VALU instructions and is usually a
5485   // wash in terms of code size or even better.
5486   //
5487   // However, we currently have no way of hinting to the register allocator that
5488   // MIMG addresses should be placed contiguously when it is possible to do so,
5489   // so force non-NSA for the common 2-address case as a heuristic.
5490   //
5491   // SIShrinkInstructions will convert NSA encodings to non-NSA after register
5492   // allocation when possible.
5493   bool UseNSA =
5494       ST->hasFeature(AMDGPU::FeatureNSAEncoding) && VAddrs.size() >= 3;
5495   SDValue VAddr;
5496   if (!UseNSA)
5497     VAddr = getBuildDwordsVector(DAG, DL, VAddrs);
5498 
5499   SDValue True = DAG.getTargetConstant(1, DL, MVT::i1);
5500   SDValue False = DAG.getTargetConstant(0, DL, MVT::i1);
5501   unsigned CtrlIdx; // Index of texfailctrl argument
5502   SDValue Unorm;
5503   if (!BaseOpcode->Sampler) {
5504     Unorm = True;
5505     CtrlIdx = AddrIdx + NumVAddrs + 1;
5506   } else {
5507     auto UnormConst =
5508         cast<ConstantSDNode>(Op.getOperand(AddrIdx + NumVAddrs + 2));
5509 
5510     Unorm = UnormConst->getZExtValue() ? True : False;
5511     CtrlIdx = AddrIdx + NumVAddrs + 3;
5512   }
5513 
5514   SDValue TFE;
5515   SDValue LWE;
5516   SDValue TexFail = Op.getOperand(CtrlIdx);
5517   bool IsTexFail = false;
5518   if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail))
5519     return Op;
5520 
5521   if (IsTexFail) {
5522     if (!DMaskLanes) {
      // Expecting to get an error flag since TFC is on and dmask is 0.
      // Force dmask to be at least 1, otherwise the instruction will fail.
5525       DMask = 0x1;
5526       DMaskLanes = 1;
5527       NumVDataDwords = 1;
5528     }
5529     NumVDataDwords += 1;
5530     AdjustRetType = true;
5531   }
5532 
  // Something earlier has flagged that the return type needs adjusting.
  // This happens if the instruction is a load or has TexFailCtrl flags set.
5535   if (AdjustRetType) {
5536     // NumVDataDwords reflects the true number of dwords required in the return type
5537     if (DMaskLanes == 0 && !BaseOpcode->Store) {
5538       // This is a no-op load. This can be eliminated
5539       SDValue Undef = DAG.getUNDEF(Op.getValueType());
5540       if (isa<MemSDNode>(Op))
5541         return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
5542       return Undef;
5543     }
5544 
5545     EVT NewVT = NumVDataDwords > 1 ?
5546                   EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumVDataDwords)
5547                 : MVT::i32;
5548 
5549     ResultTypes[0] = NewVT;
5550     if (ResultTypes.size() == 3) {
5551       // Original result was aggregate type used for TexFailCtrl results
5552       // The actual instruction returns as a vector type which has now been
5553       // created. Remove the aggregate result.
5554       ResultTypes.erase(&ResultTypes[1]);
5555     }
5556   }
5557 
5558   SDValue GLC;
5559   SDValue SLC;
5560   SDValue DLC;
5561   if (BaseOpcode->Atomic) {
5562     GLC = True; // TODO no-return optimization
5563     if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, nullptr, &SLC,
5564                           IsGFX10 ? &DLC : nullptr))
5565       return Op;
5566   } else {
5567     if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, &GLC, &SLC,
5568                           IsGFX10 ? &DLC : nullptr))
5569       return Op;
5570   }
5571 
5572   SmallVector<SDValue, 26> Ops;
5573   if (BaseOpcode->Store || BaseOpcode->Atomic)
5574     Ops.push_back(VData); // vdata
5575   if (UseNSA) {
5576     for (const SDValue &Addr : VAddrs)
5577       Ops.push_back(Addr);
5578   } else {
5579     Ops.push_back(VAddr);
5580   }
5581   Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs)); // rsrc
5582   if (BaseOpcode->Sampler)
5583     Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs + 1)); // sampler
5584   Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32));
5585   if (IsGFX10)
5586     Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32));
5587   Ops.push_back(Unorm);
5588   if (IsGFX10)
5589     Ops.push_back(DLC);
5590   Ops.push_back(GLC);
5591   Ops.push_back(SLC);
5592   Ops.push_back(IsA16 &&  // r128, a16 for gfx9
5593                 ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False);
5594   if (IsGFX10)
5595     Ops.push_back(IsA16 ? True : False);
5596   Ops.push_back(TFE);
5597   Ops.push_back(LWE);
5598   if (!IsGFX10)
5599     Ops.push_back(DimInfo->DA ? True : False);
5600   if (BaseOpcode->HasD16)
5601     Ops.push_back(IsD16 ? True : False);
5602   if (isa<MemSDNode>(Op))
5603     Ops.push_back(Op.getOperand(0)); // chain
5604 
5605   int NumVAddrDwords =
5606       UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32;
5607   int Opcode = -1;
5608 
5609   if (IsGFX10) {
5610     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
5611                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
5612                                           : AMDGPU::MIMGEncGfx10Default,
5613                                    NumVDataDwords, NumVAddrDwords);
5614   } else {
5615     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5616       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
5617                                      NumVDataDwords, NumVAddrDwords);
5618     if (Opcode == -1)
5619       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
5620                                      NumVDataDwords, NumVAddrDwords);
5621   }
5622   assert(Opcode != -1);
5623 
5624   MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops);
5625   if (auto MemOp = dyn_cast<MemSDNode>(Op)) {
5626     MachineMemOperand *MemRef = MemOp->getMemOperand();
5627     DAG.setNodeMemRefs(NewNode, {MemRef});
5628   }
5629 
5630   if (BaseOpcode->AtomicX2) {
5631     SmallVector<SDValue, 1> Elt;
5632     DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1);
5633     return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL);
5634   } else if (!BaseOpcode->Store) {
5635     return constructRetValue(DAG, NewNode,
5636                              OrigResultTypes, IsTexFail,
5637                              Subtarget->hasUnpackedD16VMem(), IsD16,
5638                              DMaskLanes, NumVDataDwords, DL,
5639                              *DAG.getContext());
5640   }
5641 
5642   return SDValue(NewNode, 0);
5643 }
5644 
5645 SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
5646                                        SDValue Offset, SDValue CachePolicy,
5647                                        SelectionDAG &DAG) const {
5648   MachineFunction &MF = DAG.getMachineFunction();
5649 
5650   const DataLayout &DataLayout = DAG.getDataLayout();
5651   unsigned Align =
5652       DataLayout.getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
5653 
5654   MachineMemOperand *MMO = MF.getMachineMemOperand(
5655       MachinePointerInfo(),
5656       MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
5657           MachineMemOperand::MOInvariant,
5658       VT.getStoreSize(), Align);
5659 
5660   if (!Offset->isDivergent()) {
5661     SDValue Ops[] = {
5662         Rsrc,
5663         Offset, // Offset
5664         CachePolicy
5665     };
5666 
5667     // Widen vec3 load to vec4.
5668     if (VT.isVector() && VT.getVectorNumElements() == 3) {
5669       EVT WidenedVT =
5670           EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
5671       auto WidenedOp = DAG.getMemIntrinsicNode(
5672           AMDGPUISD::SBUFFER_LOAD, DL, DAG.getVTList(WidenedVT), Ops, WidenedVT,
5673           MF.getMachineMemOperand(MMO, 0, WidenedVT.getStoreSize()));
5674       auto Subvector = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, WidenedOp,
5675                                    DAG.getVectorIdxConstant(0, DL));
5676       return Subvector;
5677     }
5678 
5679     return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL,
5680                                    DAG.getVTList(VT), Ops, VT, MMO);
5681   }
5682 
5683   // We have a divergent offset. Emit a MUBUF buffer load instead. We can
5684   // assume that the buffer is unswizzled.
5685   SmallVector<SDValue, 4> Loads;
5686   unsigned NumLoads = 1;
5687   MVT LoadVT = VT.getSimpleVT();
5688   unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1;
5689   assert((LoadVT.getScalarType() == MVT::i32 ||
5690           LoadVT.getScalarType() == MVT::f32));
5691 
5692   if (NumElts == 8 || NumElts == 16) {
5693     NumLoads = NumElts / 4;
5694     LoadVT = MVT::getVectorVT(LoadVT.getScalarType(), 4);
5695   }
5696 
5697   SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue});
5698   SDValue Ops[] = {
5699       DAG.getEntryNode(),                               // Chain
5700       Rsrc,                                             // rsrc
5701       DAG.getConstant(0, DL, MVT::i32),                 // vindex
5702       {},                                               // voffset
5703       {},                                               // soffset
5704       {},                                               // offset
5705       CachePolicy,                                      // cachepolicy
5706       DAG.getTargetConstant(0, DL, MVT::i1),            // idxen
5707   };
5708 
5709   // Use the alignment to ensure that the required offsets will fit into the
5710   // immediate offsets.
5711   setBufferOffsets(Offset, DAG, &Ops[3], NumLoads > 1 ? 16 * NumLoads : 4);
5712 
5713   uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
5714   for (unsigned i = 0; i < NumLoads; ++i) {
5715     Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32);
5716     Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, Ops,
5717                                         LoadVT, MMO, DAG));
5718   }
5719 
5720   if (NumElts == 8 || NumElts == 16)
5721     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads);
5722 
5723   return Loads[0];
5724 }
5725 
5726 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
5727                                                   SelectionDAG &DAG) const {
5728   MachineFunction &MF = DAG.getMachineFunction();
5729   auto MFI = MF.getInfo<SIMachineFunctionInfo>();
5730 
5731   EVT VT = Op.getValueType();
5732   SDLoc DL(Op);
5733   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5734 
5735   // TODO: Should this propagate fast-math-flags?
5736 
5737   switch (IntrinsicID) {
5738   case Intrinsic::amdgcn_implicit_buffer_ptr: {
5739     if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction()))
5740       return emitNonHSAIntrinsicError(DAG, DL, VT);
5741     return getPreloadedValue(DAG, *MFI, VT,
5742                              AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
5743   }
5744   case Intrinsic::amdgcn_dispatch_ptr:
5745   case Intrinsic::amdgcn_queue_ptr: {
5746     if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) {
5747       DiagnosticInfoUnsupported BadIntrin(
5748           MF.getFunction(), "unsupported hsa intrinsic without hsa target",
5749           DL.getDebugLoc());
5750       DAG.getContext()->diagnose(BadIntrin);
5751       return DAG.getUNDEF(VT);
5752     }
5753 
5754     auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
5755       AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
5756     return getPreloadedValue(DAG, *MFI, VT, RegID);
5757   }
5758   case Intrinsic::amdgcn_implicitarg_ptr: {
5759     if (MFI->isEntryFunction())
5760       return getImplicitArgPtr(DAG, DL);
5761     return getPreloadedValue(DAG, *MFI, VT,
5762                              AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
5763   }
5764   case Intrinsic::amdgcn_kernarg_segment_ptr: {
5765     if (!AMDGPU::isKernel(MF.getFunction().getCallingConv())) {
5766       // This only makes sense to call in a kernel, so just lower to null.
5767       return DAG.getConstant(0, DL, VT);
5768     }
5769 
5770     return getPreloadedValue(DAG, *MFI, VT,
5771                              AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
5772   }
5773   case Intrinsic::amdgcn_dispatch_id: {
5774     return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
5775   }
5776   case Intrinsic::amdgcn_rcp:
5777     return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
5778   case Intrinsic::amdgcn_rsq:
5779     return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
5780   case Intrinsic::amdgcn_rsq_legacy:
5781     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5782       return emitRemovedIntrinsicError(DAG, DL, VT);
5783 
5784     return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
5785   case Intrinsic::amdgcn_rcp_legacy:
5786     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5787       return emitRemovedIntrinsicError(DAG, DL, VT);
5788     return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
5789   case Intrinsic::amdgcn_rsq_clamp: {
5790     if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
5791       return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
5792 
5793     Type *Type = VT.getTypeForEVT(*DAG.getContext());
5794     APFloat Max = APFloat::getLargest(Type->getFltSemantics());
5795     APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
5796 
5797     SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
5798     SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
5799                               DAG.getConstantFP(Max, DL, VT));
5800     return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
5801                        DAG.getConstantFP(Min, DL, VT));
5802   }
5803   case Intrinsic::r600_read_ngroups_x:
5804     if (Subtarget->isAmdHsaOS())
5805       return emitNonHSAIntrinsicError(DAG, DL, VT);
5806 
5807     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5808                                     SI::KernelInputOffsets::NGROUPS_X, 4, false);
5809   case Intrinsic::r600_read_ngroups_y:
5810     if (Subtarget->isAmdHsaOS())
5811       return emitNonHSAIntrinsicError(DAG, DL, VT);
5812 
5813     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5814                                     SI::KernelInputOffsets::NGROUPS_Y, 4, false);
5815   case Intrinsic::r600_read_ngroups_z:
5816     if (Subtarget->isAmdHsaOS())
5817       return emitNonHSAIntrinsicError(DAG, DL, VT);
5818 
5819     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5820                                     SI::KernelInputOffsets::NGROUPS_Z, 4, false);
5821   case Intrinsic::r600_read_global_size_x:
5822     if (Subtarget->isAmdHsaOS())
5823       return emitNonHSAIntrinsicError(DAG, DL, VT);
5824 
5825     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5826                                     SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false);
5827   case Intrinsic::r600_read_global_size_y:
5828     if (Subtarget->isAmdHsaOS())
5829       return emitNonHSAIntrinsicError(DAG, DL, VT);
5830 
5831     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5832                                     SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false);
5833   case Intrinsic::r600_read_global_size_z:
5834     if (Subtarget->isAmdHsaOS())
5835       return emitNonHSAIntrinsicError(DAG, DL, VT);
5836 
5837     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5838                                     SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false);
5839   case Intrinsic::r600_read_local_size_x:
5840     if (Subtarget->isAmdHsaOS())
5841       return emitNonHSAIntrinsicError(DAG, DL, VT);
5842 
5843     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5844                                   SI::KernelInputOffsets::LOCAL_SIZE_X);
5845   case Intrinsic::r600_read_local_size_y:
5846     if (Subtarget->isAmdHsaOS())
5847       return emitNonHSAIntrinsicError(DAG, DL, VT);
5848 
5849     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5850                                   SI::KernelInputOffsets::LOCAL_SIZE_Y);
5851   case Intrinsic::r600_read_local_size_z:
5852     if (Subtarget->isAmdHsaOS())
5853       return emitNonHSAIntrinsicError(DAG, DL, VT);
5854 
5855     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5856                                   SI::KernelInputOffsets::LOCAL_SIZE_Z);
5857   case Intrinsic::amdgcn_workgroup_id_x:
5858     return getPreloadedValue(DAG, *MFI, VT,
5859                              AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
5860   case Intrinsic::amdgcn_workgroup_id_y:
5861     return getPreloadedValue(DAG, *MFI, VT,
5862                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
5863   case Intrinsic::amdgcn_workgroup_id_z:
5864     return getPreloadedValue(DAG, *MFI, VT,
5865                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
5866   case Intrinsic::amdgcn_workitem_id_x:
5867     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5868                           SDLoc(DAG.getEntryNode()),
5869                           MFI->getArgInfo().WorkItemIDX);
5870   case Intrinsic::amdgcn_workitem_id_y:
5871     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5872                           SDLoc(DAG.getEntryNode()),
5873                           MFI->getArgInfo().WorkItemIDY);
5874   case Intrinsic::amdgcn_workitem_id_z:
5875     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5876                           SDLoc(DAG.getEntryNode()),
5877                           MFI->getArgInfo().WorkItemIDZ);
5878   case Intrinsic::amdgcn_wavefrontsize:
5879     return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(),
5880                            SDLoc(Op), MVT::i32);
5881   case Intrinsic::amdgcn_s_buffer_load: {
5882     bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10;
5883     SDValue GLC;
5884     SDValue DLC = DAG.getTargetConstant(0, DL, MVT::i1);
5885     if (!parseCachePolicy(Op.getOperand(3), DAG, &GLC, nullptr,
5886                           IsGFX10 ? &DLC : nullptr))
5887       return Op;
    return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2),
                        Op.getOperand(3), DAG);
5890   }
5891   case Intrinsic::amdgcn_fdiv_fast:
5892     return lowerFDIV_FAST(Op, DAG);
5893   case Intrinsic::amdgcn_sin:
5894     return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
5895 
5896   case Intrinsic::amdgcn_cos:
5897     return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
5898 
5899   case Intrinsic::amdgcn_mul_u24:
5900     return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT, Op.getOperand(1), Op.getOperand(2));
5901   case Intrinsic::amdgcn_mul_i24:
5902     return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT, Op.getOperand(1), Op.getOperand(2));
5903 
5904   case Intrinsic::amdgcn_log_clamp: {
5905     if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
5906       return SDValue();
5907 
5908     DiagnosticInfoUnsupported BadIntrin(
5909       MF.getFunction(), "intrinsic not supported on subtarget",
5910       DL.getDebugLoc());
    DAG.getContext()->diagnose(BadIntrin);
    return DAG.getUNDEF(VT);
5913   }
5914   case Intrinsic::amdgcn_ldexp:
5915     return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
5916                        Op.getOperand(1), Op.getOperand(2));
5917 
5918   case Intrinsic::amdgcn_fract:
5919     return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
5920 
5921   case Intrinsic::amdgcn_class:
5922     return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
5923                        Op.getOperand(1), Op.getOperand(2));
5924   case Intrinsic::amdgcn_div_fmas:
5925     return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
5926                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5927                        Op.getOperand(4));
5928 
5929   case Intrinsic::amdgcn_div_fixup:
5930     return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
5931                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5932 
5933   case Intrinsic::amdgcn_trig_preop:
5934     return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
5935                        Op.getOperand(1), Op.getOperand(2));
5936   case Intrinsic::amdgcn_div_scale: {
5937     const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3));
5938 
    // Translate to the operands expected by the machine instruction. The
    // first source operand must match either the numerator or the denominator,
    // as selected by the constant parameter.
5941     SDValue Numerator = Op.getOperand(1);
5942     SDValue Denominator = Op.getOperand(2);
5943 
5944     // Note this order is opposite of the machine instruction's operations,
5945     // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
5946     // intrinsic has the numerator as the first operand to match a normal
5947     // division operation.
5948 
5949     SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
5950 
5951     return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
5952                        Denominator, Numerator);
5953   }
5954   case Intrinsic::amdgcn_icmp: {
5955     // There is a Pat that handles this variant, so return it as-is.
5956     if (Op.getOperand(1).getValueType() == MVT::i1 &&
5957         Op.getConstantOperandVal(2) == 0 &&
5958         Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE)
5959       return Op;
5960     return lowerICMPIntrinsic(*this, Op.getNode(), DAG);
5961   }
5962   case Intrinsic::amdgcn_fcmp: {
5963     return lowerFCMPIntrinsic(*this, Op.getNode(), DAG);
5964   }
5965   case Intrinsic::amdgcn_fmed3:
5966     return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
5967                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5968   case Intrinsic::amdgcn_fdot2:
5969     return DAG.getNode(AMDGPUISD::FDOT2, DL, VT,
5970                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5971                        Op.getOperand(4));
5972   case Intrinsic::amdgcn_fmul_legacy:
5973     return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
5974                        Op.getOperand(1), Op.getOperand(2));
5975   case Intrinsic::amdgcn_sffbh:
5976     return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
5977   case Intrinsic::amdgcn_sbfe:
5978     return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
5979                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5980   case Intrinsic::amdgcn_ubfe:
5981     return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
5982                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5983   case Intrinsic::amdgcn_cvt_pkrtz:
5984   case Intrinsic::amdgcn_cvt_pknorm_i16:
5985   case Intrinsic::amdgcn_cvt_pknorm_u16:
5986   case Intrinsic::amdgcn_cvt_pk_i16:
5987   case Intrinsic::amdgcn_cvt_pk_u16: {
5988     // FIXME: Stop adding cast if v2f16/v2i16 are legal.
5989     EVT VT = Op.getValueType();
5990     unsigned Opcode;
5991 
5992     if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
5993       Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
5994     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
5995       Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
5996     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
5997       Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
5998     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
5999       Opcode = AMDGPUISD::CVT_PK_I16_I32;
6000     else
6001       Opcode = AMDGPUISD::CVT_PK_U16_U32;
6002 
6003     if (isTypeLegal(VT))
6004       return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2));
6005 
6006     SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
6007                                Op.getOperand(1), Op.getOperand(2));
6008     return DAG.getNode(ISD::BITCAST, DL, VT, Node);
6009   }
6010   case Intrinsic::amdgcn_fmad_ftz:
6011     return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1),
6012                        Op.getOperand(2), Op.getOperand(3));
6013 
6014   case Intrinsic::amdgcn_if_break:
6015     return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT,
6016                                       Op->getOperand(1), Op->getOperand(2)), 0);
6017 
6018   case Intrinsic::amdgcn_groupstaticsize: {
6019     Triple::OSType OS = getTargetMachine().getTargetTriple().getOS();
6020     if (OS == Triple::AMDHSA || OS == Triple::AMDPAL)
6021       return Op;
6022 
6023     const Module *M = MF.getFunction().getParent();
6024     const GlobalValue *GV =
6025         M->getNamedValue(Intrinsic::getName(Intrinsic::amdgcn_groupstaticsize));
6026     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0,
6027                                             SIInstrInfo::MO_ABS32_LO);
6028     return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0};
6029   }
6030   case Intrinsic::amdgcn_is_shared:
6031   case Intrinsic::amdgcn_is_private: {
6032     SDLoc SL(Op);
6033     unsigned AS = (IntrinsicID == Intrinsic::amdgcn_is_shared) ?
6034       AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
6035     SDValue Aperture = getSegmentAperture(AS, SL, DAG);
6036     SDValue SrcVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32,
6037                                  Op.getOperand(1));
6038 
6039     SDValue SrcHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, SrcVec,
6040                                 DAG.getConstant(1, SL, MVT::i32));
6041     return DAG.getSetCC(SL, MVT::i1, SrcHi, Aperture, ISD::SETEQ);
6042   }
6043   default:
6044     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6045             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
6046       return lowerImage(Op, ImageDimIntr, DAG);
6047 
6048     return Op;
6049   }
6050 }
6051 
6052 // This function computes an appropriate offset to pass to
6053 // MachineMemOperand::setOffset() based on the offset inputs to
// an intrinsic.  If any of the offsets are non-constant or
6055 // if VIndex is non-zero then this function returns 0.  Otherwise,
6056 // it returns the sum of VOffset, SOffset, and Offset.
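// For example, constant voffset=4, soffset=8 and offset=16 (with VIndex absent
// or zero) combine to 28; any non-constant input, or a non-zero VIndex, forces
// a result of 0.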
6057 static unsigned getBufferOffsetForMMO(SDValue VOffset,
6058                                       SDValue SOffset,
6059                                       SDValue Offset,
6060                                       SDValue VIndex = SDValue()) {
6061 
6062   if (!isa<ConstantSDNode>(VOffset) || !isa<ConstantSDNode>(SOffset) ||
6063       !isa<ConstantSDNode>(Offset))
6064     return 0;
6065 
6066   if (VIndex) {
    if (!isa<ConstantSDNode>(VIndex) ||
        !cast<ConstantSDNode>(VIndex)->isNullValue())
      return 0;
6069   }
6070 
6071   return cast<ConstantSDNode>(VOffset)->getSExtValue() +
6072          cast<ConstantSDNode>(SOffset)->getSExtValue() +
6073          cast<ConstantSDNode>(Offset)->getSExtValue();
6074 }
6075 
6076 static unsigned getDSShaderTypeValue(const MachineFunction &MF) {
6077   switch (MF.getFunction().getCallingConv()) {
6078   case CallingConv::AMDGPU_PS:
6079     return 1;
6080   case CallingConv::AMDGPU_VS:
6081     return 2;
6082   case CallingConv::AMDGPU_GS:
6083     return 3;
6084   case CallingConv::AMDGPU_HS:
6085   case CallingConv::AMDGPU_LS:
6086   case CallingConv::AMDGPU_ES:
6087     report_fatal_error("ds_ordered_count unsupported for this calling conv");
6088   case CallingConv::AMDGPU_CS:
6089   case CallingConv::AMDGPU_KERNEL:
6090   case CallingConv::C:
6091   case CallingConv::Fast:
6092   default:
6093     // Assume other calling conventions are various compute callable functions
6094     return 0;
6095   }
6096 }
6097 
6098 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
6099                                                  SelectionDAG &DAG) const {
6100   unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
6101   SDLoc DL(Op);
6102 
6103   switch (IntrID) {
6104   case Intrinsic::amdgcn_ds_ordered_add:
6105   case Intrinsic::amdgcn_ds_ordered_swap: {
6106     MemSDNode *M = cast<MemSDNode>(Op);
6107     SDValue Chain = M->getOperand(0);
6108     SDValue M0 = M->getOperand(2);
6109     SDValue Value = M->getOperand(3);
6110     unsigned IndexOperand = M->getConstantOperandVal(7);
6111     unsigned WaveRelease = M->getConstantOperandVal(8);
6112     unsigned WaveDone = M->getConstantOperandVal(9);
6113 
6114     unsigned OrderedCountIndex = IndexOperand & 0x3f;
6115     IndexOperand &= ~0x3f;
6116     unsigned CountDw = 0;
6117 
6118     if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) {
6119       CountDw = (IndexOperand >> 24) & 0xf;
6120       IndexOperand &= ~(0xf << 24);
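      // (Bits [27:24] of the index operand carry the dword count on gfx10+;
      // it is validated below and folded back into Offset1 further down.)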
6121 
6122       if (CountDw < 1 || CountDw > 4) {
6123         report_fatal_error(
6124             "ds_ordered_count: dword count must be between 1 and 4");
6125       }
6126     }
6127 
6128     if (IndexOperand)
6129       report_fatal_error("ds_ordered_count: bad index operand");
6130 
6131     if (WaveDone && !WaveRelease)
6132       report_fatal_error("ds_ordered_count: wave_done requires wave_release");
6133 
6134     unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
6135     unsigned ShaderType = getDSShaderTypeValue(DAG.getMachineFunction());
6136     unsigned Offset0 = OrderedCountIndex << 2;
6137     unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
6138                        (Instruction << 4);
6139 
6140     if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
6141       Offset1 |= (CountDw - 1) << 6;
6142 
6143     unsigned Offset = Offset0 | (Offset1 << 8);
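    // Sketch of the offset encoding assembled above:
    //   [7:0]   ordered-count index * 4 (Offset0)
    //   [8]     wave_release
    //   [9]     wave_done
    //   [11:10] shader type
    //   [12]    instruction (0 = add, 1 = swap)
    //   [15:14] dword count - 1 (gfx10+ only)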
6144 
6145     SDValue Ops[] = {
6146       Chain,
6147       Value,
6148       DAG.getTargetConstant(Offset, DL, MVT::i16),
6149       copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue
6150     };
6151     return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL,
6152                                    M->getVTList(), Ops, M->getMemoryVT(),
6153                                    M->getMemOperand());
6154   }
6155   case Intrinsic::amdgcn_ds_fadd: {
6156     MemSDNode *M = cast<MemSDNode>(Op);
6157     unsigned Opc;
6158     switch (IntrID) {
6159     case Intrinsic::amdgcn_ds_fadd:
6160       Opc = ISD::ATOMIC_LOAD_FADD;
6161       break;
6162     }
6163 
6164     return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(),
6165                          M->getOperand(0), M->getOperand(2), M->getOperand(3),
6166                          M->getMemOperand());
6167   }
6168   case Intrinsic::amdgcn_atomic_inc:
6169   case Intrinsic::amdgcn_atomic_dec:
6170   case Intrinsic::amdgcn_ds_fmin:
6171   case Intrinsic::amdgcn_ds_fmax: {
6172     MemSDNode *M = cast<MemSDNode>(Op);
6173     unsigned Opc;
6174     switch (IntrID) {
6175     case Intrinsic::amdgcn_atomic_inc:
6176       Opc = AMDGPUISD::ATOMIC_INC;
6177       break;
6178     case Intrinsic::amdgcn_atomic_dec:
6179       Opc = AMDGPUISD::ATOMIC_DEC;
6180       break;
6181     case Intrinsic::amdgcn_ds_fmin:
6182       Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
6183       break;
6184     case Intrinsic::amdgcn_ds_fmax:
6185       Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
6186       break;
6187     default:
6188       llvm_unreachable("Unknown intrinsic!");
6189     }
6190     SDValue Ops[] = {
6191       M->getOperand(0), // Chain
6192       M->getOperand(2), // Ptr
6193       M->getOperand(3)  // Value
6194     };
6195 
6196     return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
6197                                    M->getMemoryVT(), M->getMemOperand());
6198   }
6199   case Intrinsic::amdgcn_buffer_load:
6200   case Intrinsic::amdgcn_buffer_load_format: {
6201     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
6202     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6203     unsigned IdxEn = 1;
6204     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
6205       IdxEn = Idx->getZExtValue() != 0;
6206     SDValue Ops[] = {
6207       Op.getOperand(0), // Chain
6208       Op.getOperand(2), // rsrc
6209       Op.getOperand(3), // vindex
6210       SDValue(),        // voffset -- will be set by setBufferOffsets
6211       SDValue(),        // soffset -- will be set by setBufferOffsets
6212       SDValue(),        // offset -- will be set by setBufferOffsets
6213       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6214       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6215     };
6216 
6217     unsigned Offset = setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]);
6218     // We don't know the offset if vindex is non-zero, so clear it.
6219     if (IdxEn)
6220       Offset = 0;
6221 
6222     unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
6223         AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
6224 
6225     EVT VT = Op.getValueType();
6226     EVT IntVT = VT.changeTypeToInteger();
6227     auto *M = cast<MemSDNode>(Op);
6228     M->getMemOperand()->setOffset(Offset);
6229     EVT LoadVT = Op.getValueType();
6230 
6231     if (LoadVT.getScalarType() == MVT::f16)
6232       return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
6233                                  M, DAG, Ops);
6234 
6235     // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
6236     if (LoadVT.getScalarType() == MVT::i8 ||
6237         LoadVT.getScalarType() == MVT::i16)
6238       return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
6239 
6240     return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
6241                                M->getMemOperand(), DAG);
6242   }
6243   case Intrinsic::amdgcn_raw_buffer_load:
6244   case Intrinsic::amdgcn_raw_buffer_load_format: {
6245     const bool IsFormat = IntrID == Intrinsic::amdgcn_raw_buffer_load_format;
6246 
6247     auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
6248     SDValue Ops[] = {
6249       Op.getOperand(0), // Chain
6250       Op.getOperand(2), // rsrc
6251       DAG.getConstant(0, DL, MVT::i32), // vindex
6252       Offsets.first,    // voffset
6253       Op.getOperand(4), // soffset
6254       Offsets.second,   // offset
6255       Op.getOperand(5), // cachepolicy, swizzled buffer
6256       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6257     };
6258 
6259     auto *M = cast<MemSDNode>(Op);
6260     M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[3], Ops[4], Ops[5]));
6261     return lowerIntrinsicLoad(M, IsFormat, DAG, Ops);
6262   }
6263   case Intrinsic::amdgcn_struct_buffer_load:
6264   case Intrinsic::amdgcn_struct_buffer_load_format: {
6265     const bool IsFormat = IntrID == Intrinsic::amdgcn_struct_buffer_load_format;
6266 
6267     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6268     SDValue Ops[] = {
6269       Op.getOperand(0), // Chain
6270       Op.getOperand(2), // rsrc
6271       Op.getOperand(3), // vindex
6272       Offsets.first,    // voffset
6273       Op.getOperand(5), // soffset
6274       Offsets.second,   // offset
6275       Op.getOperand(6), // cachepolicy, swizzled buffer
6276       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6277     };
6278 
6279     auto *M = cast<MemSDNode>(Op);
6280     M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[3], Ops[4], Ops[5],
6281                                                         Ops[2]));
6282     return lowerIntrinsicLoad(cast<MemSDNode>(Op), IsFormat, DAG, Ops);
6283   }
6284   case Intrinsic::amdgcn_tbuffer_load: {
6285     MemSDNode *M = cast<MemSDNode>(Op);
6286     EVT LoadVT = Op.getValueType();
6287 
6288     unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6289     unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
6290     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
6291     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
6292     unsigned IdxEn = 1;
6293     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
6294       IdxEn = Idx->getZExtValue() != 0;
6295     SDValue Ops[] = {
6296       Op.getOperand(0),  // Chain
6297       Op.getOperand(2),  // rsrc
6298       Op.getOperand(3),  // vindex
6299       Op.getOperand(4),  // voffset
6300       Op.getOperand(5),  // soffset
6301       Op.getOperand(6),  // offset
6302       DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
6303       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6304       DAG.getTargetConstant(IdxEn, DL, MVT::i1) // idxen
6305     };
6306 
6307     if (LoadVT.getScalarType() == MVT::f16)
6308       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
6309                                  M, DAG, Ops);
6310     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
6311                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
6312                                DAG);
6313   }
6314   case Intrinsic::amdgcn_raw_tbuffer_load: {
6315     MemSDNode *M = cast<MemSDNode>(Op);
6316     EVT LoadVT = Op.getValueType();
6317     auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
6318 
6319     SDValue Ops[] = {
6320       Op.getOperand(0),  // Chain
6321       Op.getOperand(2),  // rsrc
6322       DAG.getConstant(0, DL, MVT::i32), // vindex
6323       Offsets.first,     // voffset
6324       Op.getOperand(4),  // soffset
6325       Offsets.second,    // offset
6326       Op.getOperand(5),  // format
6327       Op.getOperand(6),  // cachepolicy, swizzled buffer
6328       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6329     };
6330 
6331     if (LoadVT.getScalarType() == MVT::f16)
6332       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
6333                                  M, DAG, Ops);
6334     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
6335                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
6336                                DAG);
6337   }
6338   case Intrinsic::amdgcn_struct_tbuffer_load: {
6339     MemSDNode *M = cast<MemSDNode>(Op);
6340     EVT LoadVT = Op.getValueType();
6341     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6342 
6343     SDValue Ops[] = {
6344       Op.getOperand(0),  // Chain
6345       Op.getOperand(2),  // rsrc
6346       Op.getOperand(3),  // vindex
6347       Offsets.first,     // voffset
6348       Op.getOperand(5),  // soffset
6349       Offsets.second,    // offset
6350       Op.getOperand(6),  // format
6351       Op.getOperand(7),  // cachepolicy, swizzled buffer
6352       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6353     };
6354 
6355     if (LoadVT.getScalarType() == MVT::f16)
6356       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
6357                                  M, DAG, Ops);
6358     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
6359                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
6360                                DAG);
6361   }
6362   case Intrinsic::amdgcn_buffer_atomic_swap:
6363   case Intrinsic::amdgcn_buffer_atomic_add:
6364   case Intrinsic::amdgcn_buffer_atomic_sub:
6365   case Intrinsic::amdgcn_buffer_atomic_smin:
6366   case Intrinsic::amdgcn_buffer_atomic_umin:
6367   case Intrinsic::amdgcn_buffer_atomic_smax:
6368   case Intrinsic::amdgcn_buffer_atomic_umax:
6369   case Intrinsic::amdgcn_buffer_atomic_and:
6370   case Intrinsic::amdgcn_buffer_atomic_or:
6371   case Intrinsic::amdgcn_buffer_atomic_xor: {
6372     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6373     unsigned IdxEn = 1;
6374     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6375       IdxEn = Idx->getZExtValue() != 0;
6376     SDValue Ops[] = {
6377       Op.getOperand(0), // Chain
6378       Op.getOperand(2), // vdata
6379       Op.getOperand(3), // rsrc
6380       Op.getOperand(4), // vindex
6381       SDValue(),        // voffset -- will be set by setBufferOffsets
6382       SDValue(),        // soffset -- will be set by setBufferOffsets
6383       SDValue(),        // offset -- will be set by setBufferOffsets
6384       DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
6385       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6386     };
6387     unsigned Offset = setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
6388     // We don't know the offset if vindex is non-zero, so clear it.
6389     if (IdxEn)
6390       Offset = 0;
6391     EVT VT = Op.getValueType();
6392 
6393     auto *M = cast<MemSDNode>(Op);
6394     M->getMemOperand()->setOffset(Offset);
6395     unsigned Opcode = 0;
6396 
6397     switch (IntrID) {
6398     case Intrinsic::amdgcn_buffer_atomic_swap:
6399       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6400       break;
6401     case Intrinsic::amdgcn_buffer_atomic_add:
6402       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6403       break;
6404     case Intrinsic::amdgcn_buffer_atomic_sub:
6405       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6406       break;
6407     case Intrinsic::amdgcn_buffer_atomic_smin:
6408       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6409       break;
6410     case Intrinsic::amdgcn_buffer_atomic_umin:
6411       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6412       break;
6413     case Intrinsic::amdgcn_buffer_atomic_smax:
6414       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6415       break;
6416     case Intrinsic::amdgcn_buffer_atomic_umax:
6417       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6418       break;
6419     case Intrinsic::amdgcn_buffer_atomic_and:
6420       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6421       break;
6422     case Intrinsic::amdgcn_buffer_atomic_or:
6423       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6424       break;
6425     case Intrinsic::amdgcn_buffer_atomic_xor:
6426       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6427       break;
6428     default:
6429       llvm_unreachable("unhandled atomic opcode");
6430     }
6431 
6432     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6433                                    M->getMemOperand());
6434   }
6435   case Intrinsic::amdgcn_raw_buffer_atomic_swap:
6436   case Intrinsic::amdgcn_raw_buffer_atomic_add:
6437   case Intrinsic::amdgcn_raw_buffer_atomic_sub:
6438   case Intrinsic::amdgcn_raw_buffer_atomic_smin:
6439   case Intrinsic::amdgcn_raw_buffer_atomic_umin:
6440   case Intrinsic::amdgcn_raw_buffer_atomic_smax:
6441   case Intrinsic::amdgcn_raw_buffer_atomic_umax:
6442   case Intrinsic::amdgcn_raw_buffer_atomic_and:
6443   case Intrinsic::amdgcn_raw_buffer_atomic_or:
6444   case Intrinsic::amdgcn_raw_buffer_atomic_xor:
6445   case Intrinsic::amdgcn_raw_buffer_atomic_inc:
6446   case Intrinsic::amdgcn_raw_buffer_atomic_dec: {
6447     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6448     SDValue Ops[] = {
6449       Op.getOperand(0), // Chain
6450       Op.getOperand(2), // vdata
6451       Op.getOperand(3), // rsrc
6452       DAG.getConstant(0, DL, MVT::i32), // vindex
6453       Offsets.first,    // voffset
6454       Op.getOperand(5), // soffset
6455       Offsets.second,   // offset
6456       Op.getOperand(6), // cachepolicy
6457       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6458     };
6459     EVT VT = Op.getValueType();
6460 
6461     auto *M = cast<MemSDNode>(Op);
6462     M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6]));
6463     unsigned Opcode = 0;
6464 
6465     switch (IntrID) {
6466     case Intrinsic::amdgcn_raw_buffer_atomic_swap:
6467       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6468       break;
6469     case Intrinsic::amdgcn_raw_buffer_atomic_add:
6470       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6471       break;
6472     case Intrinsic::amdgcn_raw_buffer_atomic_sub:
6473       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6474       break;
6475     case Intrinsic::amdgcn_raw_buffer_atomic_smin:
6476       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6477       break;
6478     case Intrinsic::amdgcn_raw_buffer_atomic_umin:
6479       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6480       break;
6481     case Intrinsic::amdgcn_raw_buffer_atomic_smax:
6482       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6483       break;
6484     case Intrinsic::amdgcn_raw_buffer_atomic_umax:
6485       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6486       break;
6487     case Intrinsic::amdgcn_raw_buffer_atomic_and:
6488       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6489       break;
6490     case Intrinsic::amdgcn_raw_buffer_atomic_or:
6491       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6492       break;
6493     case Intrinsic::amdgcn_raw_buffer_atomic_xor:
6494       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6495       break;
6496     case Intrinsic::amdgcn_raw_buffer_atomic_inc:
6497       Opcode = AMDGPUISD::BUFFER_ATOMIC_INC;
6498       break;
6499     case Intrinsic::amdgcn_raw_buffer_atomic_dec:
6500       Opcode = AMDGPUISD::BUFFER_ATOMIC_DEC;
6501       break;
6502     default:
6503       llvm_unreachable("unhandled atomic opcode");
6504     }
6505 
6506     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6507                                    M->getMemOperand());
6508   }
6509   case Intrinsic::amdgcn_struct_buffer_atomic_swap:
6510   case Intrinsic::amdgcn_struct_buffer_atomic_add:
6511   case Intrinsic::amdgcn_struct_buffer_atomic_sub:
6512   case Intrinsic::amdgcn_struct_buffer_atomic_smin:
6513   case Intrinsic::amdgcn_struct_buffer_atomic_umin:
6514   case Intrinsic::amdgcn_struct_buffer_atomic_smax:
6515   case Intrinsic::amdgcn_struct_buffer_atomic_umax:
6516   case Intrinsic::amdgcn_struct_buffer_atomic_and:
6517   case Intrinsic::amdgcn_struct_buffer_atomic_or:
6518   case Intrinsic::amdgcn_struct_buffer_atomic_xor:
6519   case Intrinsic::amdgcn_struct_buffer_atomic_inc:
6520   case Intrinsic::amdgcn_struct_buffer_atomic_dec: {
6521     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6522     SDValue Ops[] = {
6523       Op.getOperand(0), // Chain
6524       Op.getOperand(2), // vdata
6525       Op.getOperand(3), // rsrc
6526       Op.getOperand(4), // vindex
6527       Offsets.first,    // voffset
6528       Op.getOperand(6), // soffset
6529       Offsets.second,   // offset
6530       Op.getOperand(7), // cachepolicy
6531       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6532     };
6533     EVT VT = Op.getValueType();
6534 
6535     auto *M = cast<MemSDNode>(Op);
6536     M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6],
6537                                                         Ops[3]));
6538     unsigned Opcode = 0;
6539 
6540     switch (IntrID) {
6541     case Intrinsic::amdgcn_struct_buffer_atomic_swap:
6542       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6543       break;
6544     case Intrinsic::amdgcn_struct_buffer_atomic_add:
6545       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6546       break;
6547     case Intrinsic::amdgcn_struct_buffer_atomic_sub:
6548       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6549       break;
6550     case Intrinsic::amdgcn_struct_buffer_atomic_smin:
6551       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6552       break;
6553     case Intrinsic::amdgcn_struct_buffer_atomic_umin:
6554       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6555       break;
6556     case Intrinsic::amdgcn_struct_buffer_atomic_smax:
6557       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6558       break;
6559     case Intrinsic::amdgcn_struct_buffer_atomic_umax:
6560       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6561       break;
6562     case Intrinsic::amdgcn_struct_buffer_atomic_and:
6563       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6564       break;
6565     case Intrinsic::amdgcn_struct_buffer_atomic_or:
6566       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6567       break;
6568     case Intrinsic::amdgcn_struct_buffer_atomic_xor:
6569       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6570       break;
6571     case Intrinsic::amdgcn_struct_buffer_atomic_inc:
6572       Opcode = AMDGPUISD::BUFFER_ATOMIC_INC;
6573       break;
6574     case Intrinsic::amdgcn_struct_buffer_atomic_dec:
6575       Opcode = AMDGPUISD::BUFFER_ATOMIC_DEC;
6576       break;
6577     default:
6578       llvm_unreachable("unhandled atomic opcode");
6579     }
6580 
6581     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6582                                    M->getMemOperand());
6583   }
6584   case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
6585     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6586     unsigned IdxEn = 1;
6587     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(5)))
6588       IdxEn = Idx->getZExtValue() != 0;
6589     SDValue Ops[] = {
6590       Op.getOperand(0), // Chain
6591       Op.getOperand(2), // src
6592       Op.getOperand(3), // cmp
6593       Op.getOperand(4), // rsrc
6594       Op.getOperand(5), // vindex
6595       SDValue(),        // voffset -- will be set by setBufferOffsets
6596       SDValue(),        // soffset -- will be set by setBufferOffsets
6597       SDValue(),        // offset -- will be set by setBufferOffsets
6598       DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
6599       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6600     };
6601     unsigned Offset = setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]);
6602     // We don't know the offset if vindex is non-zero, so clear it.
6603     if (IdxEn)
6604       Offset = 0;
6605     EVT VT = Op.getValueType();
6606     auto *M = cast<MemSDNode>(Op);
6607     M->getMemOperand()->setOffset(Offset);
6608 
6609     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6610                                    Op->getVTList(), Ops, VT, M->getMemOperand());
6611   }
6612   case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: {
6613     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6614     SDValue Ops[] = {
6615       Op.getOperand(0), // Chain
6616       Op.getOperand(2), // src
6617       Op.getOperand(3), // cmp
6618       Op.getOperand(4), // rsrc
6619       DAG.getConstant(0, DL, MVT::i32), // vindex
6620       Offsets.first,    // voffset
6621       Op.getOperand(6), // soffset
6622       Offsets.second,   // offset
6623       Op.getOperand(7), // cachepolicy
6624       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6625     };
6626     EVT VT = Op.getValueType();
6627     auto *M = cast<MemSDNode>(Op);
6628     M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[5], Ops[6], Ops[7]));
6629 
6630     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6631                                    Op->getVTList(), Ops, VT, M->getMemOperand());
6632   }
6633   case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: {
6634     auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG);
6635     SDValue Ops[] = {
6636       Op.getOperand(0), // Chain
6637       Op.getOperand(2), // src
6638       Op.getOperand(3), // cmp
6639       Op.getOperand(4), // rsrc
6640       Op.getOperand(5), // vindex
6641       Offsets.first,    // voffset
6642       Op.getOperand(7), // soffset
6643       Offsets.second,   // offset
6644       Op.getOperand(8), // cachepolicy
6645       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6646     };
6647     EVT VT = Op.getValueType();
6648     auto *M = cast<MemSDNode>(Op);
6649     M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[5], Ops[6], Ops[7],
6650                                                         Ops[4]));
6651 
6652     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6653                                    Op->getVTList(), Ops, VT, M->getMemOperand());
6654   }
6655 
6656   default:
6657     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6658             AMDGPU::getImageDimIntrinsicInfo(IntrID))
6659       return lowerImage(Op, ImageDimIntr, DAG);
6660 
6661     return SDValue();
6662   }
6663 }
6664 
6665 // Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to
6666 // dwordx4 if on SI.
6667 SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL,
6668                                               SDVTList VTList,
6669                                               ArrayRef<SDValue> Ops, EVT MemVT,
6670                                               MachineMemOperand *MMO,
6671                                               SelectionDAG &DAG) const {
6672   EVT VT = VTList.VTs[0];
6673   EVT WidenedVT = VT;
6674   EVT WidenedMemVT = MemVT;
6675   if (!Subtarget->hasDwordx3LoadStores() &&
6676       (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) {
6677     WidenedVT = EVT::getVectorVT(*DAG.getContext(),
6678                                  WidenedVT.getVectorElementType(), 4);
6679     WidenedMemVT = EVT::getVectorVT(*DAG.getContext(),
6680                                     WidenedMemVT.getVectorElementType(), 4);
6681     MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16);
6682   }
6683 
6684   assert(VTList.NumVTs == 2);
6685   SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]);
6686 
6687   auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops,
6688                                        WidenedMemVT, MMO);
6689   if (WidenedVT != VT) {
6690     auto Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp,
6691                                DAG.getVectorIdxConstant(0, DL));
6692     NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL);
6693   }
6694   return NewOp;
6695 }
6696 
6697 SDValue SITargetLowering::handleD16VData(SDValue VData,
6698                                          SelectionDAG &DAG) const {
6699   EVT StoreVT = VData.getValueType();
6700 
6701   // No change for f16 and legal vector D16 types.
6702   if (!StoreVT.isVector())
6703     return VData;
6704 
6705   SDLoc DL(VData);
6706   assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16");
6707 
6708   if (Subtarget->hasUnpackedD16VMem()) {
6709     // We need to unpack the packed data to store.
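    // For example, a v2f16 value is bitcast to v2i16, zero-extended
    // element-wise to v2i32 and unrolled, leaving each half value in the low
    // 16 bits of its own dword as the unpacked D16 layout expects.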
6710     EVT IntStoreVT = StoreVT.changeTypeToInteger();
6711     SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
6712 
6713     EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
6714                                         StoreVT.getVectorNumElements());
6715     SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
6716     return DAG.UnrollVectorOp(ZExt.getNode());
6717   }
6718 
6719   assert(isTypeLegal(StoreVT));
6720   return VData;
6721 }
6722 
6723 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
6724                                               SelectionDAG &DAG) const {
6725   SDLoc DL(Op);
6726   SDValue Chain = Op.getOperand(0);
6727   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
6728   MachineFunction &MF = DAG.getMachineFunction();
6729 
6730   switch (IntrinsicID) {
6731   case Intrinsic::amdgcn_exp_compr: {
6732     SDValue Src0 = Op.getOperand(4);
6733     SDValue Src1 = Op.getOperand(5);
6734     // Hack around illegal type on SI by directly selecting it.
6735     if (isTypeLegal(Src0.getValueType()))
6736       return SDValue();
6737 
6738     const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
6739     SDValue Undef = DAG.getUNDEF(MVT::f32);
6740     const SDValue Ops[] = {
6741       Op.getOperand(2), // tgt
6742       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), // src0
6743       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), // src1
6744       Undef, // src2
6745       Undef, // src3
6746       Op.getOperand(7), // vm
6747       DAG.getTargetConstant(1, DL, MVT::i1), // compr
6748       Op.getOperand(3), // en
6749       Op.getOperand(0) // Chain
6750     };
6751 
6752     unsigned Opc = Done->isNullValue() ? AMDGPU::EXP : AMDGPU::EXP_DONE;
6753     return SDValue(DAG.getMachineNode(Opc, DL, Op->getVTList(), Ops), 0);
6754   }
6755   case Intrinsic::amdgcn_s_barrier: {
6756     if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
6757       const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
6758       unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
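      // A workgroup that fits in a single wave executes in lock-step, so the
      // s_barrier is unnecessary; keep only a WAVE_BARRIER pseudo to preserve
      // the ordering for the scheduler.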
6759       if (WGSize <= ST.getWavefrontSize())
6760         return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
6761                                           Op.getOperand(0)), 0);
6762     }
6763     return SDValue();
6764   }
6765   case Intrinsic::amdgcn_tbuffer_store: {
6766     SDValue VData = Op.getOperand(2);
6767     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6768     if (IsD16)
6769       VData = handleD16VData(VData, DAG);
6770     unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
6771     unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
6772     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
6773     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue();
6774     unsigned IdxEn = 1;
6775     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6776       IdxEn = Idx->getZExtValue() != 0;
6777     SDValue Ops[] = {
6778       Chain,
6779       VData,             // vdata
6780       Op.getOperand(3),  // rsrc
6781       Op.getOperand(4),  // vindex
6782       Op.getOperand(5),  // voffset
6783       Op.getOperand(6),  // soffset
6784       Op.getOperand(7),  // offset
6785       DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
6786       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6787       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6788     };
6789     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6790                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6791     MemSDNode *M = cast<MemSDNode>(Op);
6792     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6793                                    M->getMemoryVT(), M->getMemOperand());
6794   }
6795 
6796   case Intrinsic::amdgcn_struct_tbuffer_store: {
6797     SDValue VData = Op.getOperand(2);
6798     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6799     if (IsD16)
6800       VData = handleD16VData(VData, DAG);
6801     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6802     SDValue Ops[] = {
6803       Chain,
6804       VData,             // vdata
6805       Op.getOperand(3),  // rsrc
6806       Op.getOperand(4),  // vindex
6807       Offsets.first,     // voffset
6808       Op.getOperand(6),  // soffset
6809       Offsets.second,    // offset
6810       Op.getOperand(7),  // format
6811       Op.getOperand(8),  // cachepolicy, swizzled buffer
6812       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6813     };
6814     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6815                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6816     MemSDNode *M = cast<MemSDNode>(Op);
6817     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6818                                    M->getMemoryVT(), M->getMemOperand());
6819   }
6820 
6821   case Intrinsic::amdgcn_raw_tbuffer_store: {
6822     SDValue VData = Op.getOperand(2);
6823     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6824     if (IsD16)
6825       VData = handleD16VData(VData, DAG);
6826     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6827     SDValue Ops[] = {
6828       Chain,
6829       VData,             // vdata
6830       Op.getOperand(3),  // rsrc
6831       DAG.getConstant(0, DL, MVT::i32), // vindex
6832       Offsets.first,     // voffset
6833       Op.getOperand(5),  // soffset
6834       Offsets.second,    // offset
6835       Op.getOperand(6),  // format
6836       Op.getOperand(7),  // cachepolicy, swizzled buffer
6837       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6838     };
6839     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6840                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6841     MemSDNode *M = cast<MemSDNode>(Op);
6842     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6843                                    M->getMemoryVT(), M->getMemOperand());
6844   }
6845 
6846   case Intrinsic::amdgcn_buffer_store:
6847   case Intrinsic::amdgcn_buffer_store_format: {
6848     SDValue VData = Op.getOperand(2);
6849     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6850     if (IsD16)
6851       VData = handleD16VData(VData, DAG);
6852     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6853     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6854     unsigned IdxEn = 1;
6855     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6856       IdxEn = Idx->getZExtValue() != 0;
6857     SDValue Ops[] = {
6858       Chain,
6859       VData,
6860       Op.getOperand(3), // rsrc
6861       Op.getOperand(4), // vindex
6862       SDValue(), // voffset -- will be set by setBufferOffsets
6863       SDValue(), // soffset -- will be set by setBufferOffsets
6864       SDValue(), // offset -- will be set by setBufferOffsets
6865       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6866       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6867     };
6868     unsigned Offset = setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
6869     // We don't know the offset if vindex is non-zero, so clear it.
6870     if (IdxEn)
6871       Offset = 0;
6872     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
6873                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6874     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6875     MemSDNode *M = cast<MemSDNode>(Op);
6876     M->getMemOperand()->setOffset(Offset);
6877 
6878     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6879     EVT VDataType = VData.getValueType().getScalarType();
6880     if (VDataType == MVT::i8 || VDataType == MVT::i16)
6881       return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6882 
6883     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6884                                    M->getMemoryVT(), M->getMemOperand());
6885   }
6886 
6887   case Intrinsic::amdgcn_raw_buffer_store:
6888   case Intrinsic::amdgcn_raw_buffer_store_format: {
6889     const bool IsFormat =
6890         IntrinsicID == Intrinsic::amdgcn_raw_buffer_store_format;
6891 
6892     SDValue VData = Op.getOperand(2);
6893     EVT VDataVT = VData.getValueType();
6894     EVT EltType = VDataVT.getScalarType();
6895     bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
6896     if (IsD16)
6897       VData = handleD16VData(VData, DAG);
6898 
6899     if (!isTypeLegal(VDataVT)) {
6900       VData =
6901           DAG.getNode(ISD::BITCAST, DL,
6902                       getEquivalentMemType(*DAG.getContext(), VDataVT), VData);
6903     }
6904 
6905     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6906     SDValue Ops[] = {
6907       Chain,
6908       VData,
6909       Op.getOperand(3), // rsrc
6910       DAG.getConstant(0, DL, MVT::i32), // vindex
6911       Offsets.first,    // voffset
6912       Op.getOperand(5), // soffset
6913       Offsets.second,   // offset
6914       Op.getOperand(6), // cachepolicy, swizzled buffer
6915       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6916     };
6917     unsigned Opc =
6918         IsFormat ? AMDGPUISD::BUFFER_STORE_FORMAT : AMDGPUISD::BUFFER_STORE;
6919     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6920     MemSDNode *M = cast<MemSDNode>(Op);
6921     M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6]));
6922 
6923     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6924     if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32)
6925       return handleByteShortBufferStores(DAG, VDataVT, DL, Ops, M);
6926 
6927     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6928                                    M->getMemoryVT(), M->getMemOperand());
6929   }
6930 
6931   case Intrinsic::amdgcn_struct_buffer_store:
6932   case Intrinsic::amdgcn_struct_buffer_store_format: {
6933     const bool IsFormat =
6934         IntrinsicID == Intrinsic::amdgcn_struct_buffer_store_format;
6935 
6936     SDValue VData = Op.getOperand(2);
6937     EVT VDataVT = VData.getValueType();
6938     EVT EltType = VDataVT.getScalarType();
6939     bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
6940 
6941     if (IsD16)
6942       VData = handleD16VData(VData, DAG);
6943 
6944     if (!isTypeLegal(VDataVT)) {
6945       VData =
6946           DAG.getNode(ISD::BITCAST, DL,
6947                       getEquivalentMemType(*DAG.getContext(), VDataVT), VData);
6948     }
6949 
6950     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6951     SDValue Ops[] = {
6952       Chain,
6953       VData,
6954       Op.getOperand(3), // rsrc
6955       Op.getOperand(4), // vindex
6956       Offsets.first,    // voffset
6957       Op.getOperand(6), // soffset
6958       Offsets.second,   // offset
6959       Op.getOperand(7), // cachepolicy, swizzled buffer
6960       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6961     };
6962     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ?
6963                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6964     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6965     MemSDNode *M = cast<MemSDNode>(Op);
6966     M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6],
6967                                                         Ops[3]));
6968 
6969     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6970     EVT VDataType = VData.getValueType().getScalarType();
6971     if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32)
6972       return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6973 
6974     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6975                                    M->getMemoryVT(), M->getMemOperand());
6976   }
6977 
6978   case Intrinsic::amdgcn_buffer_atomic_fadd: {
6979     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6980     unsigned IdxEn = 1;
6981     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6982       IdxEn = Idx->getZExtValue() != 0;
6983     SDValue Ops[] = {
6984       Chain,
6985       Op.getOperand(2), // vdata
6986       Op.getOperand(3), // rsrc
6987       Op.getOperand(4), // vindex
6988       SDValue(),        // voffset -- will be set by setBufferOffsets
6989       SDValue(),        // soffset -- will be set by setBufferOffsets
6990       SDValue(),        // offset -- will be set by setBufferOffsets
6991       DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
6992       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6993     };
6994     unsigned Offset = setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
6995     // We don't know the offset if vindex is non-zero, so clear it.
6996     if (IdxEn)
6997       Offset = 0;
6998     EVT VT = Op.getOperand(2).getValueType();
6999 
7000     auto *M = cast<MemSDNode>(Op);
7001     M->getMemOperand()->setOffset(Offset);
7002     unsigned Opcode = VT.isVector() ? AMDGPUISD::BUFFER_ATOMIC_PK_FADD
7003                                     : AMDGPUISD::BUFFER_ATOMIC_FADD;
7004 
7005     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
7006                                    M->getMemOperand());
7007   }
7008 
7009   case Intrinsic::amdgcn_global_atomic_fadd: {
7010     SDValue Ops[] = {
7011       Chain,
7012       Op.getOperand(2), // ptr
7013       Op.getOperand(3)  // vdata
7014     };
7015     EVT VT = Op.getOperand(3).getValueType();
7016 
7017     auto *M = cast<MemSDNode>(Op);
7018     if (VT.isVector()) {
7019       return DAG.getMemIntrinsicNode(
7020         AMDGPUISD::ATOMIC_PK_FADD, DL, Op->getVTList(), Ops, VT,
7021         M->getMemOperand());
7022     }
7023 
7024     return DAG.getAtomic(ISD::ATOMIC_LOAD_FADD, DL, VT,
7025                          DAG.getVTList(VT, MVT::Other), Ops,
7026                          M->getMemOperand()).getValue(1);
7027   }
7028   case Intrinsic::amdgcn_end_cf:
7029     return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other,
7030                                       Op->getOperand(2), Chain), 0);
7031 
7032   default: {
7033     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
7034             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
7035       return lowerImage(Op, ImageDimIntr, DAG);
7036 
7037     return Op;
7038   }
7039   }
7040 }
7041 
7042 // The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args:
7043 // offset (the offset that is included in bounds checking and swizzling, to be
7044 // split between the instruction's voffset and immoffset fields) and soffset
7045 // (the offset that is excluded from bounds checking and swizzling, to go in
7046 // the instruction's soffset field).  This function takes the first kind of
7047 // offset and figures out how to split it between voffset and immoffset.
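// For example, a combined offset of 8292 is returned as voffset = 8192 and
// immoffset = 100.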
7048 std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets(
7049     SDValue Offset, SelectionDAG &DAG) const {
7050   SDLoc DL(Offset);
7051   const unsigned MaxImm = 4095;
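  // 4095 is the largest value that fits in the instruction's 12-bit immediate
  // offset field.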
7052   SDValue N0 = Offset;
7053   ConstantSDNode *C1 = nullptr;
7054 
7055   if ((C1 = dyn_cast<ConstantSDNode>(N0)))
7056     N0 = SDValue();
7057   else if (DAG.isBaseWithConstantOffset(N0)) {
7058     C1 = cast<ConstantSDNode>(N0.getOperand(1));
7059     N0 = N0.getOperand(0);
7060   }
7061 
7062   if (C1) {
7063     unsigned ImmOffset = C1->getZExtValue();
7064     // If the immediate value is too big for the immoffset field, keep only the
7065     // low 12 bits (ImmOffset & 4095) there and move the rest (ImmOffset & -4096)
7066     // into the voffset, so the value copied/added for voffset is a multiple of
7067     // 4096 and stands more chance of being CSEd with the copy/add for another
7068     // similar load/store. However, skip that rounding when the multiple of 4096
7069     // is negative as a signed value, since a negative offset in the vgpr appears
7070     // to be illegal even if adding the immediate offset makes it positive.
7071     unsigned Overflow = ImmOffset & ~MaxImm;
7072     ImmOffset -= Overflow;
7073     if ((int32_t)Overflow < 0) {
7074       Overflow += ImmOffset;
7075       ImmOffset = 0;
7076     }
7077     C1 = cast<ConstantSDNode>(DAG.getTargetConstant(ImmOffset, DL, MVT::i32));
7078     if (Overflow) {
7079       auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32);
7080       if (!N0)
7081         N0 = OverflowVal;
7082       else {
7083         SDValue Ops[] = { N0, OverflowVal };
7084         N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops);
7085       }
7086     }
7087   }
7088   if (!N0)
7089     N0 = DAG.getConstant(0, DL, MVT::i32);
7090   if (!C1)
7091     C1 = cast<ConstantSDNode>(DAG.getTargetConstant(0, DL, MVT::i32));
7092   return {N0, SDValue(C1, 0)};
7093 }
7094 
7095 // Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the
7096 // three offsets (voffset, soffset and instoffset) into the SDValue[3] array
7097 // pointed to by Offsets.
7098 unsigned SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
7099                                         SelectionDAG &DAG, SDValue *Offsets,
7100                                         unsigned Align) const {
7101   SDLoc DL(CombinedOffset);
7102   if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) {
7103     uint32_t Imm = C->getZExtValue();
7104     uint32_t SOffset, ImmOffset;
7105     if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, Align)) {
7106       Offsets[0] = DAG.getConstant(0, DL, MVT::i32);
7107       Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
7108       Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32);
7109       return SOffset + ImmOffset;
7110     }
7111   }
7112   if (DAG.isBaseWithConstantOffset(CombinedOffset)) {
7113     SDValue N0 = CombinedOffset.getOperand(0);
7114     SDValue N1 = CombinedOffset.getOperand(1);
7115     uint32_t SOffset, ImmOffset;
7116     int Offset = cast<ConstantSDNode>(N1)->getSExtValue();
7117     if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset,
7118                                                 Subtarget, Align)) {
7119       Offsets[0] = N0;
7120       Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
7121       Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32);
7122       return 0;
7123     }
7124   }
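  // Otherwise the offset is fully variable: pass it all in voffset and leave
  // soffset and the immediate offset at zero.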
7125   Offsets[0] = CombinedOffset;
7126   Offsets[1] = DAG.getConstant(0, DL, MVT::i32);
7127   Offsets[2] = DAG.getTargetConstant(0, DL, MVT::i32);
7128   return 0;
7129 }
7130 
7131 // Handle 8-bit and 16-bit buffer loads
7132 SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG,
7133                                                      EVT LoadVT, SDLoc DL,
7134                                                      ArrayRef<SDValue> Ops,
7135                                                      MemSDNode *M) const {
7136   EVT IntVT = LoadVT.changeTypeToInteger();
7137   unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ?
7138          AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT;
7139 
7140   SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other);
7141   SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList,
7142                                                Ops, IntVT,
7143                                                M->getMemOperand());
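  // The buffer load returns a 32-bit result; truncate it to the requested
  // width and bitcast back to the original (possibly floating-point) type.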
7144   SDValue LoadVal = DAG.getNode(ISD::TRUNCATE, DL, IntVT, BufferLoad);
7145   LoadVal = DAG.getNode(ISD::BITCAST, DL, LoadVT, LoadVal);
7146 
7147   return DAG.getMergeValues({LoadVal, BufferLoad.getValue(1)}, DL);
7148 }
7149 
7150 // Handle 8-bit and 16-bit buffer stores
7151 SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG,
7152                                                       EVT VDataType, SDLoc DL,
7153                                                       SDValue Ops[],
7154                                                       MemSDNode *M) const {
7155   if (VDataType == MVT::f16)
7156     Ops[1] = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Ops[1]);
7157 
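  // The byte/short store nodes expect the data any-extended to 32 bits.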
7158   SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]);
7159   Ops[1] = BufferStoreExt;
7160   unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE :
7161                                  AMDGPUISD::BUFFER_STORE_SHORT;
7162   ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9);
7163   return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType,
7164                                      M->getMemOperand());
7165 }
7166 
7167 static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
7168                                  ISD::LoadExtType ExtType, SDValue Op,
7169                                  const SDLoc &SL, EVT VT) {
7170   if (VT.bitsLT(Op.getValueType()))
7171     return DAG.getNode(ISD::TRUNCATE, SL, VT, Op);
7172 
7173   switch (ExtType) {
7174   case ISD::SEXTLOAD:
7175     return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op);
7176   case ISD::ZEXTLOAD:
7177     return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op);
7178   case ISD::EXTLOAD:
7179     return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op);
7180   case ISD::NON_EXTLOAD:
7181     return Op;
7182   }
7183 
7184   llvm_unreachable("invalid ext type");
7185 }
7186 
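// Widen a uniform, sub-dword load from constant (or invariant global) memory
// to a full 32-bit load, then truncate/extend the result back to the original
// type.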
7187 SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
7188   SelectionDAG &DAG = DCI.DAG;
7189   if (Ld->getAlignment() < 4 || Ld->isDivergent())
7190     return SDValue();
7191 
7192   // FIXME: Constant loads should all be marked invariant.
7193   unsigned AS = Ld->getAddressSpace();
7194   if (AS != AMDGPUAS::CONSTANT_ADDRESS &&
7195       AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
7196       (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant()))
7197     return SDValue();
7198 
7199   // Don't do this early, since it may interfere with adjacent load merging for
7200   // illegal types. We can avoid losing alignment information for exotic types
7201   // pre-legalize.
7202   EVT MemVT = Ld->getMemoryVT();
7203   if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) ||
7204       MemVT.getSizeInBits() >= 32)
7205     return SDValue();
7206 
7207   SDLoc SL(Ld);
7208 
7209   assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
7210          "unexpected vector extload");
7211 
7212   // TODO: Drop only high part of range.
7213   SDValue Ptr = Ld->getBasePtr();
7214   SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
7215                                 MVT::i32, SL, Ld->getChain(), Ptr,
7216                                 Ld->getOffset(),
7217                                 Ld->getPointerInfo(), MVT::i32,
7218                                 Ld->getAlignment(),
7219                                 Ld->getMemOperand()->getFlags(),
7220                                 Ld->getAAInfo(),
7221                                 nullptr); // Drop ranges
7222 
7223   EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
7224   if (MemVT.isFloatingPoint()) {
7225     assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
7226            "unexpected fp extload");
7227     TruncVT = MemVT.changeTypeToInteger();
7228   }
7229 
7230   SDValue Cvt = NewLoad;
7231   if (Ld->getExtensionType() == ISD::SEXTLOAD) {
7232     Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad,
7233                       DAG.getValueType(TruncVT));
7234   } else if (Ld->getExtensionType() == ISD::ZEXTLOAD ||
7235              Ld->getExtensionType() == ISD::NON_EXTLOAD) {
7236     Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT);
7237   } else {
7238     assert(Ld->getExtensionType() == ISD::EXTLOAD);
7239   }
7240 
7241   EVT VT = Ld->getValueType(0);
7242   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
7243 
7244   DCI.AddToWorklist(Cvt.getNode());
7245 
7246   // We may need to handle exotic cases, such as i16->i64 extloads, so insert
7247   // the appropriate extension from the 32-bit load.
7248   Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT);
7249   DCI.AddToWorklist(Cvt.getNode());
7250 
7251   // Handle conversion back to floating point if necessary.
7252   Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt);
7253 
7254   return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL);
7255 }
7256 
7257 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7258   SDLoc DL(Op);
7259   LoadSDNode *Load = cast<LoadSDNode>(Op);
7260   ISD::LoadExtType ExtType = Load->getExtensionType();
7261   EVT MemVT = Load->getMemoryVT();
7262 
7263   if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
7264     if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
7265       return SDValue();
7266 
7267     // FIXME: Copied from PPC
7268     // First, load into 32 bits, then truncate to 1 bit.
7269 
7270     SDValue Chain = Load->getChain();
7271     SDValue BasePtr = Load->getBasePtr();
7272     MachineMemOperand *MMO = Load->getMemOperand();
7273 
7274     EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
7275 
7276     SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
7277                                    BasePtr, RealMemVT, MMO);
7278 
7279     if (!MemVT.isVector()) {
7280       SDValue Ops[] = {
7281         DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
7282         NewLD.getValue(1)
7283       };
7284 
7285       return DAG.getMergeValues(Ops, DL);
7286     }
7287 
7288     SmallVector<SDValue, 3> Elts;
7289     for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) {
7290       SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD,
7291                                 DAG.getConstant(I, DL, MVT::i32));
7292 
7293       Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt));
7294     }
7295 
7296     SDValue Ops[] = {
7297       DAG.getBuildVector(MemVT, DL, Elts),
7298       NewLD.getValue(1)
7299     };
7300 
7301     return DAG.getMergeValues(Ops, DL);
7302   }
7303 
7304   if (!MemVT.isVector())
7305     return SDValue();
7306 
7307   assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
7308          "Custom lowering for non-i32 vectors hasn't been implemented.");
7309 
7310   if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
7311                                       MemVT, *Load->getMemOperand())) {
7312     SDValue Ops[2];
7313     std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
7314     return DAG.getMergeValues(Ops, DL);
7315   }
7316 
7317   unsigned Alignment = Load->getAlignment();
7318   unsigned AS = Load->getAddressSpace();
7319   if (Subtarget->hasLDSMisalignedBug() &&
7320       AS == AMDGPUAS::FLAT_ADDRESS &&
7321       Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) {
7322     return SplitVectorLoad(Op, DAG);
7323   }
7324 
7325   MachineFunction &MF = DAG.getMachineFunction();
7326   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
7327   // If there is a possibility that flat instructions access scratch memory,
7328   // then we need to use the same legalization rules we use for private.
7329   if (AS == AMDGPUAS::FLAT_ADDRESS &&
7330       !Subtarget->hasMultiDwordFlatScratchAddressing())
7331     AS = MFI->hasFlatScratchInit() ?
7332          AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
7333 
7334   unsigned NumElements = MemVT.getVectorNumElements();
7335 
7336   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
7337       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
7338     if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) {
7339       if (MemVT.isPow2VectorType())
7340         return SDValue();
7341       if (NumElements == 3)
7342         return WidenVectorLoad(Op, DAG);
7343       return SplitVectorLoad(Op, DAG);
7344     }
7345     // Non-uniform loads will be selected to MUBUF instructions, so they
7346     // have the same legalization requirements as global and private
7347     // loads.
7348     //
7349   }
7350 
7351   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
7352       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
7353       AS == AMDGPUAS::GLOBAL_ADDRESS) {
7354     if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
7355         !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) &&
7356         Alignment >= 4 && NumElements < 32) {
7357       if (MemVT.isPow2VectorType())
7358         return SDValue();
7359       if (NumElements == 3)
7360         return WidenVectorLoad(Op, DAG);
7361       return SplitVectorLoad(Op, DAG);
7362     }
7363     // Non-uniform loads will be selected to MUBUF instructions, so they
7364     // have the same legalization requirements as global and private
7365     // loads.
7366     //
7367   }
7368   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
7369       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
7370       AS == AMDGPUAS::GLOBAL_ADDRESS ||
7371       AS == AMDGPUAS::FLAT_ADDRESS) {
7372     if (NumElements > 4)
7373       return SplitVectorLoad(Op, DAG);
7374     // v3 loads not supported on SI.
7375     if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7376       return WidenVectorLoad(Op, DAG);
7377     // v3 and v4 loads are supported for private and global memory.
7378     return SDValue();
7379   }
7380   if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
7381     // Depending on the setting of the private_element_size field in the
7382     // resource descriptor, we can only make private accesses up to a certain
7383     // size.
7384     switch (Subtarget->getMaxPrivateElementSize()) {
7385     case 4: {
7386       SDValue Ops[2];
7387       std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
7388       return DAG.getMergeValues(Ops, DL);
7389     }
7390     case 8:
7391       if (NumElements > 2)
7392         return SplitVectorLoad(Op, DAG);
7393       return SDValue();
7394     case 16:
7395       // Same as global/flat
7396       if (NumElements > 4)
7397         return SplitVectorLoad(Op, DAG);
7398       // v3 loads not supported on SI.
7399       if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7400         return WidenVectorLoad(Op, DAG);
7401       return SDValue();
7402     default:
7403       llvm_unreachable("unsupported private_element_size");
7404     }
7405   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
7406     // Use ds_read_b128 if possible.
7407     if (Subtarget->useDS128() && Load->getAlignment() >= 16 &&
7408         MemVT.getStoreSize() == 16)
7409       return SDValue();
7410 
7411     if (NumElements > 2)
7412       return SplitVectorLoad(Op, DAG);
7413 
7414     // SI has a hardware bug in the LDS / GDS bounds checking: if the base
7415     // address is negative, then the instruction is incorrectly treated as
7416     // out-of-bounds even if base + offsets is in bounds. Split vectorized
7417     // loads here to avoid emitting ds_read2_b32. We may re-combine the
7418     // load later in the SILoadStoreOptimizer.
7419     if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
7420         NumElements == 2 && MemVT.getStoreSize() == 8 &&
7421         Load->getAlignment() < 8) {
7422       return SplitVectorLoad(Op, DAG);
7423     }
7424   }
7425   return SDValue();
7426 }
7427 
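// Lower a 64-bit select by bitcasting the operands to v2i32, selecting the low
// and high halves separately, and bitcasting the result back.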
7428 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
7429   EVT VT = Op.getValueType();
7430   assert(VT.getSizeInBits() == 64);
7431 
7432   SDLoc DL(Op);
7433   SDValue Cond = Op.getOperand(0);
7434 
7435   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
7436   SDValue One = DAG.getConstant(1, DL, MVT::i32);
7437 
7438   SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
7439   SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
7440 
7441   SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
7442   SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
7443 
7444   SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
7445 
7446   SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
7447   SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
7448 
7449   SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
7450 
7451   SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
7452   return DAG.getNode(ISD::BITCAST, DL, VT, Res);
7453 }
7454 
7455 // Catch division cases where we can use shortcuts with rcp and rsq
7456 // instructions.
7457 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
7458                                               SelectionDAG &DAG) const {
7459   SDLoc SL(Op);
7460   SDValue LHS = Op.getOperand(0);
7461   SDValue RHS = Op.getOperand(1);
7462   EVT VT = Op.getValueType();
7463   const SDNodeFlags Flags = Op->getFlags();
7464 
7465   bool AllowInaccurateRcp = DAG.getTarget().Options.UnsafeFPMath ||
7466                             Flags.hasApproximateFuncs();
7467 
7468   // Without !fpmath accuracy information, we can't do more because we don't
7469   // know exactly whether rcp is accurate enough to meet !fpmath requirement.
7470   if (!AllowInaccurateRcp)
7471     return SDValue();
7472 
7473   if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
7474     if (CLHS->isExactlyValue(1.0)) {
7475       // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
7476       // the CI documentation have a worst-case error of 1 ulp.
7477       // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
7478       // use it as long as we aren't trying to use denormals.
7479       //
7480       // v_rcp_f16 and v_rsq_f16 DO support denormals.
7481 
7482       // 1.0 / sqrt(x) -> rsq(x)
7483 
7484       // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
7485       // error seems really high at 2^29 ULP.
7486       if (RHS.getOpcode() == ISD::FSQRT)
7487         return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
7488 
7489       // 1.0 / x -> rcp(x)
7490       return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
7491     }
7492 
7493     // Same as for 1.0, but expand the sign out of the constant.
7494     if (CLHS->isExactlyValue(-1.0)) {
7495       // -1.0 / x -> rcp (fneg x)
7496       SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
7497       return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
7498     }
7499   }
7500 
7501   // Turn into multiply by the reciprocal.
7502   // x / y -> x * (1.0 / y)
7503   SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
7504   return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
7505 }
7506 
7507 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
7508                           EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
7509   if (GlueChain->getNumValues() <= 1) {
7510     return DAG.getNode(Opcode, SL, VT, A, B);
7511   }
7512 
7513   assert(GlueChain->getNumValues() == 3);
7514 
7515   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
7516   switch (Opcode) {
7517   default: llvm_unreachable("no chain equivalent for opcode");
7518   case ISD::FMUL:
7519     Opcode = AMDGPUISD::FMUL_W_CHAIN;
7520     break;
7521   }
7522 
7523   return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
7524                      GlueChain.getValue(2));
7525 }
7526 
7527 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
7528                            EVT VT, SDValue A, SDValue B, SDValue C,
7529                            SDValue GlueChain) {
7530   if (GlueChain->getNumValues() <= 1) {
7531     return DAG.getNode(Opcode, SL, VT, A, B, C);
7532   }
7533 
7534   assert(GlueChain->getNumValues() == 3);
7535 
7536   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
7537   switch (Opcode) {
7538   default: llvm_unreachable("no chain equivalent for opcode");
7539   case ISD::FMA:
7540     Opcode = AMDGPUISD::FMA_W_CHAIN;
7541     break;
7542   }
7543 
7544   return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
7545                      GlueChain.getValue(2));
7546 }
7547 
7548 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
7549   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
7550     return FastLowered;
7551 
7552   SDLoc SL(Op);
7553   SDValue Src0 = Op.getOperand(0);
7554   SDValue Src1 = Op.getOperand(1);
7555 
7556   SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
7557   SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
7558 
7559   SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
7560   SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
7561 
7562   SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
7563   SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
7564 
7565   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
7566 }
7567 
7568 // Faster 2.5 ULP division that does not support denormals.
7569 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
7570   SDLoc SL(Op);
7571   SDValue LHS = Op.getOperand(1);
7572   SDValue RHS = Op.getOperand(2);
7573 
7574   SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
7575 
7576   const APFloat K0Val(BitsToFloat(0x6f800000));
7577   const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
7578 
7579   const APFloat K1Val(BitsToFloat(0x2f800000));
7580   const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
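  // K0 is 2^96 and K1 is 2^-32: if |RHS| exceeds 2^96, pre-scale it by 2^-32
  // before taking the reciprocal and multiply the result by the same factor
  // afterwards, keeping the intermediate values away from the extremes of the
  // exponent range.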
7581 
7582   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
7583 
7584   EVT SetCCVT =
7585     getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
7586 
7587   SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
7588 
7589   SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
7590 
7591   // TODO: Should this propagate fast-math-flags?
7592   r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
7593 
7594   // rcp does not support denormals.
7595   SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
7596 
7597   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
7598 
7599   return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
7600 }
7601 
7602 // Returns immediate value for setting the F32 denorm mode when using the
7603 // S_DENORM_MODE instruction.
7604 static const SDValue getSPDenormModeValue(int SPDenormMode, SelectionDAG &DAG,
7605                                           const SDLoc &SL, const GCNSubtarget *ST) {
7606   assert(ST->hasDenormModeInst() && "Requires S_DENORM_MODE");
7607   int DPDenormModeDefault = hasFP64FP16Denormals(DAG.getMachineFunction())
7608                                 ? FP_DENORM_FLUSH_NONE
7609                                 : FP_DENORM_FLUSH_IN_FLUSH_OUT;
7610 
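  // S_DENORM_MODE takes a 4-bit immediate: bits [1:0] select the FP32 denorm
  // mode and bits [3:2] the FP64/FP16 mode, so keep the function's double/half
  // setting while switching the single-precision mode.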
7611   int Mode = SPDenormMode | (DPDenormModeDefault << 2);
7612   return DAG.getTargetConstant(Mode, SL, MVT::i32);
7613 }
7614 
7615 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
7616   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
7617     return FastLowered;
7618 
7619   SDLoc SL(Op);
7620   SDValue LHS = Op.getOperand(0);
7621   SDValue RHS = Op.getOperand(1);
7622 
7623   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
7624 
7625   SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
7626 
7627   SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
7628                                           RHS, RHS, LHS);
7629   SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
7630                                         LHS, RHS, LHS);
7631 
7632   // Denominator is scaled to not be denormal, so using rcp is ok.
7633   SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
7634                                   DenominatorScaled);
7635   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
7636                                      DenominatorScaled);
7637 
7638   const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
7639                                (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
7640                                (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
7641   const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);
7642 
7643   const bool HasFP32Denormals = hasFP32Denormals(DAG.getMachineFunction());
7644 
7645   if (!HasFP32Denormals) {
7646     SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
7647 
7648     SDValue EnableDenorm;
7649     if (Subtarget->hasDenormModeInst()) {
7650       const SDValue EnableDenormValue =
7651           getSPDenormModeValue(FP_DENORM_FLUSH_NONE, DAG, SL, Subtarget);
7652 
7653       EnableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, BindParamVTs,
7654                                  DAG.getEntryNode(), EnableDenormValue);
7655     } else {
7656       const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
7657                                                         SL, MVT::i32);
7658       EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
7659                                  DAG.getEntryNode(), EnableDenormValue,
7660                                  BitField);
7661     }
7662 
7663     SDValue Ops[3] = {
7664       NegDivScale0,
7665       EnableDenorm.getValue(0),
7666       EnableDenorm.getValue(1)
7667     };
7668 
7669     NegDivScale0 = DAG.getMergeValues(Ops, SL);
7670   }
7671 
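  // One Newton-Raphson step refines the reciprocal estimate:
  //   Fma0 = 1 - d * rcp, Fma1 = rcp + rcp * Fma0.
  // The quotient estimate (Mul) is refined the same way, and the final
  // residual (Fma4) is handed to div_fmas.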
7672   SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
7673                              ApproxRcp, One, NegDivScale0);
7674 
7675   SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
7676                              ApproxRcp, Fma0);
7677 
7678   SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
7679                            Fma1, Fma1);
7680 
7681   SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
7682                              NumeratorScaled, Mul);
7683 
7684   SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul, Fma2);
7685 
7686   SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
7687                              NumeratorScaled, Fma3);
7688 
7689   if (!HasFP32Denormals) {
7690     SDValue DisableDenorm;
7691     if (Subtarget->hasDenormModeInst()) {
7692       const SDValue DisableDenormValue =
7693           getSPDenormModeValue(FP_DENORM_FLUSH_IN_FLUSH_OUT, DAG, SL, Subtarget);
7694 
7695       DisableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, MVT::Other,
7696                                   Fma4.getValue(1), DisableDenormValue,
7697                                   Fma4.getValue(2));
7698     } else {
7699       const SDValue DisableDenormValue =
7700           DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
7701 
7702       DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
7703                                   Fma4.getValue(1), DisableDenormValue,
7704                                   BitField, Fma4.getValue(2));
7705     }
7706 
7707     SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
7708                                       DisableDenorm, DAG.getRoot());
7709     DAG.setRoot(OutputChain);
7710   }
7711 
7712   SDValue Scale = NumeratorScaled.getValue(1);
7713   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
7714                              Fma4, Fma1, Fma3, Scale);
7715 
7716   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
7717 }
7718 
7719 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
7720   if (DAG.getTarget().Options.UnsafeFPMath)
7721     return lowerFastUnsafeFDIV(Op, DAG);
7722 
7723   SDLoc SL(Op);
7724   SDValue X = Op.getOperand(0);
7725   SDValue Y = Op.getOperand(1);
7726 
7727   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
7728 
7729   SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
7730 
7731   SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
7732 
7733   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
7734 
7735   SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
7736 
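  // Two Newton-Raphson iterations refine the reciprocal of the scaled
  // denominator; the quotient estimate (Mul) and its residual (Fma4) then feed
  // div_fmas.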
7737   SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
7738 
7739   SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
7740 
7741   SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
7742 
7743   SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
7744 
7745   SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
7746   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
7747 
7748   SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
7749                              NegDivScale0, Mul, DivScale1);
7750 
7751   SDValue Scale;
7752 
7753   if (!Subtarget->hasUsableDivScaleConditionOutput()) {
7754     // Work around a hardware bug on SI where the condition output from div_scale
7755     // is not usable.
7756 
7757     const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
7758 
7759     // Figure out which scale to use for div_fmas.
7760     SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
7761     SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
7762     SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
7763     SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
7764 
7765     SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
7766     SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
7767 
7768     SDValue Scale0Hi
7769       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
7770     SDValue Scale1Hi
7771       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
7772 
7773     SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
7774     SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
7775     Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
7776   } else {
7777     Scale = DivScale1.getValue(1);
7778   }
7779 
7780   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
7781                              Fma4, Fma3, Mul, Scale);
7782 
7783   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
7784 }
7785 
7786 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
7787   EVT VT = Op.getValueType();
7788 
7789   if (VT == MVT::f32)
7790     return LowerFDIV32(Op, DAG);
7791 
7792   if (VT == MVT::f64)
7793     return LowerFDIV64(Op, DAG);
7794 
7795   if (VT == MVT::f16)
7796     return LowerFDIV16(Op, DAG);
7797 
7798   llvm_unreachable("Unexpected type for fdiv");
7799 }
7800 
7801 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7802   SDLoc DL(Op);
7803   StoreSDNode *Store = cast<StoreSDNode>(Op);
7804   EVT VT = Store->getMemoryVT();
7805 
7806   if (VT == MVT::i1) {
7807     return DAG.getTruncStore(Store->getChain(), DL,
7808        DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
7809        Store->getBasePtr(), MVT::i1, Store->getMemOperand());
7810   }
7811 
7812   assert(VT.isVector() &&
7813          Store->getValue().getValueType().getScalarType() == MVT::i32);
7814 
7815   if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
7816                                       VT, *Store->getMemOperand())) {
7817     return expandUnalignedStore(Store, DAG);
7818   }
7819 
7820   unsigned AS = Store->getAddressSpace();
7821   if (Subtarget->hasLDSMisalignedBug() &&
7822       AS == AMDGPUAS::FLAT_ADDRESS &&
7823       Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) {
7824     return SplitVectorStore(Op, DAG);
7825   }
7826 
7827   MachineFunction &MF = DAG.getMachineFunction();
7828   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
7829   // If there is a possibility that flat instructions access scratch memory,
7830   // then we need to use the same legalization rules we use for private.
7831   if (AS == AMDGPUAS::FLAT_ADDRESS &&
7832       !Subtarget->hasMultiDwordFlatScratchAddressing())
7833     AS = MFI->hasFlatScratchInit() ?
7834          AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
7835 
7836   unsigned NumElements = VT.getVectorNumElements();
7837   if (AS == AMDGPUAS::GLOBAL_ADDRESS ||
7838       AS == AMDGPUAS::FLAT_ADDRESS) {
7839     if (NumElements > 4)
7840       return SplitVectorStore(Op, DAG);
7841     // v3 stores not supported on SI.
7842     if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7843       return SplitVectorStore(Op, DAG);
7844     return SDValue();
7845   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
7846     switch (Subtarget->getMaxPrivateElementSize()) {
7847     case 4:
7848       return scalarizeVectorStore(Store, DAG);
7849     case 8:
7850       if (NumElements > 2)
7851         return SplitVectorStore(Op, DAG);
7852       return SDValue();
7853     case 16:
7854       if (NumElements > 4 || NumElements == 3)
7855         return SplitVectorStore(Op, DAG);
7856       return SDValue();
7857     default:
7858       llvm_unreachable("unsupported private_element_size");
7859     }
7860   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
7861     // Use ds_write_b128 if possible.
7862     if (Subtarget->useDS128() && Store->getAlignment() >= 16 &&
7863         VT.getStoreSize() == 16 && NumElements != 3)
7864       return SDValue();
7865 
7866     if (NumElements > 2)
7867       return SplitVectorStore(Op, DAG);
7868 
7869     // SI has a hardware bug in the LDS / GDS bounds checking: if the base
7870     // address is negative, then the instruction is incorrectly treated as
7871     // out-of-bounds even if base + offsets is in bounds. Split vectorized
7872     // stores here to avoid emitting ds_write2_b32. We may re-combine the
7873     // store later in the SILoadStoreOptimizer.
7874     if (!Subtarget->hasUsableDSOffset() &&
7875         NumElements == 2 && VT.getStoreSize() == 8 &&
7876         Store->getAlignment() < 8) {
7877       return SplitVectorStore(Op, DAG);
7878     }
7879 
7880     return SDValue();
7881   } else {
7882     llvm_unreachable("unhandled address space");
7883   }
7884 }
7885 
7886 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
7887   SDLoc DL(Op);
7888   EVT VT = Op.getValueType();
7889   SDValue Arg = Op.getOperand(0);
7890   SDValue TrigVal;
7891 
7892   // TODO: Should this propagate fast-math-flags?
7893 
7894   SDValue OneOver2Pi = DAG.getConstantFP(0.5 / M_PI, DL, VT);
7895 
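  // The hardware sin/cos instructions take their argument pre-multiplied by
  // 1/(2*pi); targets with a reduced input range also need it wrapped into
  // [0,1) with fract.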
7896   if (Subtarget->hasTrigReducedRange()) {
7897     SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
7898     TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal);
7899   } else {
7900     TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
7901   }
7902 
7903   switch (Op.getOpcode()) {
7904   case ISD::FCOS:
7905     return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal);
7906   case ISD::FSIN:
7907     return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal);
7908   default:
7909     llvm_unreachable("Wrong trig opcode");
7910   }
7911 }
7912 
7913 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
7914   AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
7915   assert(AtomicNode->isCompareAndSwap());
7916   unsigned AS = AtomicNode->getAddressSpace();
7917 
7918   // No custom lowering required for local address space
7919   if (!isFlatGlobalAddrSpace(AS))
7920     return Op;
7921 
7922   // Non-local address spaces require custom lowering for atomic compare and
7923   // swap; the cmp and swap values are packed into a v2i32 (or v2i64 for _X2).
7924   SDLoc DL(Op);
7925   SDValue ChainIn = Op.getOperand(0);
7926   SDValue Addr = Op.getOperand(1);
7927   SDValue Old = Op.getOperand(2);
7928   SDValue New = Op.getOperand(3);
7929   EVT VT = Op.getValueType();
7930   MVT SimpleVT = VT.getSimpleVT();
7931   MVT VecType = MVT::getVectorVT(SimpleVT, 2);
7932 
7933   SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
7934   SDValue Ops[] = { ChainIn, Addr, NewOld };
7935 
7936   return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
7937                                  Ops, VT, AtomicNode->getMemOperand());
7938 }
7939 
7940 //===----------------------------------------------------------------------===//
7941 // Custom DAG optimizations
7942 //===----------------------------------------------------------------------===//
7943 
7944 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
7945                                                      DAGCombinerInfo &DCI) const {
7946   EVT VT = N->getValueType(0);
7947   EVT ScalarVT = VT.getScalarType();
7948   if (ScalarVT != MVT::f32)
7949     return SDValue();
7950 
7951   SelectionDAG &DAG = DCI.DAG;
7952   SDLoc DL(N);
7953 
7954   SDValue Src = N->getOperand(0);
7955   EVT SrcVT = Src.getValueType();
7956 
7957   // TODO: We could try to match extracting the higher bytes, which would be
7958   // easier if i8 vectors weren't promoted to i32 vectors, particularly after
7959   // types are legalized. v4i8 -> v4f32 is probably the only case to worry
7960   // about in practice.
7961   if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
7962     if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
7963       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
7964       DCI.AddToWorklist(Cvt.getNode());
7965       return Cvt;
7966     }
7967   }
7968 
7969   return SDValue();
7970 }
7971 
7972 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
7973 
7974 // This is a variant of
7975 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
7976 //
7977 // The normal DAG combiner will do this, but only if the add has one use since
7978 // that would increase the number of instructions.
7979 //
7980 // This prevents us from seeing a constant offset that can be folded into a
7981 // memory instruction's addressing mode. If we know the resulting add offset of
7982 // a pointer can be folded into an addressing offset, we can replace the pointer
7983 // operand with the add of new constant offset. This eliminates one of the uses,
7984 // and may allow the remaining use to also be simplified.
7985 //
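// For example, with c1 = 16 and c2 = 2 the pointer becomes
// (add (shl x, 2), 64), and the constant 64 can then be folded into the memory
// instruction's offset field.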
7986 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
7987                                                unsigned AddrSpace,
7988                                                EVT MemVT,
7989                                                DAGCombinerInfo &DCI) const {
7990   SDValue N0 = N->getOperand(0);
7991   SDValue N1 = N->getOperand(1);
7992 
7993   // We only do this to handle cases where it's profitable when there are
7994   // multiple uses of the add, so defer to the standard combine.
7995   if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
7996       N0->hasOneUse())
7997     return SDValue();
7998 
7999   const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
8000   if (!CN1)
8001     return SDValue();
8002 
8003   const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
8004   if (!CAdd)
8005     return SDValue();
8006 
8007   // If the resulting offset is too large, we can't fold it into the addressing
8008   // mode offset.
8009   APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
8010   Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());
8011 
8012   AddrMode AM;
8013   AM.HasBaseReg = true;
8014   AM.BaseOffs = Offset.getSExtValue();
8015   if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
8016     return SDValue();
8017 
8018   SelectionDAG &DAG = DCI.DAG;
8019   SDLoc SL(N);
8020   EVT VT = N->getValueType(0);
8021 
8022   SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
8023   SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
8024 
8025   SDNodeFlags Flags;
8026   Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
8027                           (N0.getOpcode() == ISD::OR ||
8028                            N0->getFlags().hasNoUnsignedWrap()));
8029 
8030   return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
8031 }
8032 
8033 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
8034                                                   DAGCombinerInfo &DCI) const {
8035   SDValue Ptr = N->getBasePtr();
8036   SelectionDAG &DAG = DCI.DAG;
8037   SDLoc SL(N);
8038 
8039   // TODO: We could also do this for multiplies.
8040   if (Ptr.getOpcode() == ISD::SHL) {
8041     SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(),  N->getAddressSpace(),
8042                                           N->getMemoryVT(), DCI);
8043     if (NewPtr) {
8044       SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
8045 
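      // The base pointer is operand 1 for loads and operand 2 for stores
      // (operand 1 of a store is the value being stored).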
8046       NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
8047       return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
8048     }
8049   }
8050 
8051   return SDValue();
8052 }
8053 
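// Returns true if (Opc x, Val) on a 32-bit half is trivial: it folds either to
// a constant or to x itself (and with 0 or -1, or with -1 or 0, xor with 0).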
8054 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
8055   return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
8056          (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
8057          (Opc == ISD::XOR && Val == 0);
8058 }
8059 
// Break up a 64-bit bitwise operation with a constant into two 32-bit
// and/or/xor operations. This will typically happen anyway for a VALU 64-bit
// and, and it exposes other 32-bit integer combine opportunities since most
// 64-bit operations are decomposed this way.
// TODO: We won't want this for SALU, especially if the constant is an inline
// immediate.
8065 SDValue SITargetLowering::splitBinaryBitConstantOp(
8066   DAGCombinerInfo &DCI,
8067   const SDLoc &SL,
8068   unsigned Opc, SDValue LHS,
8069   const ConstantSDNode *CRHS) const {
8070   uint64_t Val = CRHS->getZExtValue();
8071   uint32_t ValLo = Lo_32(Val);
8072   uint32_t ValHi = Hi_32(Val);
8073   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8074 
  if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
       bitOpWithConstantIsReducible(Opc, ValHi)) ||
      (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
8078     // If we need to materialize a 64-bit immediate, it will be split up later
8079     // anyway. Avoid creating the harder to understand 64-bit immediate
8080     // materialization.
8081     return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
8082   }
8083 
8084   return SDValue();
8085 }
8086 
// Returns true if the argument is a boolean value which is not serialized into
// memory or as an argument and does not require v_cndmask_b32 to be
// deserialized.
8089 static bool isBoolSGPR(SDValue V) {
8090   if (V.getValueType() != MVT::i1)
8091     return false;
8092   switch (V.getOpcode()) {
8093   default: break;
8094   case ISD::SETCC:
8095   case ISD::AND:
8096   case ISD::OR:
8097   case ISD::XOR:
8098   case AMDGPUISD::FP_CLASS:
8099     return true;
8100   }
8101   return false;
8102 }
8103 
8104 // If a constant has all zeroes or all ones within each byte return it.
8105 // Otherwise return 0.
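// For example, 0x00ff00ff and 0xff0000ff qualify and are returned unchanged,
// while 0x00f000ff returns 0 because the 0xf0 byte is only partially set.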
8106 static uint32_t getConstantPermuteMask(uint32_t C) {
8107   // 0xff for any zero byte in the mask
8108   uint32_t ZeroByteMask = 0;
8109   if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
8110   if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
8111   if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
8112   if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
8113   uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte
8114   if ((NonZeroByteMask & C) != NonZeroByteMask)
8115     return 0; // Partial bytes selected.
8116   return C;
8117 }
8118 
// Check if a node selects whole bytes from its operand 0 starting at a byte
// boundary while masking the rest. Returns the select mask as used by
// v_perm_b32, or ~0 on failure.
8122 // Note byte select encoding:
8123 // value 0-3 selects corresponding source byte;
8124 // value 0xc selects zero;
8125 // value 0xff selects 0xff.
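// For example, (and x, 0x0000ffff) yields mask 0x0c0c0100 (keep bytes 1:0,
// zero the rest) and (srl x, 16) yields 0x0c0c0302 (move bytes 3:2 into the
// low half, zero the rest).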
8126 static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
8127   assert(V.getValueSizeInBits() == 32);
8128 
8129   if (V.getNumOperands() != 2)
8130     return ~0;
8131 
8132   ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
8133   if (!N1)
8134     return ~0;
8135 
8136   uint32_t C = N1->getZExtValue();
8137 
8138   switch (V.getOpcode()) {
8139   default:
8140     break;
8141   case ISD::AND:
8142     if (uint32_t ConstMask = getConstantPermuteMask(C)) {
8143       return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
8144     }
8145     break;
8146 
8147   case ISD::OR:
8148     if (uint32_t ConstMask = getConstantPermuteMask(C)) {
8149       return (0x03020100 & ~ConstMask) | ConstMask;
8150     }
8151     break;
8152 
8153   case ISD::SHL:
8154     if (C % 8)
8155       return ~0;
8156 
8157     return uint32_t((0x030201000c0c0c0cull << C) >> 32);
8158 
8159   case ISD::SRL:
8160     if (C % 8)
8161       return ~0;
8162 
8163     return uint32_t(0x0c0c0c0c03020100ull >> C);
8164   }
8165 
8166   return ~0;
8167 }
8168 
8169 SDValue SITargetLowering::performAndCombine(SDNode *N,
8170                                             DAGCombinerInfo &DCI) const {
8171   if (DCI.isBeforeLegalize())
8172     return SDValue();
8173 
8174   SelectionDAG &DAG = DCI.DAG;
8175   EVT VT = N->getValueType(0);
8176   SDValue LHS = N->getOperand(0);
8177   SDValue RHS = N->getOperand(1);
8178 
8179 
8180   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
8181   if (VT == MVT::i64 && CRHS) {
8182     if (SDValue Split
8183         = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
8184       return Split;
8185   }
8186 
8187   if (CRHS && VT == MVT::i32) {
8188     // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
8189     // nb = number of trailing zeroes in mask
    // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
    // given that we are selecting 8 or 16 bit fields starting at a byte
    // boundary.
8192     uint64_t Mask = CRHS->getZExtValue();
8193     unsigned Bits = countPopulation(Mask);
8194     if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
8195         (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
8196       if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
8197         unsigned Shift = CShift->getZExtValue();
8198         unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
8199         unsigned Offset = NB + Shift;
8200         if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
8201           SDLoc SL(N);
8202           SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
8203                                     LHS->getOperand(0),
8204                                     DAG.getConstant(Offset, SL, MVT::i32),
8205                                     DAG.getConstant(Bits, SL, MVT::i32));
8206           EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
8207           SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
8208                                     DAG.getValueType(NarrowVT));
8209           SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
8210                                     DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
8211           return Shl;
8212         }
8213       }
8214     }
8215 
8216     // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
8217     if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM &&
8218         isa<ConstantSDNode>(LHS.getOperand(2))) {
8219       uint32_t Sel = getConstantPermuteMask(Mask);
8220       if (!Sel)
8221         return SDValue();
8222 
8223       // Select 0xc for all zero bytes
8224       Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c);
8225       SDLoc DL(N);
8226       return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
8227                          LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
8228     }
8229   }
8230 
8231   // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
8232   // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
8233   if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
8234     ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8235     ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
8236 
8237     SDValue X = LHS.getOperand(0);
8238     SDValue Y = RHS.getOperand(0);
8239     if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
8240       return SDValue();
8241 
8242     if (LCC == ISD::SETO) {
8243       if (X != LHS.getOperand(1))
8244         return SDValue();
8245 
8246       if (RCC == ISD::SETUNE) {
8247         const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
8248         if (!C1 || !C1->isInfinity() || C1->isNegative())
8249           return SDValue();
8250 
8251         const uint32_t Mask = SIInstrFlags::N_NORMAL |
8252                               SIInstrFlags::N_SUBNORMAL |
8253                               SIInstrFlags::N_ZERO |
8254                               SIInstrFlags::P_ZERO |
8255                               SIInstrFlags::P_SUBNORMAL |
8256                               SIInstrFlags::P_NORMAL;
8257 
8258         static_assert(((~(SIInstrFlags::S_NAN |
8259                           SIInstrFlags::Q_NAN |
8260                           SIInstrFlags::N_INFINITY |
8261                           SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
8262                       "mask not equal");
8263 
8264         SDLoc DL(N);
8265         return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
8266                            X, DAG.getConstant(Mask, DL, MVT::i32));
8267       }
8268     }
8269   }
8270 
8271   if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS)
8272     std::swap(LHS, RHS);
8273 
8274   if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS &&
8275       RHS.hasOneUse()) {
8276     ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8277     // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan)
8278     // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan)
8279     const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
8280     if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask &&
8281         (RHS.getOperand(0) == LHS.getOperand(0) &&
8282          LHS.getOperand(0) == LHS.getOperand(1))) {
8283       const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN;
8284       unsigned NewMask = LCC == ISD::SETO ?
8285         Mask->getZExtValue() & ~OrdMask :
8286         Mask->getZExtValue() & OrdMask;
8287 
8288       SDLoc DL(N);
8289       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0),
8290                          DAG.getConstant(NewMask, DL, MVT::i32));
8291     }
8292   }
8293 
8294   if (VT == MVT::i32 &&
8295       (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
8296     // and x, (sext cc from i1) => select cc, x, 0
8297     if (RHS.getOpcode() != ISD::SIGN_EXTEND)
8298       std::swap(LHS, RHS);
8299     if (isBoolSGPR(RHS.getOperand(0)))
8300       return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
8301                            LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
8302   }
8303 
8304   // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
8305   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8306   if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
8307       N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
8308     uint32_t LHSMask = getPermuteMask(DAG, LHS);
8309     uint32_t RHSMask = getPermuteMask(DAG, RHS);
8310     if (LHSMask != ~0u && RHSMask != ~0u) {
8311       // Canonicalize the expression in an attempt to have fewer unique masks
8312       // and therefore fewer registers used to hold the masks.
8313       if (LHSMask > RHSMask) {
8314         std::swap(LHSMask, RHSMask);
8315         std::swap(LHS, RHS);
8316       }
8317 
      // Compute 0xc for each lane actually used from the source operand. In
      // the input mask a zero byte is encoded as 0xc, a 0xff byte as 0xff, and
      // real source lanes use selectors in the 0-3 range.
8320       uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8321       uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8322 
      // Check if we need to combine values from two sources within a byte.
8324       if (!(LHSUsedLanes & RHSUsedLanes) &&
          // If we select the high word from one source and the low word from
          // the other, keep it for SDWA.
8326           // TODO: teach SDWA to work with v_perm_b32 and remove the check.
8327           !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
        // Each byte in each mask is either a selector in the 0-3 range, or has
        // higher bits set in either of the masks: 0xff for a 0xff byte, 0x0c
        // for a zero byte. If 0x0c appears in either mask the result byte must
        // be 0x0c. Otherwise the mask which is not 0xff wins. By anding both
        // masks we get the correct result, except that bytes selecting zero
        // must be corrected back to exactly 0x0c.
8333         uint32_t Mask = LHSMask & RHSMask;
8334         for (unsigned I = 0; I < 32; I += 8) {
8335           uint32_t ByteSel = 0xff << I;
8336           if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c)
8337             Mask &= (0x0c << I) & 0xffffffff;
8338         }
8339 
8340         // Add 4 to each active LHS lane. It will not affect any existing 0xff
8341         // or 0x0c.
8342         uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404);
8343         SDLoc DL(N);
8344 
8345         return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
8346                            LHS.getOperand(0), RHS.getOperand(0),
8347                            DAG.getConstant(Sel, DL, MVT::i32));
8348       }
8349     }
8350   }
8351 
8352   return SDValue();
8353 }
8354 
8355 SDValue SITargetLowering::performOrCombine(SDNode *N,
8356                                            DAGCombinerInfo &DCI) const {
8357   SelectionDAG &DAG = DCI.DAG;
8358   SDValue LHS = N->getOperand(0);
8359   SDValue RHS = N->getOperand(1);
8360 
8361   EVT VT = N->getValueType(0);
8362   if (VT == MVT::i1) {
8363     // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
8364     if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
8365         RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
8366       SDValue Src = LHS.getOperand(0);
8367       if (Src != RHS.getOperand(0))
8368         return SDValue();
8369 
8370       const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
8371       const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
8372       if (!CLHS || !CRHS)
8373         return SDValue();
8374 
8375       // Only 10 bits are used.
8376       static const uint32_t MaxMask = 0x3ff;
8377 
8378       uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
8379       SDLoc DL(N);
8380       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
8381                          Src, DAG.getConstant(NewMask, DL, MVT::i32));
8382     }
8383 
8384     return SDValue();
8385   }
8386 
8387   // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
8388   if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() &&
8389       LHS.getOpcode() == AMDGPUISD::PERM &&
8390       isa<ConstantSDNode>(LHS.getOperand(2))) {
8391     uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1));
8392     if (!Sel)
8393       return SDValue();
8394 
8395     Sel |= LHS.getConstantOperandVal(2);
8396     SDLoc DL(N);
8397     return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
8398                        LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
8399   }
8400 
8401   // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
8402   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8403   if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
8404       N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
8405     uint32_t LHSMask = getPermuteMask(DAG, LHS);
8406     uint32_t RHSMask = getPermuteMask(DAG, RHS);
8407     if (LHSMask != ~0u && RHSMask != ~0u) {
8408       // Canonicalize the expression in an attempt to have fewer unique masks
8409       // and therefore fewer registers used to hold the masks.
8410       if (LHSMask > RHSMask) {
8411         std::swap(LHSMask, RHSMask);
8412         std::swap(LHS, RHS);
8413       }
8414 
      // Compute 0xc for each lane actually used from the source operand. In
      // the input mask a zero byte is encoded as 0xc, a 0xff byte as 0xff, and
      // real source lanes use selectors in the 0-3 range.
8417       uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8418       uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8419 
      // Check if we need to combine values from two sources within a byte.
8421       if (!(LHSUsedLanes & RHSUsedLanes) &&
          // If we select the high word from one source and the low word from
          // the other, keep it for SDWA.
8423           // TODO: teach SDWA to work with v_perm_b32 and remove the check.
8424           !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
        // Kill zero bytes selected by the other mask. The zero selector value
        // is 0xc.
8426         LHSMask &= ~RHSUsedLanes;
8427         RHSMask &= ~LHSUsedLanes;
8428         // Add 4 to each active LHS lane
8429         LHSMask |= LHSUsedLanes & 0x04040404;
8430         // Combine masks
8431         uint32_t Sel = LHSMask | RHSMask;
8432         SDLoc DL(N);
8433 
8434         return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
8435                            LHS.getOperand(0), RHS.getOperand(0),
8436                            DAG.getConstant(Sel, DL, MVT::i32));
8437       }
8438     }
8439   }
8440 
8441   if (VT != MVT::i64)
8442     return SDValue();
8443 
8444   // TODO: This could be a generic combine with a predicate for extracting the
8445   // high half of an integer being free.
8446 
8447   // (or i64:x, (zero_extend i32:y)) ->
8448   //   i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
8449   if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
8450       RHS.getOpcode() != ISD::ZERO_EXTEND)
8451     std::swap(LHS, RHS);
8452 
8453   if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
8454     SDValue ExtSrc = RHS.getOperand(0);
8455     EVT SrcVT = ExtSrc.getValueType();
8456     if (SrcVT == MVT::i32) {
8457       SDLoc SL(N);
8458       SDValue LowLHS, HiBits;
8459       std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
8460       SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
8461 
8462       DCI.AddToWorklist(LowOr.getNode());
8463       DCI.AddToWorklist(HiBits.getNode());
8464 
8465       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
8466                                 LowOr, HiBits);
8467       return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
8468     }
8469   }
8470 
8471   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
8472   if (CRHS) {
8473     if (SDValue Split
8474           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
8475       return Split;
8476   }
8477 
8478   return SDValue();
8479 }
8480 
8481 SDValue SITargetLowering::performXorCombine(SDNode *N,
8482                                             DAGCombinerInfo &DCI) const {
8483   EVT VT = N->getValueType(0);
8484   if (VT != MVT::i64)
8485     return SDValue();
8486 
8487   SDValue LHS = N->getOperand(0);
8488   SDValue RHS = N->getOperand(1);
8489 
8490   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
8491   if (CRHS) {
8492     if (SDValue Split
8493           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
8494       return Split;
8495   }
8496 
8497   return SDValue();
8498 }
8499 
8500 // Instructions that will be lowered with a final instruction that zeros the
8501 // high result bits.
8502 // XXX - probably only need to list legal operations.
8503 static bool fp16SrcZerosHighBits(unsigned Opc) {
8504   switch (Opc) {
8505   case ISD::FADD:
8506   case ISD::FSUB:
8507   case ISD::FMUL:
8508   case ISD::FDIV:
8509   case ISD::FREM:
8510   case ISD::FMA:
8511   case ISD::FMAD:
8512   case ISD::FCANONICALIZE:
8513   case ISD::FP_ROUND:
8514   case ISD::UINT_TO_FP:
8515   case ISD::SINT_TO_FP:
8516   case ISD::FABS:
8517     // Fabs is lowered to a bit operation, but it's an and which will clear the
8518     // high bits anyway.
8519   case ISD::FSQRT:
8520   case ISD::FSIN:
8521   case ISD::FCOS:
8522   case ISD::FPOWI:
8523   case ISD::FPOW:
8524   case ISD::FLOG:
8525   case ISD::FLOG2:
8526   case ISD::FLOG10:
8527   case ISD::FEXP:
8528   case ISD::FEXP2:
8529   case ISD::FCEIL:
8530   case ISD::FTRUNC:
8531   case ISD::FRINT:
8532   case ISD::FNEARBYINT:
8533   case ISD::FROUND:
8534   case ISD::FFLOOR:
8535   case ISD::FMINNUM:
8536   case ISD::FMAXNUM:
8537   case AMDGPUISD::FRACT:
8538   case AMDGPUISD::CLAMP:
8539   case AMDGPUISD::COS_HW:
8540   case AMDGPUISD::SIN_HW:
8541   case AMDGPUISD::FMIN3:
8542   case AMDGPUISD::FMAX3:
8543   case AMDGPUISD::FMED3:
8544   case AMDGPUISD::FMAD_FTZ:
8545   case AMDGPUISD::RCP:
8546   case AMDGPUISD::RSQ:
8547   case AMDGPUISD::RCP_IFLAG:
8548   case AMDGPUISD::LDEXP:
8549     return true;
8550   default:
8551     // fcopysign, select and others may be lowered to 32-bit bit operations
8552     // which don't zero the high bits.
8553     return false;
8554   }
8555 }
8556 
8557 SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
8558                                                    DAGCombinerInfo &DCI) const {
8559   if (!Subtarget->has16BitInsts() ||
8560       DCI.getDAGCombineLevel() < AfterLegalizeDAG)
8561     return SDValue();
8562 
8563   EVT VT = N->getValueType(0);
8564   if (VT != MVT::i32)
8565     return SDValue();
8566 
8567   SDValue Src = N->getOperand(0);
8568   if (Src.getValueType() != MVT::i16)
8569     return SDValue();
8570 
8571   // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
8572   // FIXME: It is not universally true that the high bits are zeroed on gfx9.
8573   if (Src.getOpcode() == ISD::BITCAST) {
8574     SDValue BCSrc = Src.getOperand(0);
8575     if (BCSrc.getValueType() == MVT::f16 &&
8576         fp16SrcZerosHighBits(BCSrc.getOpcode()))
8577       return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
8578   }
8579 
8580   return SDValue();
8581 }
8582 
8583 SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N,
8584                                                         DAGCombinerInfo &DCI)
8585                                                         const {
8586   SDValue Src = N->getOperand(0);
8587   auto *VTSign = cast<VTSDNode>(N->getOperand(1));
8588 
8589   if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE &&
8590       VTSign->getVT() == MVT::i8) ||
8591       (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT &&
8592       VTSign->getVT() == MVT::i16)) &&
8593       Src.hasOneUse()) {
8594     auto *M = cast<MemSDNode>(Src);
8595     SDValue Ops[] = {
8596       Src.getOperand(0), // Chain
8597       Src.getOperand(1), // rsrc
8598       Src.getOperand(2), // vindex
8599       Src.getOperand(3), // voffset
8600       Src.getOperand(4), // soffset
8601       Src.getOperand(5), // offset
8602       Src.getOperand(6),
8603       Src.getOperand(7)
8604     };
8605     // replace with BUFFER_LOAD_BYTE/SHORT
8606     SDVTList ResList = DCI.DAG.getVTList(MVT::i32,
8607                                          Src.getOperand(0).getValueType());
8608     unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ?
8609                    AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT;
8610     SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N),
8611                                                           ResList,
8612                                                           Ops, M->getMemoryVT(),
8613                                                           M->getMemOperand());
8614     return DCI.DAG.getMergeValues({BufferLoadSignExt,
8615                                   BufferLoadSignExt.getValue(1)}, SDLoc(N));
8616   }
8617   return SDValue();
8618 }
8619 
8620 SDValue SITargetLowering::performClassCombine(SDNode *N,
8621                                               DAGCombinerInfo &DCI) const {
8622   SelectionDAG &DAG = DCI.DAG;
8623   SDValue Mask = N->getOperand(1);
8624 
8625   // fp_class x, 0 -> false
8626   if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
8627     if (CMask->isNullValue())
8628       return DAG.getConstant(0, SDLoc(N), MVT::i1);
8629   }
8630 
8631   if (N->getOperand(0).isUndef())
8632     return DAG.getUNDEF(MVT::i1);
8633 
8634   return SDValue();
8635 }
8636 
8637 SDValue SITargetLowering::performRcpCombine(SDNode *N,
8638                                             DAGCombinerInfo &DCI) const {
8639   EVT VT = N->getValueType(0);
8640   SDValue N0 = N->getOperand(0);
8641 
8642   if (N0.isUndef())
8643     return N0;
8644 
8645   if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP ||
8646                          N0.getOpcode() == ISD::SINT_TO_FP)) {
8647     return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0,
8648                            N->getFlags());
8649   }
8650 
8651   if ((VT == MVT::f32 || VT == MVT::f16) && N0.getOpcode() == ISD::FSQRT) {
8652     return DCI.DAG.getNode(AMDGPUISD::RSQ, SDLoc(N), VT,
8653                            N0.getOperand(0), N->getFlags());
8654   }
8655 
8656   return AMDGPUTargetLowering::performRcpCombine(N, DCI);
8657 }
8658 
8659 bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
8660                                        unsigned MaxDepth) const {
8661   unsigned Opcode = Op.getOpcode();
8662   if (Opcode == ISD::FCANONICALIZE)
8663     return true;
8664 
8665   if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
8666     auto F = CFP->getValueAPF();
8667     if (F.isNaN() && F.isSignaling())
8668       return false;
8669     return !F.isDenormal() || denormalsEnabledForType(DAG, Op.getValueType());
8670   }
8671 
8672   // If source is a result of another standard FP operation it is already in
8673   // canonical form.
8674   if (MaxDepth == 0)
8675     return false;
8676 
8677   switch (Opcode) {
8678   // These will flush denorms if required.
8679   case ISD::FADD:
8680   case ISD::FSUB:
8681   case ISD::FMUL:
8682   case ISD::FCEIL:
8683   case ISD::FFLOOR:
8684   case ISD::FMA:
8685   case ISD::FMAD:
8686   case ISD::FSQRT:
8687   case ISD::FDIV:
8688   case ISD::FREM:
8689   case ISD::FP_ROUND:
8690   case ISD::FP_EXTEND:
8691   case AMDGPUISD::FMUL_LEGACY:
8692   case AMDGPUISD::FMAD_FTZ:
8693   case AMDGPUISD::RCP:
8694   case AMDGPUISD::RSQ:
8695   case AMDGPUISD::RSQ_CLAMP:
8696   case AMDGPUISD::RCP_LEGACY:
8697   case AMDGPUISD::RSQ_LEGACY:
8698   case AMDGPUISD::RCP_IFLAG:
8699   case AMDGPUISD::TRIG_PREOP:
8700   case AMDGPUISD::DIV_SCALE:
8701   case AMDGPUISD::DIV_FMAS:
8702   case AMDGPUISD::DIV_FIXUP:
8703   case AMDGPUISD::FRACT:
8704   case AMDGPUISD::LDEXP:
8705   case AMDGPUISD::CVT_PKRTZ_F16_F32:
8706   case AMDGPUISD::CVT_F32_UBYTE0:
8707   case AMDGPUISD::CVT_F32_UBYTE1:
8708   case AMDGPUISD::CVT_F32_UBYTE2:
8709   case AMDGPUISD::CVT_F32_UBYTE3:
8710     return true;
8711 
  // These can/will be lowered or combined as bit operations, so we need to
  // check their inputs recursively.
8714   case ISD::FNEG:
8715   case ISD::FABS:
8716   case ISD::FCOPYSIGN:
8717     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
8718 
8719   case ISD::FSIN:
8720   case ISD::FCOS:
8721   case ISD::FSINCOS:
8722     return Op.getValueType().getScalarType() != MVT::f16;
8723 
8724   case ISD::FMINNUM:
8725   case ISD::FMAXNUM:
8726   case ISD::FMINNUM_IEEE:
8727   case ISD::FMAXNUM_IEEE:
8728   case AMDGPUISD::CLAMP:
8729   case AMDGPUISD::FMED3:
8730   case AMDGPUISD::FMAX3:
8731   case AMDGPUISD::FMIN3: {
    // FIXME: Shouldn't treat the generic operations differently based on
    // these. However, we aren't really required to flush the result from
    // minnum/maxnum.
8735 
8736     // snans will be quieted, so we only need to worry about denormals.
8737     if (Subtarget->supportsMinMaxDenormModes() ||
8738         denormalsEnabledForType(DAG, Op.getValueType()))
8739       return true;
8740 
8741     // Flushing may be required.
8742     // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms. For such
    // targets we need to check the inputs recursively.
8744 
8745     // FIXME: Does this apply with clamp? It's implemented with max.
8746     for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) {
8747       if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1))
8748         return false;
8749     }
8750 
8751     return true;
8752   }
8753   case ISD::SELECT: {
8754     return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) &&
8755            isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1);
8756   }
8757   case ISD::BUILD_VECTOR: {
8758     for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
8759       SDValue SrcOp = Op.getOperand(i);
8760       if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1))
8761         return false;
8762     }
8763 
8764     return true;
8765   }
8766   case ISD::EXTRACT_VECTOR_ELT:
8767   case ISD::EXTRACT_SUBVECTOR: {
8768     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
8769   }
8770   case ISD::INSERT_VECTOR_ELT: {
8771     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) &&
8772            isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1);
8773   }
8774   case ISD::UNDEF:
8775     // Could be anything.
8776     return false;
8777 
8778   case ISD::BITCAST: {
    // Hack around the mess we make when legalizing extract_vector_elt.
8780     SDValue Src = Op.getOperand(0);
8781     if (Src.getValueType() == MVT::i16 &&
8782         Src.getOpcode() == ISD::TRUNCATE) {
8783       SDValue TruncSrc = Src.getOperand(0);
8784       if (TruncSrc.getValueType() == MVT::i32 &&
8785           TruncSrc.getOpcode() == ISD::BITCAST &&
8786           TruncSrc.getOperand(0).getValueType() == MVT::v2f16) {
8787         return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1);
8788       }
8789     }
8790 
8791     return false;
8792   }
8793   case ISD::INTRINSIC_WO_CHAIN: {
8794     unsigned IntrinsicID
8795       = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8796     // TODO: Handle more intrinsics
8797     switch (IntrinsicID) {
8798     case Intrinsic::amdgcn_cvt_pkrtz:
8799     case Intrinsic::amdgcn_cubeid:
8800     case Intrinsic::amdgcn_frexp_mant:
8801     case Intrinsic::amdgcn_fdot2:
8802       return true;
8803     default:
8804       break;
8805     }
8806 
8807     LLVM_FALLTHROUGH;
8808   }
8809   default:
8810     return denormalsEnabledForType(DAG, Op.getValueType()) &&
8811            DAG.isKnownNeverSNaN(Op);
8812   }
8813 
8814   llvm_unreachable("invalid operation");
8815 }
8816 
8817 // Constant fold canonicalize.
8818 SDValue SITargetLowering::getCanonicalConstantFP(
8819   SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const {
8820   // Flush denormals to 0 if not enabled.
8821   if (C.isDenormal() && !denormalsEnabledForType(DAG, VT))
8822     return DAG.getConstantFP(0.0, SL, VT);
8823 
8824   if (C.isNaN()) {
8825     APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
8826     if (C.isSignaling()) {
8827       // Quiet a signaling NaN.
8828       // FIXME: Is this supposed to preserve payload bits?
8829       return DAG.getConstantFP(CanonicalQNaN, SL, VT);
8830     }
8831 
8832     // Make sure it is the canonical NaN bitpattern.
8833     //
8834     // TODO: Can we use -1 as the canonical NaN value since it's an inline
8835     // immediate?
8836     if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
8837       return DAG.getConstantFP(CanonicalQNaN, SL, VT);
8838   }
8839 
8840   // Already canonical.
8841   return DAG.getConstantFP(C, SL, VT);
8842 }
8843 
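// Returns true if this build_vector element will fold away in the combine
// below, i.e. it is undef or a constant FP that can be canonicalized directly.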
8844 static bool vectorEltWillFoldAway(SDValue Op) {
8845   return Op.isUndef() || isa<ConstantFPSDNode>(Op);
8846 }
8847 
8848 SDValue SITargetLowering::performFCanonicalizeCombine(
8849   SDNode *N,
8850   DAGCombinerInfo &DCI) const {
8851   SelectionDAG &DAG = DCI.DAG;
8852   SDValue N0 = N->getOperand(0);
8853   EVT VT = N->getValueType(0);
8854 
8855   // fcanonicalize undef -> qnan
8856   if (N0.isUndef()) {
8857     APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT));
8858     return DAG.getConstantFP(QNaN, SDLoc(N), VT);
8859   }
8860 
8861   if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) {
8862     EVT VT = N->getValueType(0);
8863     return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF());
8864   }
8865 
8866   // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x),
8867   //                                                   (fcanonicalize k)
8868   //
8869   // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0
8870 
  // TODO: This could be better with wider vectors that will be split to v2f16,
  // and by considering uses, since there aren't that many packed operations.
8873   if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 &&
8874       isTypeLegal(MVT::v2f16)) {
8875     SDLoc SL(N);
8876     SDValue NewElts[2];
8877     SDValue Lo = N0.getOperand(0);
8878     SDValue Hi = N0.getOperand(1);
8879     EVT EltVT = Lo.getValueType();
8880 
8881     if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) {
8882       for (unsigned I = 0; I != 2; ++I) {
8883         SDValue Op = N0.getOperand(I);
8884         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
8885           NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT,
8886                                               CFP->getValueAPF());
8887         } else if (Op.isUndef()) {
8888           // Handled below based on what the other operand is.
8889           NewElts[I] = Op;
8890         } else {
8891           NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op);
8892         }
8893       }
8894 
      // If one half is undef, and one is constant, prefer a splat vector
      // rather than the normal qNaN. If it's a register, prefer 0.0 since
      // that's cheaper to use and may be free with a packed operation.
      if (NewElts[0].isUndef()) {
        NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ?
          NewElts[1] : DAG.getConstantFP(0.0f, SL, EltVT);
      }
8903 
8904       if (NewElts[1].isUndef()) {
8905         NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ?
8906           NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT);
8907       }
8908 
8909       return DAG.getBuildVector(VT, SL, NewElts);
8910     }
8911   }
8912 
8913   unsigned SrcOpc = N0.getOpcode();
8914 
  // If it's free to do so, push the canonicalize further up the source chain,
  // which may find a canonical source.
8917   //
  // TODO: More opcodes. Note this is unsafe for the _ieee minnum/maxnum for
  // sNaNs.
8920   if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) {
8921     auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
8922     if (CRHS && N0.hasOneUse()) {
8923       SDLoc SL(N);
8924       SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT,
8925                                    N0.getOperand(0));
8926       SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF());
8927       DCI.AddToWorklist(Canon0.getNode());
8928 
8929       return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1);
8930     }
8931   }
8932 
8933   return isCanonicalized(DAG, N0) ? N0 : SDValue();
8934 }
8935 
8936 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
8937   switch (Opc) {
8938   case ISD::FMAXNUM:
8939   case ISD::FMAXNUM_IEEE:
8940     return AMDGPUISD::FMAX3;
8941   case ISD::SMAX:
8942     return AMDGPUISD::SMAX3;
8943   case ISD::UMAX:
8944     return AMDGPUISD::UMAX3;
8945   case ISD::FMINNUM:
8946   case ISD::FMINNUM_IEEE:
8947     return AMDGPUISD::FMIN3;
8948   case ISD::SMIN:
8949     return AMDGPUISD::SMIN3;
8950   case ISD::UMIN:
8951     return AMDGPUISD::UMIN3;
8952   default:
8953     llvm_unreachable("Not a min/max opcode");
8954   }
8955 }
8956 
8957 SDValue SITargetLowering::performIntMed3ImmCombine(
8958   SelectionDAG &DAG, const SDLoc &SL,
8959   SDValue Op0, SDValue Op1, bool Signed) const {
8960   ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
8961   if (!K1)
8962     return SDValue();
8963 
8964   ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
8965   if (!K0)
8966     return SDValue();
8967 
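  // min(max(x, K0), K1) is only equivalent to med3(x, K0, K1) when K0 < K1;
  // if K0 >= K1 the whole expression folds to the constant K1, so bail out.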
8968   if (Signed) {
8969     if (K0->getAPIntValue().sge(K1->getAPIntValue()))
8970       return SDValue();
8971   } else {
8972     if (K0->getAPIntValue().uge(K1->getAPIntValue()))
8973       return SDValue();
8974   }
8975 
8976   EVT VT = K0->getValueType(0);
8977   unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
8978   if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
8979     return DAG.getNode(Med3Opc, SL, VT,
8980                        Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
8981   }
8982 
8983   // If there isn't a 16-bit med3 operation, convert to 32-bit.
8984   MVT NVT = MVT::i32;
8985   unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
8986 
8987   SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
8988   SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
8989   SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
8990 
8991   SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
8992   return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
8993 }
8994 
8995 static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
8996   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
8997     return C;
8998 
8999   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
9000     if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
9001       return C;
9002   }
9003 
9004   return nullptr;
9005 }
9006 
9007 SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
9008                                                   const SDLoc &SL,
9009                                                   SDValue Op0,
9010                                                   SDValue Op1) const {
9011   ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
9012   if (!K1)
9013     return SDValue();
9014 
9015   ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
9016   if (!K0)
9017     return SDValue();
9018 
9019   // Ordered >= (although NaN inputs should have folded away by now).
9020   if (K0->getValueAPF() > K1->getValueAPF())
9021     return SDValue();
9022 
9023   const MachineFunction &MF = DAG.getMachineFunction();
9024   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
9025 
9026   // TODO: Check IEEE bit enabled?
9027   EVT VT = Op0.getValueType();
9028   if (Info->getMode().DX10Clamp) {
9029     // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
9030     // hardware fmed3 behavior converting to a min.
9031     // FIXME: Should this be allowing -0.0?
9032     if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
9033       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
9034   }
9035 
9036   // med3 for f16 is only available on gfx9+, and not available for v2f16.
9037   if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
9038     // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
9039     // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
9040     // then give the other result, which is different from med3 with a NaN
9041     // input.
9042     SDValue Var = Op0.getOperand(0);
9043     if (!DAG.isKnownNeverSNaN(Var))
9044       return SDValue();
9045 
9046     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
9047 
9048     if ((!K0->hasOneUse() ||
9049          TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) &&
9050         (!K1->hasOneUse() ||
9051          TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) {
9052       return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
9053                          Var, SDValue(K0, 0), SDValue(K1, 0));
9054     }
9055   }
9056 
9057   return SDValue();
9058 }
9059 
9060 SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
9061                                                DAGCombinerInfo &DCI) const {
9062   SelectionDAG &DAG = DCI.DAG;
9063 
9064   EVT VT = N->getValueType(0);
9065   unsigned Opc = N->getOpcode();
9066   SDValue Op0 = N->getOperand(0);
9067   SDValue Op1 = N->getOperand(1);
9068 
  // Only do this if the inner op has one use since this will just increase
  // register pressure for no benefit.
9071 
9072   if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
9073       !VT.isVector() &&
9074       (VT == MVT::i32 || VT == MVT::f32 ||
9075        ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) {
9076     // max(max(a, b), c) -> max3(a, b, c)
9077     // min(min(a, b), c) -> min3(a, b, c)
9078     if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
9079       SDLoc DL(N);
9080       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
9081                          DL,
9082                          N->getValueType(0),
9083                          Op0.getOperand(0),
9084                          Op0.getOperand(1),
9085                          Op1);
9086     }
9087 
9088     // Try commuted.
9089     // max(a, max(b, c)) -> max3(a, b, c)
9090     // min(a, min(b, c)) -> min3(a, b, c)
9091     if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
9092       SDLoc DL(N);
9093       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
9094                          DL,
9095                          N->getValueType(0),
9096                          Op0,
9097                          Op1.getOperand(0),
9098                          Op1.getOperand(1));
9099     }
9100   }
9101 
9102   // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
9103   if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
9104     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
9105       return Med3;
9106   }
9107 
9108   if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
9109     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
9110       return Med3;
9111   }
9112 
9113   // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
9114   if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
9115        (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) ||
9116        (Opc == AMDGPUISD::FMIN_LEGACY &&
9117         Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
9118       (VT == MVT::f32 || VT == MVT::f64 ||
9119        (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
9120        (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
9121       Op0.hasOneUse()) {
9122     if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
9123       return Res;
9124   }
9125 
9126   return SDValue();
9127 }
9128 
9129 static bool isClampZeroToOne(SDValue A, SDValue B) {
9130   if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
9131     if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
9132       // FIXME: Should this be allowing -0.0?
9133       return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
9134              (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
9135     }
9136   }
9137 
9138   return false;
9139 }
9140 
9141 // FIXME: Should only worry about snans for version with chain.
9142 SDValue SITargetLowering::performFMed3Combine(SDNode *N,
9143                                               DAGCombinerInfo &DCI) const {
9144   EVT VT = N->getValueType(0);
9145   // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
9146   // NaNs. With a NaN input, the order of the operands may change the result.
9147 
9148   SelectionDAG &DAG = DCI.DAG;
9149   SDLoc SL(N);
9150 
9151   SDValue Src0 = N->getOperand(0);
9152   SDValue Src1 = N->getOperand(1);
9153   SDValue Src2 = N->getOperand(2);
9154 
9155   if (isClampZeroToOne(Src0, Src1)) {
9156     // const_a, const_b, x -> clamp is safe in all cases including signaling
9157     // nans.
9158     // FIXME: Should this be allowing -0.0?
9159     return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
9160   }
9161 
9162   const MachineFunction &MF = DAG.getMachineFunction();
9163   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
9164 
9165   // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
9166   // handling no dx10-clamp?
9167   if (Info->getMode().DX10Clamp) {
    // If NaNs are clamped to 0, we are free to reorder the inputs.
9169 
9170     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
9171       std::swap(Src0, Src1);
9172 
9173     if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
9174       std::swap(Src1, Src2);
9175 
9176     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
9177       std::swap(Src0, Src1);
9178 
9179     if (isClampZeroToOne(Src1, Src2))
9180       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
9181   }
9182 
9183   return SDValue();
9184 }
9185 
9186 SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
9187                                                  DAGCombinerInfo &DCI) const {
9188   SDValue Src0 = N->getOperand(0);
9189   SDValue Src1 = N->getOperand(1);
9190   if (Src0.isUndef() && Src1.isUndef())
9191     return DCI.DAG.getUNDEF(N->getValueType(0));
9192   return SDValue();
9193 }
9194 
9195 SDValue SITargetLowering::performExtractVectorEltCombine(
9196   SDNode *N, DAGCombinerInfo &DCI) const {
9197   SDValue Vec = N->getOperand(0);
9198   SelectionDAG &DAG = DCI.DAG;
9199 
9200   EVT VecVT = Vec.getValueType();
9201   EVT EltVT = VecVT.getVectorElementType();
9202 
9203   if ((Vec.getOpcode() == ISD::FNEG ||
9204        Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
9205     SDLoc SL(N);
9206     EVT EltVT = N->getValueType(0);
9207     SDValue Idx = N->getOperand(1);
9208     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
9209                               Vec.getOperand(0), Idx);
9210     return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
9211   }
9212 
9213   // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
9214   //    =>
9215   // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
9216   // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
9217   // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
9218   if (Vec.hasOneUse() && DCI.isBeforeLegalize()) {
9219     SDLoc SL(N);
9220     EVT EltVT = N->getValueType(0);
9221     SDValue Idx = N->getOperand(1);
9222     unsigned Opc = Vec.getOpcode();
9223 
9224     switch(Opc) {
9225     default:
9226       break;
9227       // TODO: Support other binary operations.
9228     case ISD::FADD:
9229     case ISD::FSUB:
9230     case ISD::FMUL:
9231     case ISD::ADD:
9232     case ISD::UMIN:
9233     case ISD::UMAX:
9234     case ISD::SMIN:
9235     case ISD::SMAX:
9236     case ISD::FMAXNUM:
9237     case ISD::FMINNUM:
9238     case ISD::FMAXNUM_IEEE:
9239     case ISD::FMINNUM_IEEE: {
9240       SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
9241                                  Vec.getOperand(0), Idx);
9242       SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
9243                                  Vec.getOperand(1), Idx);
9244 
9245       DCI.AddToWorklist(Elt0.getNode());
9246       DCI.AddToWorklist(Elt1.getNode());
9247       return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags());
9248     }
9249     }
9250   }
9251 
9252   unsigned VecSize = VecVT.getSizeInBits();
9253   unsigned EltSize = EltVT.getSizeInBits();
9254 
9255   // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx)
  // This eliminates a non-constant index and the subsequent movrel or scratch
  // access. Sub-dword vectors of size 2 dwords or less have a better
  // implementation. Vectors bigger than 8 dwords would yield too many
  // v_cndmask_b32 instructions.
9260   if (VecSize <= 256 && (VecSize > 64 || EltSize >= 32) &&
9261       !isa<ConstantSDNode>(N->getOperand(1))) {
9262     SDLoc SL(N);
9263     SDValue Idx = N->getOperand(1);
9264     SDValue V;
9265     for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
9266       SDValue IC = DAG.getVectorIdxConstant(I, SL);
9267       SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
9268       if (I == 0)
9269         V = Elt;
9270       else
9271         V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ);
9272     }
9273     return V;
9274   }
9275 
9276   if (!DCI.isBeforeLegalize())
9277     return SDValue();
9278 
9279   // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit
9280   // elements. This exposes more load reduction opportunities by replacing
9281   // multiple small extract_vector_elements with a single 32-bit extract.
9282   auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
9283   if (isa<MemSDNode>(Vec) &&
9284       EltSize <= 16 &&
9285       EltVT.isByteSized() &&
9286       VecSize > 32 &&
9287       VecSize % 32 == 0 &&
9288       Idx) {
9289     EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
9290 
9291     unsigned BitIndex = Idx->getZExtValue() * EltSize;
9292     unsigned EltIdx = BitIndex / 32;
9293     unsigned LeftoverBitIdx = BitIndex % 32;
9294     SDLoc SL(N);
9295 
9296     SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
9297     DCI.AddToWorklist(Cast.getNode());
9298 
9299     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
9300                               DAG.getConstant(EltIdx, SL, MVT::i32));
9301     DCI.AddToWorklist(Elt.getNode());
9302     SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
9303                               DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
9304     DCI.AddToWorklist(Srl.getNode());
9305 
9306     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl);
9307     DCI.AddToWorklist(Trunc.getNode());
9308     return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc);
9309   }
9310 
9311   return SDValue();
9312 }
9313 
9314 SDValue
9315 SITargetLowering::performInsertVectorEltCombine(SDNode *N,
9316                                                 DAGCombinerInfo &DCI) const {
9317   SDValue Vec = N->getOperand(0);
9318   SDValue Idx = N->getOperand(2);
9319   EVT VecVT = Vec.getValueType();
9320   EVT EltVT = VecVT.getVectorElementType();
9321   unsigned VecSize = VecVT.getSizeInBits();
9322   unsigned EltSize = EltVT.getSizeInBits();
9323 
9324   // INSERT_VECTOR_ELT (<n x e>, var-idx)
9325   // => BUILD_VECTOR n x select (e, const-idx)
  // This eliminates a non-constant index and the subsequent movrel or scratch
  // access. Sub-dword vectors of size 2 dwords or less have a better
  // implementation. Vectors bigger than 8 dwords would yield too many
  // v_cndmask_b32 instructions.
9330   if (isa<ConstantSDNode>(Idx) ||
9331       VecSize > 256 || (VecSize <= 64 && EltSize < 32))
9332     return SDValue();
9333 
9334   SelectionDAG &DAG = DCI.DAG;
9335   SDLoc SL(N);
9336   SDValue Ins = N->getOperand(1);
9337   EVT IdxVT = Idx.getValueType();
9338 
9339   SmallVector<SDValue, 16> Ops;
9340   for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
9341     SDValue IC = DAG.getConstant(I, SL, IdxVT);
9342     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
9343     SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ);
9344     Ops.push_back(V);
9345   }
9346 
9347   return DAG.getBuildVector(VecVT, SL, Ops);
9348 }
9349 
9350 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
9351                                           const SDNode *N0,
9352                                           const SDNode *N1) const {
9353   EVT VT = N0->getValueType(0);
9354 
9355   // Only do this if we are not trying to support denormals. v_mad_f32 does not
9356   // support denormals ever.
9357   if (((VT == MVT::f32 && !hasFP32Denormals(DAG.getMachineFunction())) ||
9358        (VT == MVT::f16 && !hasFP64FP16Denormals(DAG.getMachineFunction()) &&
9359         getSubtarget()->hasMadF16())) &&
9360        isOperationLegal(ISD::FMAD, VT))
9361     return ISD::FMAD;
9362 
9363   const TargetOptions &Options = DAG.getTarget().Options;
9364   if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
9365        (N0->getFlags().hasAllowContract() &&
9366         N1->getFlags().hasAllowContract())) &&
9367       isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
9368     return ISD::FMA;
9369   }
9370 
9371   return 0;
9372 }
9373 
9374 // For a reassociatable opcode perform:
9375 // op x, (op y, z) -> op (op x, z), y, if x and z are uniform
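// For example, add s0, (add v0, s1) becomes add (add s0, s1), v0, keeping the
// inner add uniform so it can be selected to the SALU.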
9376 SDValue SITargetLowering::reassociateScalarOps(SDNode *N,
9377                                                SelectionDAG &DAG) const {
9378   EVT VT = N->getValueType(0);
9379   if (VT != MVT::i32 && VT != MVT::i64)
9380     return SDValue();
9381 
9382   unsigned Opc = N->getOpcode();
9383   SDValue Op0 = N->getOperand(0);
9384   SDValue Op1 = N->getOperand(1);
9385 
9386   if (!(Op0->isDivergent() ^ Op1->isDivergent()))
9387     return SDValue();
9388 
9389   if (Op0->isDivergent())
9390     std::swap(Op0, Op1);
9391 
9392   if (Op1.getOpcode() != Opc || !Op1.hasOneUse())
9393     return SDValue();
9394 
9395   SDValue Op2 = Op1.getOperand(1);
9396   Op1 = Op1.getOperand(0);
9397   if (!(Op1->isDivergent() ^ Op2->isDivergent()))
9398     return SDValue();
9399 
9400   if (Op1->isDivergent())
9401     std::swap(Op1, Op2);
9402 
9403   // If either operand is constant this will conflict with
9404   // DAGCombiner::ReassociateOps().
9405   if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) ||
9406       DAG.isConstantIntBuildVectorOrConstantInt(Op1))
9407     return SDValue();
9408 
9409   SDLoc SL(N);
9410   SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1);
9411   return DAG.getNode(Opc, SL, VT, Add1, Op2);
9412 }
9413 
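// Emit a mad_i64_i32 / mad_u64_u32 node: a full 32x32->64-bit multiply-add
// whose 64-bit result is then truncated to VT.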
9414 static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
9415                            EVT VT,
9416                            SDValue N0, SDValue N1, SDValue N2,
9417                            bool Signed) {
9418   unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
9419   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
9420   SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
9421   return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
9422 }
9423 
9424 SDValue SITargetLowering::performAddCombine(SDNode *N,
9425                                             DAGCombinerInfo &DCI) const {
9426   SelectionDAG &DAG = DCI.DAG;
9427   EVT VT = N->getValueType(0);
9428   SDLoc SL(N);
9429   SDValue LHS = N->getOperand(0);
9430   SDValue RHS = N->getOperand(1);
9431 
9432   if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
9433       && Subtarget->hasMad64_32() &&
9434       !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
9435       VT.getScalarSizeInBits() <= 64) {
9436     if (LHS.getOpcode() != ISD::MUL)
9437       std::swap(LHS, RHS);
9438 
9439     SDValue MulLHS = LHS.getOperand(0);
9440     SDValue MulRHS = LHS.getOperand(1);
9441     SDValue AddRHS = RHS;
9442 
9443     // TODO: Maybe restrict if SGPR inputs.
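    // mad_u64_u32 / mad_i64_i32 perform a full 32x32->64-bit multiply, so only
    // fold when both multiply operands are known to fit in 32 bits of the
    // corresponding signedness.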
9444     if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
9445         numBitsUnsigned(MulRHS, DAG) <= 32) {
9446       MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
9447       MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
9448       AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
9449       return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
9450     }
9451 
9452     if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
9453       MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
9454       MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
9455       AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
9456       return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
9457     }
9458 
9459     return SDValue();
9460   }
9461 
9462   if (SDValue V = reassociateScalarOps(N, DAG)) {
9463     return V;
9464   }
9465 
9466   if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
9467     return SDValue();
9468 
9469   // add x, zext (setcc) => addcarry x, 0, setcc
9470   // add x, sext (setcc) => subcarry x, 0, setcc
9471   unsigned Opc = LHS.getOpcode();
9472   if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
9473       Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
9474     std::swap(RHS, LHS);
9475 
9476   Opc = RHS.getOpcode();
9477   switch (Opc) {
9478   default: break;
9479   case ISD::ZERO_EXTEND:
9480   case ISD::SIGN_EXTEND:
9481   case ISD::ANY_EXTEND: {
9482     auto Cond = RHS.getOperand(0);
9483     // If this won't be a real VOPC output, we would still need to insert an
9484     // extra instruction anyway.
9485     if (!isBoolSGPR(Cond))
9486       break;
9487     SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
9488     SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
9489     Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
9490     return DAG.getNode(Opc, SL, VTList, Args);
9491   }
9492   case ISD::ADDCARRY: {
9493     // add x, (addcarry y, 0, cc) => addcarry x, y, cc
9494     auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
9495     if (!C || C->getZExtValue() != 0) break;
9496     SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
9497     return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
9498   }
9499   }
9500   return SDValue();
9501 }
9502 
9503 SDValue SITargetLowering::performSubCombine(SDNode *N,
9504                                             DAGCombinerInfo &DCI) const {
9505   SelectionDAG &DAG = DCI.DAG;
9506   EVT VT = N->getValueType(0);
9507 
9508   if (VT != MVT::i32)
9509     return SDValue();
9510 
9511   SDLoc SL(N);
9512   SDValue LHS = N->getOperand(0);
9513   SDValue RHS = N->getOperand(1);
9514 
9515   // sub x, zext (setcc) => subcarry x, 0, setcc
9516   // sub x, sext (setcc) => addcarry x, 0, setcc
9517   unsigned Opc = RHS.getOpcode();
9518   switch (Opc) {
9519   default: break;
9520   case ISD::ZERO_EXTEND:
9521   case ISD::SIGN_EXTEND:
9522   case ISD::ANY_EXTEND: {
9523     auto Cond = RHS.getOperand(0);
9524     // If this won't be a real VOPC output, we would still need to insert an
9525     // extra instruction anyway.
9526     if (!isBoolSGPR(Cond))
9527       break;
9528     SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
9529     SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
9530     Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::ADDCARRY : ISD::SUBCARRY;
9531     return DAG.getNode(Opc, SL, VTList, Args);
9532   }
9533   }
9534 
9535   if (LHS.getOpcode() == ISD::SUBCARRY) {
9536     // sub (subcarry x, 0, cc), y => subcarry x, y, cc
9537     auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
9538     if (!C || !C->isNullValue())
9539       return SDValue();
9540     SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
9541     return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
9542   }
9543   return SDValue();
9544 }
9545 
9546 SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
9547   DAGCombinerInfo &DCI) const {
9548 
9549   if (N->getValueType(0) != MVT::i32)
9550     return SDValue();
9551 
9552   auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
9553   if (!C || C->getZExtValue() != 0)
9554     return SDValue();
9555 
9556   SelectionDAG &DAG = DCI.DAG;
9557   SDValue LHS = N->getOperand(0);
9558 
9559   // addcarry (add x, y), 0, cc => addcarry x, y, cc
9560   // subcarry (sub x, y), 0, cc => subcarry x, y, cc
9561   unsigned LHSOpc = LHS.getOpcode();
9562   unsigned Opc = N->getOpcode();
9563   if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
9564       (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
9565     SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
9566     return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
9567   }
9568   return SDValue();
9569 }
9570 
9571 SDValue SITargetLowering::performFAddCombine(SDNode *N,
9572                                              DAGCombinerInfo &DCI) const {
9573   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
9574     return SDValue();
9575 
9576   SelectionDAG &DAG = DCI.DAG;
9577   EVT VT = N->getValueType(0);
9578 
9579   SDLoc SL(N);
9580   SDValue LHS = N->getOperand(0);
9581   SDValue RHS = N->getOperand(1);
9582 
9583   // These should really be instruction patterns, but writing patterns with
9584   // source modifiers is a pain.
9585 
9586   // fadd (fadd (a, a), b) -> mad 2.0, a, b
9587   if (LHS.getOpcode() == ISD::FADD) {
9588     SDValue A = LHS.getOperand(0);
9589     if (A == LHS.getOperand(1)) {
9590       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
9591       if (FusedOp != 0) {
9592         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9593         return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
9594       }
9595     }
9596   }
9597 
9598   // fadd (b, fadd (a, a)) -> mad 2.0, a, b
9599   if (RHS.getOpcode() == ISD::FADD) {
9600     SDValue A = RHS.getOperand(0);
9601     if (A == RHS.getOperand(1)) {
9602       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
9603       if (FusedOp != 0) {
9604         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9605         return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
9606       }
9607     }
9608   }
9609 
9610   return SDValue();
9611 }
9612 
9613 SDValue SITargetLowering::performFSubCombine(SDNode *N,
9614                                              DAGCombinerInfo &DCI) const {
9615   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
9616     return SDValue();
9617 
9618   SelectionDAG &DAG = DCI.DAG;
9619   SDLoc SL(N);
9620   EVT VT = N->getValueType(0);
9621   assert(!VT.isVector());
9622 
9623   // Try to get the fneg to fold into the source modifier. This undoes generic
9624   // DAG combines and folds them into the mad.
9625   //
9626   // Only do this if we are not trying to support denormals. v_mad_f32 does
9627   // not support denormals ever.
9628   SDValue LHS = N->getOperand(0);
9629   SDValue RHS = N->getOperand(1);
9630   if (LHS.getOpcode() == ISD::FADD) {
9631     // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
9632     SDValue A = LHS.getOperand(0);
9633     if (A == LHS.getOperand(1)) {
9634       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
9635       if (FusedOp != 0) {
9636         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9637         SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
9638 
9639         return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
9640       }
9641     }
9642   }
9643 
9644   if (RHS.getOpcode() == ISD::FADD) {
9645     // (fsub c, (fadd a, a)) -> mad -2.0, a, c
9646 
9647     SDValue A = RHS.getOperand(0);
9648     if (A == RHS.getOperand(1)) {
9649       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
9650       if (FusedOp != 0) {
9651         const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
9652         return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
9653       }
9654     }
9655   }
9656 
9657   return SDValue();
9658 }
9659 
9660 SDValue SITargetLowering::performFMACombine(SDNode *N,
9661                                             DAGCombinerInfo &DCI) const {
9662   SelectionDAG &DAG = DCI.DAG;
9663   EVT VT = N->getValueType(0);
9664   SDLoc SL(N);
9665 
9666   if (!Subtarget->hasDot2Insts() || VT != MVT::f32)
9667     return SDValue();
9668 
9669   // FMA((F32)S0.x, (F32)S1.x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
9670   //   FDOT2((V2F16)S0, (V2F16)S1, (F32)z))
9671   SDValue Op1 = N->getOperand(0);
9672   SDValue Op2 = N->getOperand(1);
9673   SDValue FMA = N->getOperand(2);
9674 
9675   if (FMA.getOpcode() != ISD::FMA ||
9676       Op1.getOpcode() != ISD::FP_EXTEND ||
9677       Op2.getOpcode() != ISD::FP_EXTEND)
9678     return SDValue();
9679 
9680   // fdot2_f32_f16 always flushes fp32 denormal operands and outputs to zero,
9681   // regardless of the denorm mode setting. Therefore, unsafe-fp-math or
9682   // fp-contract is sufficient to allow generating fdot2.
9683   const TargetOptions &Options = DAG.getTarget().Options;
9684   if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
9685       (N->getFlags().hasAllowContract() &&
9686        FMA->getFlags().hasAllowContract())) {
9687     Op1 = Op1.getOperand(0);
9688     Op2 = Op2.getOperand(0);
9689     if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9690         Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9691       return SDValue();
9692 
9693     SDValue Vec1 = Op1.getOperand(0);
9694     SDValue Idx1 = Op1.getOperand(1);
9695     SDValue Vec2 = Op2.getOperand(0);
9696 
9697     SDValue FMAOp1 = FMA.getOperand(0);
9698     SDValue FMAOp2 = FMA.getOperand(1);
9699     SDValue FMAAcc = FMA.getOperand(2);
9700 
9701     if (FMAOp1.getOpcode() != ISD::FP_EXTEND ||
9702         FMAOp2.getOpcode() != ISD::FP_EXTEND)
9703       return SDValue();
9704 
9705     FMAOp1 = FMAOp1.getOperand(0);
9706     FMAOp2 = FMAOp2.getOperand(0);
9707     if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9708         FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9709       return SDValue();
9710 
9711     SDValue Vec3 = FMAOp1.getOperand(0);
9712     SDValue Vec4 = FMAOp2.getOperand(0);
9713     SDValue Idx2 = FMAOp1.getOperand(1);
9714 
9715     if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) ||
9716         // Idx1 and Idx2 cannot be the same.
9717         Idx1 == Idx2)
9718       return SDValue();
9719 
9720     if (Vec1 == Vec2 || Vec3 == Vec4)
9721       return SDValue();
9722 
9723     if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16)
9724       return SDValue();
9725 
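    // The two v2f16 sources must pair up (in either order) between the outer
    // and inner FMA for this to form a dot product.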
9726     if ((Vec1 == Vec3 && Vec2 == Vec4) ||
9727         (Vec1 == Vec4 && Vec2 == Vec3)) {
9728       return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc,
9729                          DAG.getTargetConstant(0, SL, MVT::i1));
9730     }
9731   }
9732   return SDValue();
9733 }
9734 
9735 SDValue SITargetLowering::performSetCCCombine(SDNode *N,
9736                                               DAGCombinerInfo &DCI) const {
9737   SelectionDAG &DAG = DCI.DAG;
9738   SDLoc SL(N);
9739 
9740   SDValue LHS = N->getOperand(0);
9741   SDValue RHS = N->getOperand(1);
9742   EVT VT = LHS.getValueType();
9743   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
9744 
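  // Canonicalize a constant operand onto the RHS, swapping the condition code
  // accordingly.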
9745   auto CRHS = dyn_cast<ConstantSDNode>(RHS);
9746   if (!CRHS) {
9747     CRHS = dyn_cast<ConstantSDNode>(LHS);
9748     if (CRHS) {
9749       std::swap(LHS, RHS);
9750       CC = getSetCCSwappedOperands(CC);
9751     }
9752   }
9753 
9754   if (CRHS) {
9755     if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
9756         isBoolSGPR(LHS.getOperand(0))) {
9757       // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
9758       // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
9759       // setcc (sext from i1 cc),  0, eq|sge|ule) => not cc => xor cc, -1
9760       // setcc (sext from i1 cc),  0, ne|ugt|slt) => cc
9761       if ((CRHS->isAllOnesValue() &&
9762            (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
9763           (CRHS->isNullValue() &&
9764            (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
9765         return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
9766                            DAG.getConstant(-1, SL, MVT::i1));
9767       if ((CRHS->isAllOnesValue() &&
9768            (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
9769           (CRHS->isNullValue() &&
9770            (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
9771         return LHS.getOperand(0);
9772     }
9773 
9774     uint64_t CRHSVal = CRHS->getZExtValue();
9775     if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
9776         LHS.getOpcode() == ISD::SELECT &&
9777         isa<ConstantSDNode>(LHS.getOperand(1)) &&
9778         isa<ConstantSDNode>(LHS.getOperand(2)) &&
9779         LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) &&
9780         isBoolSGPR(LHS.getOperand(0))) {
9781       // Given CT != FT:
9782       // setcc (select cc, CT, CF), CF, eq => xor cc, -1
9783       // setcc (select cc, CT, CF), CF, ne => cc
9784       // setcc (select cc, CT, CF), CT, ne => xor cc, -1
9785       // setcc (select cc, CT, CF), CT, eq => cc
9786       uint64_t CT = LHS.getConstantOperandVal(1);
9787       uint64_t CF = LHS.getConstantOperandVal(2);
9788 
9789       if ((CF == CRHSVal && CC == ISD::SETEQ) ||
9790           (CT == CRHSVal && CC == ISD::SETNE))
9791         return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
9792                            DAG.getConstant(-1, SL, MVT::i1));
9793       if ((CF == CRHSVal && CC == ISD::SETNE) ||
9794           (CT == CRHSVal && CC == ISD::SETEQ))
9795         return LHS.getOperand(0);
9796     }
9797   }
9798 
9799   if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
9800                                            VT != MVT::f16))
9801     return SDValue();
9802 
9803   // Match isinf/isfinite pattern
9804   // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
9805   // (fcmp one (fabs x), inf) -> (fp_class x,
9806   //   (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero))
9807   if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) {
9808     const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
9809     if (!CRHS)
9810       return SDValue();
9811 
9812     const APFloat &APF = CRHS->getValueAPF();
9813     if (APF.isInfinity() && !APF.isNegative()) {
9814       const unsigned IsInfMask = SIInstrFlags::P_INFINITY |
9815                                  SIInstrFlags::N_INFINITY;
9816       const unsigned IsFiniteMask = SIInstrFlags::N_ZERO |
9817                                     SIInstrFlags::P_ZERO |
9818                                     SIInstrFlags::N_NORMAL |
9819                                     SIInstrFlags::P_NORMAL |
9820                                     SIInstrFlags::N_SUBNORMAL |
9821                                     SIInstrFlags::P_SUBNORMAL;
9822       unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask;
9823       return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
9824                          DAG.getConstant(Mask, SL, MVT::i32));
9825     }
9826   }
9827 
9828   return SDValue();
9829 }
9830 
9831 SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
9832                                                      DAGCombinerInfo &DCI) const {
9833   SelectionDAG &DAG = DCI.DAG;
9834   SDLoc SL(N);
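  // Offset selects which byte (0-3) of the source this CVT_F32_UBYTEn reads.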
9835   unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
9836 
9837   SDValue Src = N->getOperand(0);
9838   SDValue Shift = N->getOperand(0);
9839   if (Shift.getOpcode() == ISD::ZERO_EXTEND)
9840     Shift = Shift.getOperand(0);
9841 
9842   if (Shift.getOpcode() == ISD::SRL || Shift.getOpcode() == ISD::SHL) {
9843     // cvt_f32_ubyte1 (shl x,  8) -> cvt_f32_ubyte0 x
9844     // cvt_f32_ubyte3 (shl x, 16) -> cvt_f32_ubyte1 x
9845     // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
9846     // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
9847     // cvt_f32_ubyte0 (srl x,  8) -> cvt_f32_ubyte1 x
9848     if (auto *C = dyn_cast<ConstantSDNode>(Shift.getOperand(1))) {
9849       Shift = DAG.getZExtOrTrunc(Shift.getOperand(0),
9850                                  SDLoc(Shift.getOperand(0)), MVT::i32);
9851 
9852       unsigned ShiftOffset = 8 * Offset;
9853       if (Shift.getOpcode() == ISD::SHL)
9854         ShiftOffset -= C->getZExtValue();
9855       else
9856         ShiftOffset += C->getZExtValue();
9857 
9858       if (ShiftOffset < 32 && (ShiftOffset % 8) == 0) {
9859         return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + ShiftOffset / 8, SL,
9860                            MVT::f32, Shift);
9861       }
9862     }
9863   }
9864 
9865   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9866   APInt DemandedBits = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
9867   if (TLI.SimplifyDemandedBits(Src, DemandedBits, DCI)) {
9868     // We simplified Src. If this node is not dead, visit it again so it is
9869     // folded properly.
9870     if (N->getOpcode() != ISD::DELETED_NODE)
9871       DCI.AddToWorklist(N);
9872     return SDValue(N, 0);
9873   }
9874 
9875   // Handle (or x, (srl y, 8)) pattern when known bits are zero.
9876   if (SDValue DemandedSrc =
9877           TLI.SimplifyMultipleUseDemandedBits(Src, DemandedBits, DAG))
9878     return DAG.getNode(N->getOpcode(), SL, MVT::f32, DemandedSrc);
9879 
9880   return SDValue();
9881 }
9882 
9883 SDValue SITargetLowering::performClampCombine(SDNode *N,
9884                                               DAGCombinerInfo &DCI) const {
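  // Constant fold a clamp of a constant source: values below 0.0 (or NaN when
  // DX10 clamp is enabled) fold to 0.0, values above 1.0 fold to 1.0, and the
  // source constant is otherwise returned unchanged.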
9885   ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
9886   if (!CSrc)
9887     return SDValue();
9888 
9889   const MachineFunction &MF = DCI.DAG.getMachineFunction();
9890   const APFloat &F = CSrc->getValueAPF();
9891   APFloat Zero = APFloat::getZero(F.getSemantics());
9892   if (F < Zero ||
9893       (F.isNaN() && MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) {
9894     return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
9895   }
9896 
9897   APFloat One(F.getSemantics(), "1.0");
9898   if (F > One)
9899     return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));
9900 
9901   return SDValue(CSrc, 0);
9902 }
9903 
9904 
9905 SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
9906                                             DAGCombinerInfo &DCI) const {
9907   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
9908     return SDValue();
9909   switch (N->getOpcode()) {
9910   default:
9911     return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
9912   case ISD::ADD:
9913     return performAddCombine(N, DCI);
9914   case ISD::SUB:
9915     return performSubCombine(N, DCI);
9916   case ISD::ADDCARRY:
9917   case ISD::SUBCARRY:
9918     return performAddCarrySubCarryCombine(N, DCI);
9919   case ISD::FADD:
9920     return performFAddCombine(N, DCI);
9921   case ISD::FSUB:
9922     return performFSubCombine(N, DCI);
9923   case ISD::SETCC:
9924     return performSetCCCombine(N, DCI);
9925   case ISD::FMAXNUM:
9926   case ISD::FMINNUM:
9927   case ISD::FMAXNUM_IEEE:
9928   case ISD::FMINNUM_IEEE:
9929   case ISD::SMAX:
9930   case ISD::SMIN:
9931   case ISD::UMAX:
9932   case ISD::UMIN:
9933   case AMDGPUISD::FMIN_LEGACY:
9934   case AMDGPUISD::FMAX_LEGACY:
9935     return performMinMaxCombine(N, DCI);
9936   case ISD::FMA:
9937     return performFMACombine(N, DCI);
9938   case ISD::LOAD: {
9939     if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
9940       return Widened;
9941     LLVM_FALLTHROUGH;
9942   }
9943   case ISD::STORE:
9944   case ISD::ATOMIC_LOAD:
9945   case ISD::ATOMIC_STORE:
9946   case ISD::ATOMIC_CMP_SWAP:
9947   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
9948   case ISD::ATOMIC_SWAP:
9949   case ISD::ATOMIC_LOAD_ADD:
9950   case ISD::ATOMIC_LOAD_SUB:
9951   case ISD::ATOMIC_LOAD_AND:
9952   case ISD::ATOMIC_LOAD_OR:
9953   case ISD::ATOMIC_LOAD_XOR:
9954   case ISD::ATOMIC_LOAD_NAND:
9955   case ISD::ATOMIC_LOAD_MIN:
9956   case ISD::ATOMIC_LOAD_MAX:
9957   case ISD::ATOMIC_LOAD_UMIN:
9958   case ISD::ATOMIC_LOAD_UMAX:
9959   case ISD::ATOMIC_LOAD_FADD:
9960   case AMDGPUISD::ATOMIC_INC:
9961   case AMDGPUISD::ATOMIC_DEC:
9962   case AMDGPUISD::ATOMIC_LOAD_FMIN:
9963   case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
9964     if (DCI.isBeforeLegalize())
9965       break;
9966     return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
9967   case ISD::AND:
9968     return performAndCombine(N, DCI);
9969   case ISD::OR:
9970     return performOrCombine(N, DCI);
9971   case ISD::XOR:
9972     return performXorCombine(N, DCI);
9973   case ISD::ZERO_EXTEND:
9974     return performZeroExtendCombine(N, DCI);
9975   case ISD::SIGN_EXTEND_INREG:
9976     return performSignExtendInRegCombine(N, DCI);
9977   case AMDGPUISD::FP_CLASS:
9978     return performClassCombine(N, DCI);
9979   case ISD::FCANONICALIZE:
9980     return performFCanonicalizeCombine(N, DCI);
9981   case AMDGPUISD::RCP:
9982     return performRcpCombine(N, DCI);
9983   case AMDGPUISD::FRACT:
9984   case AMDGPUISD::RSQ:
9985   case AMDGPUISD::RCP_LEGACY:
9986   case AMDGPUISD::RSQ_LEGACY:
9987   case AMDGPUISD::RCP_IFLAG:
9988   case AMDGPUISD::RSQ_CLAMP:
9989   case AMDGPUISD::LDEXP: {
9990     SDValue Src = N->getOperand(0);
9991     if (Src.isUndef())
9992       return Src;
9993     break;
9994   }
9995   case ISD::SINT_TO_FP:
9996   case ISD::UINT_TO_FP:
9997     return performUCharToFloatCombine(N, DCI);
9998   case AMDGPUISD::CVT_F32_UBYTE0:
9999   case AMDGPUISD::CVT_F32_UBYTE1:
10000   case AMDGPUISD::CVT_F32_UBYTE2:
10001   case AMDGPUISD::CVT_F32_UBYTE3:
10002     return performCvtF32UByteNCombine(N, DCI);
10003   case AMDGPUISD::FMED3:
10004     return performFMed3Combine(N, DCI);
10005   case AMDGPUISD::CVT_PKRTZ_F16_F32:
10006     return performCvtPkRTZCombine(N, DCI);
10007   case AMDGPUISD::CLAMP:
10008     return performClampCombine(N, DCI);
10009   case ISD::SCALAR_TO_VECTOR: {
10010     SelectionDAG &DAG = DCI.DAG;
10011     EVT VT = N->getValueType(0);
10012 
10013     // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
10014     if (VT == MVT::v2i16 || VT == MVT::v2f16) {
10015       SDLoc SL(N);
10016       SDValue Src = N->getOperand(0);
10017       EVT EltVT = Src.getValueType();
10018       if (EltVT == MVT::f16)
10019         Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
10020 
10021       SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
10022       return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
10023     }
10024 
10025     break;
10026   }
10027   case ISD::EXTRACT_VECTOR_ELT:
10028     return performExtractVectorEltCombine(N, DCI);
10029   case ISD::INSERT_VECTOR_ELT:
10030     return performInsertVectorEltCombine(N, DCI);
10031   }
10032   return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
10033 }
10034 
10035 /// Helper function for adjustWritemask
10036 static unsigned SubIdx2Lane(unsigned Idx) {
10037   switch (Idx) {
10038   default: return 0;
10039   case AMDGPU::sub0: return 0;
10040   case AMDGPU::sub1: return 1;
10041   case AMDGPU::sub2: return 2;
10042   case AMDGPU::sub3: return 3;
10043   case AMDGPU::sub4: return 4; // Possible with TFE/LWE
10044   }
10045 }
10046 
10047 /// Adjust the writemask of MIMG instructions
10048 SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
10049                                           SelectionDAG &DAG) const {
10050   unsigned Opcode = Node->getMachineOpcode();
10051 
10052   // Subtract 1 because the vdata output is not a MachineSDNode operand.
10053   int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1;
10054   if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx))
10055     return Node; // not implemented for D16
10056 
10057   SDNode *Users[5] = { nullptr };
10058   unsigned Lane = 0;
10059   unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1;
10060   unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
10061   unsigned NewDmask = 0;
10062   unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1;
10063   unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1;
10064   bool UsesTFC = Node->getConstantOperandVal(TFEIdx) ||
10065                  Node->getConstantOperandVal(LWEIdx);
10066   unsigned TFCLane = 0;
10067   bool HasChain = Node->getNumValues() > 1;
10068 
10069   if (OldDmask == 0) {
10070     // These are folded out, but on the off chance it happens, don't assert.
10071     return Node;
10072   }
10073 
10074   unsigned OldBitsSet = countPopulation(OldDmask);
10075   // Work out which is the TFE/LWE lane if that is enabled.
10076   if (UsesTFC) {
10077     TFCLane = OldBitsSet;
10078   }
10079 
10080   // Try to figure out the used register components
10081   for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
10082        I != E; ++I) {
10083 
10084     // Don't look at users of the chain.
10085     if (I.getUse().getResNo() != 0)
10086       continue;
10087 
10088     // Abort if we can't understand the usage
10089     if (!I->isMachineOpcode() ||
10090         I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
10091       return Node;
10092 
10093     // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
10094     // Note that subregs are packed, i.e. Lane==0 is the first bit set
10095     // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
10096     // set, etc.
10097     Lane = SubIdx2Lane(I->getConstantOperandVal(1));
10098 
10099     // Check if the use is for the TFE/LWE generated result at VGPRn+1.
10100     if (UsesTFC && Lane == TFCLane) {
10101       Users[Lane] = *I;
10102     } else {
10103       // Set which texture component corresponds to the lane.
10104       unsigned Comp;
10105       for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) {
10106         Comp = countTrailingZeros(Dmask);
10107         Dmask &= ~(1 << Comp);
10108       }
10109 
10110       // Abort if we have more than one user per component.
10111       if (Users[Lane])
10112         return Node;
10113 
10114       Users[Lane] = *I;
10115       NewDmask |= 1 << Comp;
10116     }
10117   }
10118 
10119   // Don't allow 0 dmask, as hardware assumes one channel enabled.
10120   bool NoChannels = !NewDmask;
10121   if (NoChannels) {
10122     if (!UsesTFC) {
10123       // No uses of the result and not using TFC. Then do nothing.
10124       return Node;
10125     }
10126     // If the original dmask has one channel, then there is nothing to do.
10127     if (OldBitsSet == 1)
10128       return Node;
10129     // Use an arbitrary dmask - required for the instruction to work
10130     NewDmask = 1;
10131   }
10132   // Abort if there's no change
10133   if (NewDmask == OldDmask)
10134     return Node;
10135 
10136   unsigned BitsSet = countPopulation(NewDmask);
10137 
10138   // Check for TFE or LWE - increase the number of channels by one to account
10139   // for the extra return value
10140   // This will need adjustment for D16 if it is also handled by
10141   // adjustWritemask (this function), but at present D16 is excluded.
10142   unsigned NewChannels = BitsSet + UsesTFC;
10143 
10144   int NewOpcode =
10145       AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels);
10146   assert(NewOpcode != -1 &&
10147          NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
10148          "failed to find equivalent MIMG op");
10149 
10150   // Adjust the writemask in the node
10151   SmallVector<SDValue, 12> Ops;
10152   Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
10153   Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
10154   Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
10155 
10156   MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
10157 
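  // Round channel counts without a matching register size up to the next
  // available vector width (3 channels use a 4-element vector, 5 use 8).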
10158   MVT ResultVT = NewChannels == 1 ?
10159     SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 :
10160                            NewChannels == 5 ? 8 : NewChannels);
10161   SDVTList NewVTList = HasChain ?
10162     DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);
10163 
10164 
10165   MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
10166                                               NewVTList, Ops);
10167 
10168   if (HasChain) {
10169     // Update chain.
10170     DAG.setNodeMemRefs(NewNode, Node->memoperands());
10171     DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
10172   }
10173 
10174   if (NewChannels == 1) {
10175     assert(Node->hasNUsesOfValue(1, 0));
10176     SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
10177                                       SDLoc(Node), Users[Lane]->getValueType(0),
10178                                       SDValue(NewNode, 0));
10179     DAG.ReplaceAllUsesWith(Users[Lane], Copy);
10180     return nullptr;
10181   }
10182 
10183   // Update the users of the node with the new indices
10184   for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) {
10185     SDNode *User = Users[i];
10186     if (!User) {
10187       // Handle the special case of NoChannels. We set NewDmask to 1 above, but
10188       // Users[0] is still nullptr because channel 0 doesn't really have a use.
10189       if (i || !NoChannels)
10190         continue;
10191     } else {
10192       SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
10193       DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
10194     }
10195 
10196     switch (Idx) {
10197     default: break;
10198     case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
10199     case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
10200     case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
10201     case AMDGPU::sub3: Idx = AMDGPU::sub4; break;
10202     }
10203   }
10204 
10205   DAG.RemoveDeadNode(Node);
10206   return nullptr;
10207 }
10208 
10209 static bool isFrameIndexOp(SDValue Op) {
10210   if (Op.getOpcode() == ISD::AssertZext)
10211     Op = Op.getOperand(0);
10212 
10213   return isa<FrameIndexSDNode>(Op);
10214 }
10215 
10216 /// Legalize target independent instructions (e.g. INSERT_SUBREG)
10217 /// with frame index operands.
10218 /// LLVM assumes that inputs to these instructions are registers.
10219 SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
10220                                                         SelectionDAG &DAG) const {
10221   if (Node->getOpcode() == ISD::CopyToReg) {
10222     RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
10223     SDValue SrcVal = Node->getOperand(2);
10224 
10225     // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
10226     // to try understanding copies to physical registers.
10227     if (SrcVal.getValueType() == MVT::i1 &&
10228         Register::isPhysicalRegister(DestReg->getReg())) {
10229       SDLoc SL(Node);
10230       MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
10231       SDValue VReg = DAG.getRegister(
10232         MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
10233 
10234       SDNode *Glued = Node->getGluedNode();
10235       SDValue ToVReg
10236         = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
10237                          SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
10238       SDValue ToResultReg
10239         = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
10240                            VReg, ToVReg.getValue(1));
10241       DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
10242       DAG.RemoveDeadNode(Node);
10243       return ToResultReg.getNode();
10244     }
10245   }
10246 
10247   SmallVector<SDValue, 8> Ops;
10248   for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
10249     if (!isFrameIndexOp(Node->getOperand(i))) {
10250       Ops.push_back(Node->getOperand(i));
10251       continue;
10252     }
10253 
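    // Wrap the frame index in an S_MOV_B32 so the operand becomes a register.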
10254     SDLoc DL(Node);
10255     Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
10256                                      Node->getOperand(i).getValueType(),
10257                                      Node->getOperand(i)), 0));
10258   }
10259 
10260   return DAG.UpdateNodeOperands(Node, Ops);
10261 }
10262 
10263 /// Fold the instructions after selecting them.
10264 /// Returns null if users were already updated.
10265 SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
10266                                           SelectionDAG &DAG) const {
10267   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10268   unsigned Opcode = Node->getMachineOpcode();
10269 
10270   if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
10271       !TII->isGather4(Opcode)) {
10272     return adjustWritemask(Node, DAG);
10273   }
10274 
10275   if (Opcode == AMDGPU::INSERT_SUBREG ||
10276       Opcode == AMDGPU::REG_SEQUENCE) {
10277     legalizeTargetIndependentNode(Node, DAG);
10278     return Node;
10279   }
10280 
10281   switch (Opcode) {
10282   case AMDGPU::V_DIV_SCALE_F32:
10283   case AMDGPU::V_DIV_SCALE_F64: {
10284     // Satisfy the operand register constraint when one of the inputs is
10285     // undefined. Ordinarily each undef value will have its own implicit_def of
10286     // a vreg, so force these to use a single register.
10287     SDValue Src0 = Node->getOperand(0);
10288     SDValue Src1 = Node->getOperand(1);
10289     SDValue Src2 = Node->getOperand(2);
10290 
10291     if ((Src0.isMachineOpcode() &&
10292          Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
10293         (Src0 == Src1 || Src0 == Src2))
10294       break;
10295 
10296     MVT VT = Src0.getValueType().getSimpleVT();
10297     const TargetRegisterClass *RC =
10298         getRegClassFor(VT, Src0.getNode()->isDivergent());
10299 
10300     MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
10301     SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
10302 
10303     SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
10304                                       UndefReg, Src0, SDValue());
10305 
10306     // src0 must be the same register as src1 or src2, even if the value is
10307     // undefined, so make sure we don't violate this constraint.
10308     if (Src0.isMachineOpcode() &&
10309         Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
10310       if (Src1.isMachineOpcode() &&
10311           Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
10312         Src0 = Src1;
10313       else if (Src2.isMachineOpcode() &&
10314                Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
10315         Src0 = Src2;
10316       else {
10317         assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
10318         Src0 = UndefReg;
10319         Src1 = UndefReg;
10320       }
10321     } else
10322       break;
10323 
10324     SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
10325     for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
10326       Ops.push_back(Node->getOperand(I));
10327 
10328     Ops.push_back(ImpDef.getValue(1));
10329     return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
10330   }
10331   default:
10332     break;
10333   }
10334 
10335   return Node;
10336 }
10337 
10338 /// Assign the register class depending on the number of
10339 /// bits set in the writemask
10340 void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
10341                                                      SDNode *Node) const {
10342   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10343 
10344   MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
10345 
10346   if (TII->isVOP3(MI.getOpcode())) {
10347     // Make sure constant bus requirements are respected.
10348     TII->legalizeOperandsVOP3(MRI, MI);
10349 
10350     // Prefer VGPRs over AGPRs in mAI instructions where possible.
10351     // This saves a chain-copy of registers and better balances register
10352     // use between VGPRs and AGPRs, as AGPR tuples tend to be big.
10353     if (const MCOperandInfo *OpInfo = MI.getDesc().OpInfo) {
10354       unsigned Opc = MI.getOpcode();
10355       const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
10356       for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
10357                       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) {
10358         if (I == -1)
10359           break;
10360         MachineOperand &Op = MI.getOperand(I);
10361         if ((OpInfo[I].RegClass != llvm::AMDGPU::AV_64RegClassID &&
10362              OpInfo[I].RegClass != llvm::AMDGPU::AV_32RegClassID) ||
10363             !Register::isVirtualRegister(Op.getReg()) ||
10364             !TRI->isAGPR(MRI, Op.getReg()))
10365           continue;
10366         auto *Src = MRI.getUniqueVRegDef(Op.getReg());
10367         if (!Src || !Src->isCopy() ||
10368             !TRI->isSGPRReg(MRI, Src->getOperand(1).getReg()))
10369           continue;
10370         auto *RC = TRI->getRegClassForReg(MRI, Op.getReg());
10371         auto *NewRC = TRI->getEquivalentVGPRClass(RC);
10372         // All uses of agpr64 and agpr32 can also accept vgpr except for
10373         // v_accvgpr_read, but we do not produce agpr reads during selection,
10374         // so no use checks are needed.
10375         MRI.setRegClass(Op.getReg(), NewRC);
10376       }
10377     }
10378 
10379     return;
10380   }
10381 
10382   // Replace unused atomics with the no return version.
10383   int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
10384   if (NoRetAtomicOp != -1) {
10385     if (!Node->hasAnyUseOfValue(0)) {
10386       MI.setDesc(TII->get(NoRetAtomicOp));
10387       MI.RemoveOperand(0);
10388       return;
10389     }
10390 
10391     // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
10392     // instruction, because the return type of these instructions is a vec2 of
10393     // the memory type, so it can be tied to the input operand.
10394     // This means these instructions always have a use, so we need to add a
10395     // special case to check if the atomic has only one extract_subreg use,
10396     // which itself has no uses.
10397     if ((Node->hasNUsesOfValue(1, 0) &&
10398          Node->use_begin()->isMachineOpcode() &&
10399          Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
10400          !Node->use_begin()->hasAnyUseOfValue(0))) {
10401       Register Def = MI.getOperand(0).getReg();
10402 
10403       // Change this into a noret atomic.
10404       MI.setDesc(TII->get(NoRetAtomicOp));
10405       MI.RemoveOperand(0);
10406 
10407       // If we only remove the def operand from the atomic instruction, the
10408       // extract_subreg will be left with a use of a vreg without a def.
10409       // So we need to insert an implicit_def to avoid machine verifier
10410       // errors.
10411       BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
10412               TII->get(AMDGPU::IMPLICIT_DEF), Def);
10413     }
10414     return;
10415   }
10416 }
10417 
10418 static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
10419                               uint64_t Val) {
10420   SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
10421   return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
10422 }
10423 
10424 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
10425                                                 const SDLoc &DL,
10426                                                 SDValue Ptr) const {
10427   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10428 
10429   // Build the half of the register that holds the constants before building
10430   // the full 128-bit register. If we are building multiple resource
10431   // descriptors, this will allow CSEing of the 2-component register.
10432   const SDValue Ops0[] = {
10433     DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
10434     buildSMovImm32(DAG, DL, 0),
10435     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
10436     buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
10437     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
10438   };
10439 
10440   SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
10441                                                 MVT::v2i32, Ops0), 0);
10442 
10443   // Combine the constants and the pointer.
10444   const SDValue Ops1[] = {
10445     DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32),
10446     Ptr,
10447     DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
10448     SubRegHi,
10449     DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
10450   };
10451 
10452   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
10453 }
10454 
10455 /// Return a resource descriptor with the 'Add TID' bit enabled.
10456 /// The TID (Thread ID) is multiplied by the stride value (bits [61:48] of the
10457 /// resource descriptor) to create an offset, which is added to the resource
10458 /// pointer.
10459 MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
10460                                            SDValue Ptr, uint32_t RsrcDword1,
10461                                            uint64_t RsrcDword2And3) const {
10462   SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
10463   SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
10464   if (RsrcDword1) {
10465     PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
10466                                      DAG.getConstant(RsrcDword1, DL, MVT::i32)),
10467                     0);
10468   }
10469 
10470   SDValue DataLo = buildSMovImm32(DAG, DL,
10471                                   RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
10472   SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
10473 
10474   const SDValue Ops[] = {
10475     DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32),
10476     PtrLo,
10477     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
10478     PtrHi,
10479     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
10480     DataLo,
10481     DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
10482     DataHi,
10483     DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
10484   };
10485 
10486   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
10487 }
10488 
10489 //===----------------------------------------------------------------------===//
10490 //                         SI Inline Assembly Support
10491 //===----------------------------------------------------------------------===//
10492 
10493 std::pair<unsigned, const TargetRegisterClass *>
10494 SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
10495                                                StringRef Constraint,
10496                                                MVT VT) const {
10497   const TargetRegisterClass *RC = nullptr;
10498   if (Constraint.size() == 1) {
10499     switch (Constraint[0]) {
10500     default:
10501       return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10502     case 's':
10503     case 'r':
10504       switch (VT.getSizeInBits()) {
10505       default:
10506         return std::make_pair(0U, nullptr);
10507       case 32:
10508       case 16:
10509         RC = &AMDGPU::SReg_32RegClass;
10510         break;
10511       case 64:
10512         RC = &AMDGPU::SGPR_64RegClass;
10513         break;
10514       case 96:
10515         RC = &AMDGPU::SReg_96RegClass;
10516         break;
10517       case 128:
10518         RC = &AMDGPU::SGPR_128RegClass;
10519         break;
10520       case 160:
10521         RC = &AMDGPU::SReg_160RegClass;
10522         break;
10523       case 256:
10524         RC = &AMDGPU::SReg_256RegClass;
10525         break;
10526       case 512:
10527         RC = &AMDGPU::SReg_512RegClass;
10528         break;
10529       }
10530       break;
10531     case 'v':
10532       switch (VT.getSizeInBits()) {
10533       default:
10534         return std::make_pair(0U, nullptr);
10535       case 32:
10536       case 16:
10537         RC = &AMDGPU::VGPR_32RegClass;
10538         break;
10539       case 64:
10540         RC = &AMDGPU::VReg_64RegClass;
10541         break;
10542       case 96:
10543         RC = &AMDGPU::VReg_96RegClass;
10544         break;
10545       case 128:
10546         RC = &AMDGPU::VReg_128RegClass;
10547         break;
10548       case 160:
10549         RC = &AMDGPU::VReg_160RegClass;
10550         break;
10551       case 256:
10552         RC = &AMDGPU::VReg_256RegClass;
10553         break;
10554       case 512:
10555         RC = &AMDGPU::VReg_512RegClass;
10556         break;
10557       }
10558       break;
10559     case 'a':
10560       if (!Subtarget->hasMAIInsts())
10561         break;
10562       switch (VT.getSizeInBits()) {
10563       default:
10564         return std::make_pair(0U, nullptr);
10565       case 32:
10566       case 16:
10567         RC = &AMDGPU::AGPR_32RegClass;
10568         break;
10569       case 64:
10570         RC = &AMDGPU::AReg_64RegClass;
10571         break;
10572       case 128:
10573         RC = &AMDGPU::AReg_128RegClass;
10574         break;
10575       case 512:
10576         RC = &AMDGPU::AReg_512RegClass;
10577         break;
10578       case 1024:
10579         RC = &AMDGPU::AReg_1024RegClass;
10580         // v32 types are not legal but we support them here.
10581         return std::make_pair(0U, RC);
10582       }
10583       break;
10584     }
10585     // We actually support i128, i16 and f16 as inline parameters
10586     // even if they are not reported as legal
10587     if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
10588                VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
10589       return std::make_pair(0U, RC);
10590   }
10591 
10592   if (Constraint.size() > 1) {
10593     if (Constraint[1] == 'v') {
10594       RC = &AMDGPU::VGPR_32RegClass;
10595     } else if (Constraint[1] == 's') {
10596       RC = &AMDGPU::SGPR_32RegClass;
10597     } else if (Constraint[1] == 'a') {
10598       RC = &AMDGPU::AGPR_32RegClass;
10599     }
10600 
10601     if (RC) {
10602       uint32_t Idx;
10603       bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
10604       if (!Failed && Idx < RC->getNumRegs())
10605         return std::make_pair(RC->getRegister(Idx), RC);
10606     }
10607   }
10608 
10609   // FIXME: Returns VS_32 for physical SGPR constraints
10610   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10611 }
10612 
10613 SITargetLowering::ConstraintType
10614 SITargetLowering::getConstraintType(StringRef Constraint) const {
10615   if (Constraint.size() == 1) {
10616     switch (Constraint[0]) {
10617     default: break;
10618     case 's':
10619     case 'v':
10620     case 'a':
10621       return C_RegisterClass;
10622     }
10623   }
10624   return TargetLowering::getConstraintType(Constraint);
10625 }
10626 
10627 // Figure out which registers should be reserved for stack access. Only after
10628 // the function is legalized do we know all of the non-spill stack objects or if
10629 // calls are present.
10630 void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
10631   MachineRegisterInfo &MRI = MF.getRegInfo();
10632   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
10633   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
10634   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
10635 
10636   if (Info->isEntryFunction()) {
10637     // Callable functions have fixed registers used for stack access.
10638     reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
10639   }
10640 
10641   assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
10642                              Info->getStackPtrOffsetReg()));
10643   if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG)
10644     MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
10645 
10646   // We need to worry about replacing the default register with itself in case
10647   // of MIR testcases missing the MFI.
10648   if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG)
10649     MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
10650 
10651   if (Info->getFrameOffsetReg() != AMDGPU::FP_REG)
10652     MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
10653 
10654   if (Info->getScratchWaveOffsetReg() != AMDGPU::SCRATCH_WAVE_OFFSET_REG) {
10655     MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
10656                        Info->getScratchWaveOffsetReg());
10657   }
10658 
10659   Info->limitOccupancy(MF);
10660 
10661   if (ST.isWave32() && !MF.empty()) {
10662     // Add a VCC_HI def because many instructions are marked as implicitly
10663     // using VCC, but we may only define VCC_LO. If nothing defines VCC_HI
10664     // we may end up with a use of undef.
10665 
10666     const SIInstrInfo *TII = ST.getInstrInfo();
10667     DebugLoc DL;
10668 
10669     MachineBasicBlock &MBB = MF.front();
10670     MachineBasicBlock::iterator I = MBB.getFirstNonDebugInstr();
10671     BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), AMDGPU::VCC_HI);
10672 
10673     for (auto &MBB : MF) {
10674       for (auto &MI : MBB) {
10675         TII->fixImplicitOperands(MI);
10676       }
10677     }
10678   }
10679 
10680   TargetLoweringBase::finalizeLowering(MF);
10681 }
10682 
10683 void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
10684                                                      KnownBits &Known,
10685                                                      const APInt &DemandedElts,
10686                                                      const SelectionDAG &DAG,
10687                                                      unsigned Depth) const {
10688   TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
10689                                                 DAG, Depth);
10690 
10691   // Set the high bits to zero based on the maximum allowed scratch size per
10692   // wave. We can't use vaddr in MUBUF instructions if we don't know the address
10693   // calculation won't overflow, so assume the sign bit is never set.
10694   Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
10695 }
10696 
10697 Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
10698   const Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
10699   const Align CacheLineAlign = Align(64);
10700 
10701   // Pre-GFX10 targets did not benefit from loop alignment.
10702   if (!ML || DisableLoopAlignment ||
10703       (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
10704       getSubtarget()->hasInstFwdPrefetchBug())
10705     return PrefAlign;
10706 
10707   // On GFX10 the I$ consists of 4 x 64-byte cache lines.
10708   // By default the prefetcher keeps one cache line behind and reads two ahead.
10709   // We can modify it with S_INST_PREFETCH so that larger loops keep two lines
10710   // behind and one ahead.
10711   // Therefore we can benefit from aligning loop headers if the loop fits in
10712   // 192 bytes: a loop within 64 bytes always spans at most two cache lines and
10713   // needs no alignment; a loop within 128 bytes needs alignment but no prefetch
10714   // change; a loop within 192 bytes additionally needs the prefetcher switched
10715   // to keep two lines behind.
10716 
10717   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10718   const MachineBasicBlock *Header = ML->getHeader();
10719   if (Header->getAlignment() != PrefAlign)
10720     return Header->getAlignment(); // Already processed.
10721 
10722   unsigned LoopSize = 0;
10723   for (const MachineBasicBlock *MBB : ML->blocks()) {
10724     // If an inner loop block is aligned, assume on average half of the
10725     // alignment size is added as nops.
10726     if (MBB != Header)
10727       LoopSize += MBB->getAlignment().value() / 2;
10728 
10729     for (const MachineInstr &MI : *MBB) {
10730       LoopSize += TII->getInstSizeInBytes(MI);
10731       if (LoopSize > 192)
10732         return PrefAlign;
10733     }
10734   }
10735 
10736   if (LoopSize <= 64)
10737     return PrefAlign;
10738 
10739   if (LoopSize <= 128)
10740     return CacheLineAlign;
10741 
10742   // If any of the parent loops is surrounded by prefetch instructions, do not
10743   // insert new ones for the inner loop; that would reset the parent's settings.
10744   for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) {
10745     if (MachineBasicBlock *Exit = P->getExitBlock()) {
10746       auto I = Exit->getFirstNonDebugInstr();
10747       if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
10748         return CacheLineAlign;
10749     }
10750   }
10751 
10752   MachineBasicBlock *Pre = ML->getLoopPreheader();
10753   MachineBasicBlock *Exit = ML->getExitBlock();
10754 
10755   if (Pre && Exit) {
10756     BuildMI(*Pre, Pre->getFirstTerminator(), DebugLoc(),
10757             TII->get(AMDGPU::S_INST_PREFETCH))
10758       .addImm(1); // prefetch 2 lines behind PC
10759 
10760     BuildMI(*Exit, Exit->getFirstNonDebugInstr(), DebugLoc(),
10761             TII->get(AMDGPU::S_INST_PREFETCH))
10762       .addImm(2); // prefetch 1 line behind PC
10763   }
10764 
10765   return CacheLineAlign;
10766 }
10767 
10768 LLVM_ATTRIBUTE_UNUSED
10769 static bool isCopyFromRegOfInlineAsm(const SDNode *N) {
10770   assert(N->getOpcode() == ISD::CopyFromReg);
10771   do {
10772     // Follow the chain until we find an INLINEASM node.
10773     N = N->getOperand(0).getNode();
10774     if (N->getOpcode() == ISD::INLINEASM ||
10775         N->getOpcode() == ISD::INLINEASM_BR)
10776       return true;
10777   } while (N->getOpcode() == ISD::CopyFromReg);
10778   return false;
10779 }
10780 
10781 bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode * N,
10782   FunctionLoweringInfo * FLI, LegacyDivergenceAnalysis * KDA) const
10783 {
10784   switch (N->getOpcode()) {
10785     case ISD::CopyFromReg:
10786     {
10787       const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1));
10788       const MachineFunction * MF = FLI->MF;
10789       const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
10790       const MachineRegisterInfo &MRI = MF->getRegInfo();
10791       const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
10792       unsigned Reg = R->getReg();
10793       if (Register::isPhysicalRegister(Reg))
10794         return !TRI.isSGPRReg(MRI, Reg);
10795 
10796       if (MRI.isLiveIn(Reg)) {
10797         // workitem.id.x workitem.id.y workitem.id.z
10798         // Any VGPR formal argument is also considered divergent
10799         if (!TRI.isSGPRReg(MRI, Reg))
10800           return true;
10801         // Formal arguments of non-entry functions
10802         // are conservatively considered divergent
10803         else if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
10804           return true;
10805         return false;
10806       }
10807       const Value *V = FLI->getValueFromVirtualReg(Reg);
10808       if (V)
10809         return KDA->isDivergent(V);
10810       assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
10811       return !TRI.isSGPRReg(MRI, Reg);
10812     }
10813     break;
10814     case ISD::LOAD: {
10815       const LoadSDNode *L = cast<LoadSDNode>(N);
10816       unsigned AS = L->getAddressSpace();
10817       // A flat load may access private memory.
10818       return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
10819     }
10820     case ISD::CALLSEQ_END:
10821       return true;
10823     case ISD::INTRINSIC_WO_CHAIN:
10824       return AMDGPU::isIntrinsicSourceOfDivergence(
10825           cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
10826     case ISD::INTRINSIC_W_CHAIN:
10827       return AMDGPU::isIntrinsicSourceOfDivergence(
10828           cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
10832   }
10833   return false;
10834 }
10835 
10836 bool SITargetLowering::denormalsEnabledForType(const SelectionDAG &DAG,
10837                                                EVT VT) const {
10838   switch (VT.getScalarType().getSimpleVT().SimpleTy) {
10839   case MVT::f32:
10840     return hasFP32Denormals(DAG.getMachineFunction());
10841   case MVT::f64:
10842   case MVT::f16:
10843     return hasFP64FP16Denormals(DAG.getMachineFunction());
10844   default:
10845     return false;
10846   }
10847 }
10848 
10849 bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
10850                                                     const SelectionDAG &DAG,
10851                                                     bool SNaN,
10852                                                     unsigned Depth) const {
10853   if (Op.getOpcode() == AMDGPUISD::CLAMP) {
10854     const MachineFunction &MF = DAG.getMachineFunction();
10855     const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
10856 
10857     if (Info->getMode().DX10Clamp)
10858       return true; // Clamped to 0.
10859     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
10860   }
10861 
10862   return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG,
10863                                                             SNaN, Depth);
10864 }
10865 
TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
  switch (RMW->getOperation()) {
  case AtomicRMWInst::FAdd: {
    Type *Ty = RMW->getType();

    // We don't have a way to support 16-bit atomics now, so just leave them
    // as-is.
    if (Ty->isHalfTy())
      return AtomicExpansionKind::None;

    if (!Ty->isFloatTy())
      return AtomicExpansionKind::CmpXChg;

    // TODO: We do have these for flat; older targets also had them for
    // buffers.
    unsigned AS = RMW->getPointerAddressSpace();

    if (AS == AMDGPUAS::GLOBAL_ADDRESS && Subtarget->hasAtomicFaddInsts()) {
      return RMW->use_empty() ? AtomicExpansionKind::None :
                                AtomicExpansionKind::CmpXChg;
    }

    return (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) ?
      AtomicExpansionKind::None : AtomicExpansionKind::CmpXChg;
  }
  default:
    break;
  }

  return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
}
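
/// Pick the register class for \p VT based on divergence: uniform values are
/// steered to SGPR classes (including wave-mask i1 values, which become
/// SReg_32 or SReg_64 depending on the wavefront size) and divergent values
/// to the equivalent VGPR classes.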
const TargetRegisterClass *
SITargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
  const TargetRegisterClass *RC = TargetLoweringBase::getRegClassFor(VT, false);
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  if (RC == &AMDGPU::VReg_1RegClass && !isDivergent)
    return Subtarget->getWavefrontSize() == 64 ? &AMDGPU::SReg_64RegClass
                                               : &AMDGPU::SReg_32RegClass;
  if (!TRI->isSGPRClass(RC) && !isDivergent)
    return TRI->getEquivalentSGPRClass(RC);
  else if (TRI->isSGPRClass(RC) && isDivergent)
    return TRI->getEquivalentVGPRClass(RC);

  return RC;
}
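
/// \returns true if \p V, a wave-mask-sized integer, is consumed directly or
/// transitively as the mask operand of one of the wave control-flow
/// intrinsics (amdgcn_if, amdgcn_else, amdgcn_if_break, amdgcn_loop,
/// amdgcn_end_cf), which operate on per-wave scalar masks.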
static bool hasCFUser(const Value *V, SmallPtrSet<const Value *, 16> &Visited,
                      unsigned WaveSize) {
  // FIXME: We assume we never cast the mask results of a control flow
  // intrinsic.
  // Early exit if the type cannot be a wave mask; this is a compile-time
  // shortcut rather than a correctness check.
  IntegerType *IT = dyn_cast<IntegerType>(V->getType());
  if (!IT || IT->getBitWidth() != WaveSize)
    return false;

  if (!isa<Instruction>(V))
    return false;
  if (!Visited.insert(V).second)
    return false;
  bool Result = false;
  for (auto U : V->users()) {
    if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(U)) {
      if (V == U->getOperand(1)) {
        switch (Intrinsic->getIntrinsicID()) {
        default:
          Result = false;
          break;
        case Intrinsic::amdgcn_if_break:
        case Intrinsic::amdgcn_if:
        case Intrinsic::amdgcn_else:
          Result = true;
          break;
        }
      }
      if (V == U->getOperand(0)) {
        switch (Intrinsic->getIntrinsicID()) {
        default:
          Result = false;
          break;
        case Intrinsic::amdgcn_end_cf:
        case Intrinsic::amdgcn_loop:
          Result = true;
          break;
        }
      }
    } else {
      Result = hasCFUser(U, Visited, WaveSize);
    }
    if (Result)
      break;
  }
  return Result;
}
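
/// \returns true if \p V must be kept in an SGPR: values produced by
/// amdgcn_if_break, masks extracted from amdgcn_if/amdgcn_else results,
/// inline-asm outputs constrained to scalar registers, and any value that
/// feeds a control-flow intrinsic mask operand.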
bool SITargetLowering::requiresUniformRegister(MachineFunction &MF,
                                               const Value *V) const {
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_if_break:
      return true;
    }
  }
  if (const ExtractValueInst *ExtValue = dyn_cast<ExtractValueInst>(V)) {
    if (const IntrinsicInst *Intrinsic =
            dyn_cast<IntrinsicInst>(ExtValue->getOperand(0))) {
      switch (Intrinsic->getIntrinsicID()) {
      default:
        return false;
      case Intrinsic::amdgcn_if:
      case Intrinsic::amdgcn_else: {
        ArrayRef<unsigned> Indices = ExtValue->getIndices();
        if (Indices.size() == 1 && Indices[0] == 1) {
          return true;
        }
      }
      }
    }
  }
  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (isa<InlineAsm>(CI->getCalledValue())) {
      const SIRegisterInfo *SIRI = Subtarget->getRegisterInfo();
      ImmutableCallSite CS(CI);
      TargetLowering::AsmOperandInfoVector TargetConstraints = ParseConstraints(
          MF.getDataLayout(), Subtarget->getRegisterInfo(), CS);
      for (auto &TC : TargetConstraints) {
        if (TC.Type == InlineAsm::isOutput) {
          ComputeConstraintToUse(TC, SDValue());
          unsigned AssignedReg;
          const TargetRegisterClass *RC;
          std::tie(AssignedReg, RC) = getRegForInlineAsmConstraint(
              SIRI, TC.ConstraintCode, TC.ConstraintVT);
          if (RC) {
            MachineRegisterInfo &MRI = MF.getRegInfo();
            if (AssignedReg != 0 && SIRI->isSGPRReg(MRI, AssignedReg))
              return true;
            else if (SIRI->isSGPRClass(RC))
              return true;
          }
        }
      }
    }
  }
  SmallPtrSet<const Value *, 16> Visited;
  return hasCFUser(V, Visited, Subtarget->getWavefrontSize());
}