1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Custom DAG lowering for SI
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #if defined(_MSC_VER) || defined(__MINGW32__)
15 // Provide M_PI.
16 #define _USE_MATH_DEFINES
17 #endif
18 
19 #include "SIISelLowering.h"
20 #include "AMDGPU.h"
21 #include "AMDGPUSubtarget.h"
22 #include "AMDGPUTargetMachine.h"
23 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
24 #include "SIDefines.h"
25 #include "SIInstrInfo.h"
26 #include "SIMachineFunctionInfo.h"
27 #include "SIRegisterInfo.h"
28 #include "Utils/AMDGPUBaseInfo.h"
29 #include "llvm/ADT/APFloat.h"
30 #include "llvm/ADT/APInt.h"
31 #include "llvm/ADT/ArrayRef.h"
32 #include "llvm/ADT/BitVector.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringRef.h"
36 #include "llvm/ADT/StringSwitch.h"
37 #include "llvm/ADT/Twine.h"
38 #include "llvm/Analysis/LegacyDivergenceAnalysis.h"
39 #include "llvm/CodeGen/Analysis.h"
40 #include "llvm/CodeGen/CallingConvLower.h"
41 #include "llvm/CodeGen/DAGCombine.h"
42 #include "llvm/CodeGen/ISDOpcodes.h"
43 #include "llvm/CodeGen/MachineBasicBlock.h"
44 #include "llvm/CodeGen/MachineFrameInfo.h"
45 #include "llvm/CodeGen/MachineFunction.h"
46 #include "llvm/CodeGen/MachineInstr.h"
47 #include "llvm/CodeGen/MachineInstrBuilder.h"
48 #include "llvm/CodeGen/MachineLoopInfo.h"
49 #include "llvm/CodeGen/MachineMemOperand.h"
50 #include "llvm/CodeGen/MachineModuleInfo.h"
51 #include "llvm/CodeGen/MachineOperand.h"
52 #include "llvm/CodeGen/MachineRegisterInfo.h"
53 #include "llvm/CodeGen/SelectionDAG.h"
54 #include "llvm/CodeGen/SelectionDAGNodes.h"
55 #include "llvm/CodeGen/TargetCallingConv.h"
56 #include "llvm/CodeGen/TargetRegisterInfo.h"
57 #include "llvm/CodeGen/ValueTypes.h"
58 #include "llvm/IR/Constants.h"
59 #include "llvm/IR/DataLayout.h"
60 #include "llvm/IR/DebugLoc.h"
61 #include "llvm/IR/DerivedTypes.h"
62 #include "llvm/IR/DiagnosticInfo.h"
63 #include "llvm/IR/Function.h"
64 #include "llvm/IR/GlobalValue.h"
65 #include "llvm/IR/InstrTypes.h"
66 #include "llvm/IR/Instruction.h"
67 #include "llvm/IR/Instructions.h"
68 #include "llvm/IR/IntrinsicInst.h"
69 #include "llvm/IR/Type.h"
70 #include "llvm/Support/Casting.h"
71 #include "llvm/Support/CodeGen.h"
72 #include "llvm/Support/CommandLine.h"
73 #include "llvm/Support/Compiler.h"
74 #include "llvm/Support/ErrorHandling.h"
75 #include "llvm/Support/KnownBits.h"
76 #include "llvm/Support/MachineValueType.h"
77 #include "llvm/Support/MathExtras.h"
78 #include "llvm/Target/TargetOptions.h"
79 #include <cassert>
80 #include <cmath>
81 #include <cstdint>
82 #include <iterator>
83 #include <tuple>
84 #include <utility>
85 #include <vector>
86 
87 using namespace llvm;
88 
89 #define DEBUG_TYPE "si-lower"
90 
91 STATISTIC(NumTailCalls, "Number of tail calls");
92 
93 static cl::opt<bool> DisableLoopAlignment(
94   "amdgpu-disable-loop-alignment",
95   cl::desc("Do not align and prefetch loops"),
96   cl::init(false));
97 
98 static bool hasFP32Denormals(const MachineFunction &MF) {
99   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
100   return Info->getMode().allFP32Denormals();
101 }
102 
103 static bool hasFP64FP16Denormals(const MachineFunction &MF) {
104   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
105   return Info->getMode().allFP64FP16Denormals();
106 }
107 
108 static unsigned findFirstFreeSGPR(CCState &CCInfo) {
109   unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
110   for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
111     if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
112       return AMDGPU::SGPR0 + Reg;
113     }
114   }
115   llvm_unreachable("Cannot allocate sgpr");
116 }
117 
118 SITargetLowering::SITargetLowering(const TargetMachine &TM,
119                                    const GCNSubtarget &STI)
120     : AMDGPUTargetLowering(TM, STI),
121       Subtarget(&STI) {
122   addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
123   addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
124 
125   addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass);
126   addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);
127 
128   addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
129   addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
130   addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
131 
132   addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
133   addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass);
134 
135   addRegisterClass(MVT::v2i64, &AMDGPU::SGPR_128RegClass);
136   addRegisterClass(MVT::v2f64, &AMDGPU::SGPR_128RegClass);
137 
138   addRegisterClass(MVT::v4i32, &AMDGPU::SGPR_128RegClass);
139   addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
140 
141   addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
142   addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);
143 
144   addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
145   addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
146 
147   addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
148   addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
149 
150   if (Subtarget->has16BitInsts()) {
151     addRegisterClass(MVT::i16, &AMDGPU::SReg_32RegClass);
152     addRegisterClass(MVT::f16, &AMDGPU::SReg_32RegClass);
153 
154     // Unless there are also VOP3P operations, not operations are really legal.
155     addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32RegClass);
156     addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32RegClass);
157     addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
158     addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
159   }
160 
161   if (Subtarget->hasMAIInsts()) {
162     addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass);
163     addRegisterClass(MVT::v32f32, &AMDGPU::VReg_1024RegClass);
164   }
165 
166   computeRegisterProperties(Subtarget->getRegisterInfo());
167 
168   // The boolean content concept here is too inflexible. Compares only ever
169   // really produce a 1-bit result. Any copy/extend from these will turn into a
170   // select, and zext/1 or sext/-1 are equally cheap. Arbitrarily choose 0/1, as
171   // it's what most targets use.
172   setBooleanContents(ZeroOrOneBooleanContent);
173   setBooleanVectorContents(ZeroOrOneBooleanContent);
174 
175   // We need to custom lower vector stores from local memory
176   setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
177   setOperationAction(ISD::LOAD, MVT::v3i32, Custom);
178   setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
179   setOperationAction(ISD::LOAD, MVT::v5i32, Custom);
180   setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
181   setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
182   setOperationAction(ISD::LOAD, MVT::i1, Custom);
183   setOperationAction(ISD::LOAD, MVT::v32i32, Custom);
184 
185   setOperationAction(ISD::STORE, MVT::v2i32, Custom);
186   setOperationAction(ISD::STORE, MVT::v3i32, Custom);
187   setOperationAction(ISD::STORE, MVT::v4i32, Custom);
188   setOperationAction(ISD::STORE, MVT::v5i32, Custom);
189   setOperationAction(ISD::STORE, MVT::v8i32, Custom);
190   setOperationAction(ISD::STORE, MVT::v16i32, Custom);
191   setOperationAction(ISD::STORE, MVT::i1, Custom);
192   setOperationAction(ISD::STORE, MVT::v32i32, Custom);
193 
194   setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
195   setTruncStoreAction(MVT::v3i32, MVT::v3i16, Expand);
196   setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
197   setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
198   setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
199   setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
200   setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
201   setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
202   setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
203   setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
204   setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);
205 
206   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
207   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
208 
209   setOperationAction(ISD::SELECT, MVT::i1, Promote);
210   setOperationAction(ISD::SELECT, MVT::i64, Custom);
211   setOperationAction(ISD::SELECT, MVT::f64, Promote);
212   AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);
213 
214   setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
215   setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
216   setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
217   setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
218   setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
219 
220   setOperationAction(ISD::SETCC, MVT::i1, Promote);
221   setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
222   setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
223   AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
224 
225   setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
226   setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
227 
228   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
229   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
230   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
231   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
232   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
233   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v3i16, Custom);
234   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
235   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);
236 
237   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
238   setOperationAction(ISD::BR_CC, MVT::i1, Expand);
239   setOperationAction(ISD::BR_CC, MVT::i32, Expand);
240   setOperationAction(ISD::BR_CC, MVT::i64, Expand);
241   setOperationAction(ISD::BR_CC, MVT::f32, Expand);
242   setOperationAction(ISD::BR_CC, MVT::f64, Expand);
243 
244   setOperationAction(ISD::UADDO, MVT::i32, Legal);
245   setOperationAction(ISD::USUBO, MVT::i32, Legal);
246 
247   setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
248   setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);
249 
250   setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
251   setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
252   setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
253 
254 #if 0
255   setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
256   setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
257 #endif
258 
259   // We only support LOAD/STORE and vector manipulation ops for vectors
260   // with > 4 elements.
261   for (MVT VT : { MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
262                   MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16,
263                   MVT::v32i32, MVT::v32f32 }) {
264     for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
265       switch (Op) {
266       case ISD::LOAD:
267       case ISD::STORE:
268       case ISD::BUILD_VECTOR:
269       case ISD::BITCAST:
270       case ISD::EXTRACT_VECTOR_ELT:
271       case ISD::INSERT_VECTOR_ELT:
272       case ISD::INSERT_SUBVECTOR:
273       case ISD::EXTRACT_SUBVECTOR:
274       case ISD::SCALAR_TO_VECTOR:
275         break;
276       case ISD::CONCAT_VECTORS:
277         setOperationAction(Op, VT, Custom);
278         break;
279       default:
280         setOperationAction(Op, VT, Expand);
281         break;
282       }
283     }
284   }
285 
286   setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);
287 
288   // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
289   // is expanded to avoid having two separate loops in case the index is a VGPR.
290 
291   // Most operations are naturally 32-bit vector operations. We only support
292   // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
293   for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
294     setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
295     AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);
296 
297     setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
298     AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);
299 
300     setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
301     AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);
302 
303     setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
304     AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
305   }
306 
307   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
308   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
309   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
310   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);
311 
312   setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
313   setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
314 
315   // Avoid stack access for these.
316   // TODO: Generalize to more vector types.
317   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
318   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
319   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
320   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);
321 
322   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
323   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
324   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
325   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
326   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);
327 
328   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
329   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
330   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);
331 
332   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
333   setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
334   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
335   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);
336 
337   // Deal with vec3 vector operations when widened to vec4.
338   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Custom);
339   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Custom);
340   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Custom);
341   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Custom);
342 
343   // Deal with vec5 vector operations when widened to vec8.
344   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Custom);
345   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Custom);
346   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Custom);
347   setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Custom);
348 
349   // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
350   // and output demarshalling
351   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
352   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
353 
354   // We can't return success/failure, only the old value,
355   // let LLVM add the comparison
356   setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
357   setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);
358 
359   if (Subtarget->hasFlatAddressSpace()) {
360     setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
361     setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
362   }
363 
364   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
365 
366   // FIXME: This should be narrowed to i32, but that only happens if i64 is
367   // illegal.
368   setOperationAction(ISD::BSWAP, MVT::i64, Legal);
369   setOperationAction(ISD::BSWAP, MVT::i32, Legal);
370 
371   // On SI this is s_memtime and s_memrealtime on VI.
372   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
373   setOperationAction(ISD::TRAP, MVT::Other, Custom);
374   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);
375 
376   if (Subtarget->has16BitInsts()) {
377     setOperationAction(ISD::FPOW, MVT::f16, Promote);
378     setOperationAction(ISD::FLOG, MVT::f16, Custom);
379     setOperationAction(ISD::FEXP, MVT::f16, Custom);
380     setOperationAction(ISD::FLOG10, MVT::f16, Custom);
381   }
382 
383   // v_mad_f32 does not support denormals. We report it as unconditionally
384   // legal, and the context where it is formed will disallow it when fp32
385   // denormals are enabled.
386   setOperationAction(ISD::FMAD, MVT::f32, Legal);
387 
388   if (!Subtarget->hasBFI()) {
389     // fcopysign can be done in a single instruction with BFI.
390     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
391     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
392   }
393 
394   if (!Subtarget->hasBCNT(32))
395     setOperationAction(ISD::CTPOP, MVT::i32, Expand);
396 
397   if (!Subtarget->hasBCNT(64))
398     setOperationAction(ISD::CTPOP, MVT::i64, Expand);
399 
400   if (Subtarget->hasFFBH())
401     setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
402 
403   if (Subtarget->hasFFBL())
404     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
405 
406   // We only really have 32-bit BFE instructions (and 16-bit on VI).
407   //
408   // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
409   // effort to match them now. We want this to be false for i64 cases when the
410   // extraction isn't restricted to the upper or lower half. Ideally we would
411   // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
412   // span the midpoint are probably relatively rare, so don't worry about them
413   // for now.
414   if (Subtarget->hasBFE())
415     setHasExtractBitsInsn(true);
416 
417   setOperationAction(ISD::FMINNUM, MVT::f32, Custom);
418   setOperationAction(ISD::FMAXNUM, MVT::f32, Custom);
419   setOperationAction(ISD::FMINNUM, MVT::f64, Custom);
420   setOperationAction(ISD::FMAXNUM, MVT::f64, Custom);
421 
422 
423   // These are really only legal for ieee_mode functions. We should be avoiding
424   // them for functions that don't have ieee_mode enabled, so just say they are
425   // legal.
426   setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
427   setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
428   setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
429   setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
430 
431 
432   if (Subtarget->haveRoundOpsF64()) {
433     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
434     setOperationAction(ISD::FCEIL, MVT::f64, Legal);
435     setOperationAction(ISD::FRINT, MVT::f64, Legal);
436   } else {
437     setOperationAction(ISD::FCEIL, MVT::f64, Custom);
438     setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
439     setOperationAction(ISD::FRINT, MVT::f64, Custom);
440     setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
441   }
442 
443   setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
444 
445   setOperationAction(ISD::FSIN, MVT::f32, Custom);
446   setOperationAction(ISD::FCOS, MVT::f32, Custom);
447   setOperationAction(ISD::FDIV, MVT::f32, Custom);
448   setOperationAction(ISD::FDIV, MVT::f64, Custom);
449 
450   if (Subtarget->has16BitInsts()) {
451     setOperationAction(ISD::Constant, MVT::i16, Legal);
452 
453     setOperationAction(ISD::SMIN, MVT::i16, Legal);
454     setOperationAction(ISD::SMAX, MVT::i16, Legal);
455 
456     setOperationAction(ISD::UMIN, MVT::i16, Legal);
457     setOperationAction(ISD::UMAX, MVT::i16, Legal);
458 
459     setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
460     AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);
461 
462     setOperationAction(ISD::ROTR, MVT::i16, Promote);
463     setOperationAction(ISD::ROTL, MVT::i16, Promote);
464 
465     setOperationAction(ISD::SDIV, MVT::i16, Promote);
466     setOperationAction(ISD::UDIV, MVT::i16, Promote);
467     setOperationAction(ISD::SREM, MVT::i16, Promote);
468     setOperationAction(ISD::UREM, MVT::i16, Promote);
469 
470     setOperationAction(ISD::BSWAP, MVT::i16, Promote);
471     setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);
472 
473     setOperationAction(ISD::CTTZ, MVT::i16, Promote);
474     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
475     setOperationAction(ISD::CTLZ, MVT::i16, Promote);
476     setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
477     setOperationAction(ISD::CTPOP, MVT::i16, Promote);
478 
479     setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
480 
481     setOperationAction(ISD::BR_CC, MVT::i16, Expand);
482 
483     setOperationAction(ISD::LOAD, MVT::i16, Custom);
484 
485     setTruncStoreAction(MVT::i64, MVT::i16, Expand);
486 
487     setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
488     AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
489     setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
490     AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);
491 
492     setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
493     setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
494 
495     // F16 - Constant Actions.
496     setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
497 
498     // F16 - Load/Store Actions.
499     setOperationAction(ISD::LOAD, MVT::f16, Promote);
500     AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
501     setOperationAction(ISD::STORE, MVT::f16, Promote);
502     AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);
503 
504     // F16 - VOP1 Actions.
505     setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
506     setOperationAction(ISD::FCOS, MVT::f16, Custom);
507     setOperationAction(ISD::FSIN, MVT::f16, Custom);
508 
509     setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
510     setOperationAction(ISD::UINT_TO_FP, MVT::i16, Custom);
511 
512     setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
513     setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
514     setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
515     setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
516     setOperationAction(ISD::FROUND, MVT::f16, Custom);
517 
518     // F16 - VOP2 Actions.
519     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
520     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
521 
522     setOperationAction(ISD::FDIV, MVT::f16, Custom);
523 
524     // F16 - VOP3 Actions.
525     setOperationAction(ISD::FMA, MVT::f16, Legal);
526     if (STI.hasMadF16())
527       setOperationAction(ISD::FMAD, MVT::f16, Legal);
528 
529     for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
530       for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
531         switch (Op) {
532         case ISD::LOAD:
533         case ISD::STORE:
534         case ISD::BUILD_VECTOR:
535         case ISD::BITCAST:
536         case ISD::EXTRACT_VECTOR_ELT:
537         case ISD::INSERT_VECTOR_ELT:
538         case ISD::INSERT_SUBVECTOR:
539         case ISD::EXTRACT_SUBVECTOR:
540         case ISD::SCALAR_TO_VECTOR:
541           break;
542         case ISD::CONCAT_VECTORS:
543           setOperationAction(Op, VT, Custom);
544           break;
545         default:
546           setOperationAction(Op, VT, Expand);
547           break;
548         }
549       }
550     }
551 
552     // XXX - Do these do anything? Vector constants turn into build_vector.
553     setOperationAction(ISD::Constant, MVT::v2i16, Legal);
554     setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);
555 
556     setOperationAction(ISD::UNDEF, MVT::v2i16, Legal);
557     setOperationAction(ISD::UNDEF, MVT::v2f16, Legal);
558 
559     setOperationAction(ISD::STORE, MVT::v2i16, Promote);
560     AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
561     setOperationAction(ISD::STORE, MVT::v2f16, Promote);
562     AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);
563 
564     setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
565     AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
566     setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
567     AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);
568 
569     setOperationAction(ISD::AND, MVT::v2i16, Promote);
570     AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
571     setOperationAction(ISD::OR, MVT::v2i16, Promote);
572     AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
573     setOperationAction(ISD::XOR, MVT::v2i16, Promote);
574     AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
575 
576     setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
577     AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
578     setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
579     AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);
580 
581     setOperationAction(ISD::STORE, MVT::v4i16, Promote);
582     AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
583     setOperationAction(ISD::STORE, MVT::v4f16, Promote);
584     AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);
585 
586     setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
587     setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
588     setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
589     setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
590 
591     setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand);
592     setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
593     setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);
594 
595     if (!Subtarget->hasVOP3PInsts()) {
596       setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
597       setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
598     }
599 
600     setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
601     // This isn't really legal, but this avoids the legalizer unrolling it (and
602     // allows matching fneg (fabs x) patterns)
603     setOperationAction(ISD::FABS, MVT::v2f16, Legal);
604 
605     setOperationAction(ISD::FMAXNUM, MVT::f16, Custom);
606     setOperationAction(ISD::FMINNUM, MVT::f16, Custom);
607     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal);
608     setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal);
609 
610     setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom);
611     setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom);
612 
613     setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand);
614     setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand);
615   }
616 
617   if (Subtarget->hasVOP3PInsts()) {
618     setOperationAction(ISD::ADD, MVT::v2i16, Legal);
619     setOperationAction(ISD::SUB, MVT::v2i16, Legal);
620     setOperationAction(ISD::MUL, MVT::v2i16, Legal);
621     setOperationAction(ISD::SHL, MVT::v2i16, Legal);
622     setOperationAction(ISD::SRL, MVT::v2i16, Legal);
623     setOperationAction(ISD::SRA, MVT::v2i16, Legal);
624     setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
625     setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
626     setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
627     setOperationAction(ISD::UMAX, MVT::v2i16, Legal);
628 
629     setOperationAction(ISD::FADD, MVT::v2f16, Legal);
630     setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
631     setOperationAction(ISD::FMA, MVT::v2f16, Legal);
632 
633     setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal);
634     setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal);
635 
636     setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);
637 
638     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
639     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
640 
641     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f16, Custom);
642     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
643 
644     setOperationAction(ISD::SHL, MVT::v4i16, Custom);
645     setOperationAction(ISD::SRA, MVT::v4i16, Custom);
646     setOperationAction(ISD::SRL, MVT::v4i16, Custom);
647     setOperationAction(ISD::ADD, MVT::v4i16, Custom);
648     setOperationAction(ISD::SUB, MVT::v4i16, Custom);
649     setOperationAction(ISD::MUL, MVT::v4i16, Custom);
650 
651     setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
652     setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
653     setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
654     setOperationAction(ISD::UMAX, MVT::v4i16, Custom);
655 
656     setOperationAction(ISD::FADD, MVT::v4f16, Custom);
657     setOperationAction(ISD::FMUL, MVT::v4f16, Custom);
658     setOperationAction(ISD::FMA, MVT::v4f16, Custom);
659 
660     setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom);
661     setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom);
662 
663     setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
664     setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);
665     setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom);
666 
667     setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
668     setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
669     setOperationAction(ISD::SELECT, MVT::v4f16, Custom);
670   }
671 
672   setOperationAction(ISD::FNEG, MVT::v4f16, Custom);
673   setOperationAction(ISD::FABS, MVT::v4f16, Custom);
674 
675   if (Subtarget->has16BitInsts()) {
676     setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
677     AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
678     setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
679     AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
680   } else {
681     // Legalization hack.
682     setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
683     setOperationAction(ISD::SELECT, MVT::v2f16, Custom);
684 
685     setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
686     setOperationAction(ISD::FABS, MVT::v2f16, Custom);
687   }
688 
689   for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
690     setOperationAction(ISD::SELECT, VT, Custom);
691   }
692 
693   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
694   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
695   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
696   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
697   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom);
698   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
699   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);
700 
701   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
702   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2i16, Custom);
703   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
704   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4i16, Custom);
705   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom);
706   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
707   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::f16, Custom);
708   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
709   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
710 
711   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
712   setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
713   setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
714   setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);
715   setOperationAction(ISD::INTRINSIC_VOID, MVT::v4i16, Custom);
716   setOperationAction(ISD::INTRINSIC_VOID, MVT::f16, Custom);
717   setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
718   setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
719 
720   setTargetDAGCombine(ISD::ADD);
721   setTargetDAGCombine(ISD::ADDCARRY);
722   setTargetDAGCombine(ISD::SUB);
723   setTargetDAGCombine(ISD::SUBCARRY);
724   setTargetDAGCombine(ISD::FADD);
725   setTargetDAGCombine(ISD::FSUB);
726   setTargetDAGCombine(ISD::FMINNUM);
727   setTargetDAGCombine(ISD::FMAXNUM);
728   setTargetDAGCombine(ISD::FMINNUM_IEEE);
729   setTargetDAGCombine(ISD::FMAXNUM_IEEE);
730   setTargetDAGCombine(ISD::FMA);
731   setTargetDAGCombine(ISD::SMIN);
732   setTargetDAGCombine(ISD::SMAX);
733   setTargetDAGCombine(ISD::UMIN);
734   setTargetDAGCombine(ISD::UMAX);
735   setTargetDAGCombine(ISD::SETCC);
736   setTargetDAGCombine(ISD::AND);
737   setTargetDAGCombine(ISD::OR);
738   setTargetDAGCombine(ISD::XOR);
739   setTargetDAGCombine(ISD::SINT_TO_FP);
740   setTargetDAGCombine(ISD::UINT_TO_FP);
741   setTargetDAGCombine(ISD::FCANONICALIZE);
742   setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
743   setTargetDAGCombine(ISD::ZERO_EXTEND);
744   setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
745   setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
746   setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
747 
748   // All memory operations. Some folding on the pointer operand is done to help
749   // matching the constant offsets in the addressing modes.
750   setTargetDAGCombine(ISD::LOAD);
751   setTargetDAGCombine(ISD::STORE);
752   setTargetDAGCombine(ISD::ATOMIC_LOAD);
753   setTargetDAGCombine(ISD::ATOMIC_STORE);
754   setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
755   setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
756   setTargetDAGCombine(ISD::ATOMIC_SWAP);
757   setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
758   setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
759   setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
760   setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
761   setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
762   setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
763   setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
764   setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
765   setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
766   setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
767   setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD);
768 
769   setSchedulingPreference(Sched::RegPressure);
770 }
771 
772 const GCNSubtarget *SITargetLowering::getSubtarget() const {
773   return Subtarget;
774 }
775 
776 //===----------------------------------------------------------------------===//
777 // TargetLowering queries
778 //===----------------------------------------------------------------------===//
779 
780 // v_mad_mix* support a conversion from f16 to f32.
781 //
782 // There is only one special case when denormals are enabled we don't currently,
783 // where this is OK to use.
784 bool SITargetLowering::isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
785                                        EVT DestVT, EVT SrcVT) const {
786   return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
787           (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
788     DestVT.getScalarType() == MVT::f32 &&
789     SrcVT.getScalarType() == MVT::f16 &&
790     // TODO: This probably only requires no input flushing?
791     !hasFP32Denormals(DAG.getMachineFunction());
792 }
793 
794 bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
795   // SI has some legal vector types, but no legal vector operations. Say no
796   // shuffles are legal in order to prefer scalarizing some vector operations.
797   return false;
798 }
799 
800 MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
801                                                     CallingConv::ID CC,
802                                                     EVT VT) const {
803   if (CC == CallingConv::AMDGPU_KERNEL)
804     return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
805 
806   if (VT.isVector()) {
807     EVT ScalarVT = VT.getScalarType();
808     unsigned Size = ScalarVT.getSizeInBits();
809     if (Size == 32)
810       return ScalarVT.getSimpleVT();
811 
812     if (Size > 32)
813       return MVT::i32;
814 
815     if (Size == 16 && Subtarget->has16BitInsts())
816       return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
817   } else if (VT.getSizeInBits() > 32)
818     return MVT::i32;
819 
820   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
821 }
822 
823 unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
824                                                          CallingConv::ID CC,
825                                                          EVT VT) const {
826   if (CC == CallingConv::AMDGPU_KERNEL)
827     return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
828 
829   if (VT.isVector()) {
830     unsigned NumElts = VT.getVectorNumElements();
831     EVT ScalarVT = VT.getScalarType();
832     unsigned Size = ScalarVT.getSizeInBits();
833 
834     if (Size == 32)
835       return NumElts;
836 
837     if (Size > 32)
838       return NumElts * ((Size + 31) / 32);
839 
840     if (Size == 16 && Subtarget->has16BitInsts())
841       return (NumElts + 1) / 2;
842   } else if (VT.getSizeInBits() > 32)
843     return (VT.getSizeInBits() + 31) / 32;
844 
845   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
846 }
847 
848 unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
849   LLVMContext &Context, CallingConv::ID CC,
850   EVT VT, EVT &IntermediateVT,
851   unsigned &NumIntermediates, MVT &RegisterVT) const {
852   if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
853     unsigned NumElts = VT.getVectorNumElements();
854     EVT ScalarVT = VT.getScalarType();
855     unsigned Size = ScalarVT.getSizeInBits();
856     if (Size == 32) {
857       RegisterVT = ScalarVT.getSimpleVT();
858       IntermediateVT = RegisterVT;
859       NumIntermediates = NumElts;
860       return NumIntermediates;
861     }
862 
863     if (Size > 32) {
864       RegisterVT = MVT::i32;
865       IntermediateVT = RegisterVT;
866       NumIntermediates = NumElts * ((Size + 31) / 32);
867       return NumIntermediates;
868     }
869 
870     // FIXME: We should fix the ABI to be the same on targets without 16-bit
871     // support, but unless we can properly handle 3-vectors, it will be still be
872     // inconsistent.
873     if (Size == 16 && Subtarget->has16BitInsts()) {
874       RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
875       IntermediateVT = RegisterVT;
876       NumIntermediates = (NumElts + 1) / 2;
877       return NumIntermediates;
878     }
879   }
880 
881   return TargetLowering::getVectorTypeBreakdownForCallingConv(
882     Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
883 }
884 
885 // Peek through TFE struct returns to only use the data size.
886 static EVT memVTFromImageReturn(Type *Ty) {
887   auto *ST = dyn_cast<StructType>(Ty);
888   if (!ST)
889     return EVT::getEVT(Ty, true);
890 
891   // Some intrinsics return an aggregate type - special case to work out the
892   // correct memVT.
893   //
894   // Only limited forms of aggregate type currently expected.
895   if (ST->getNumContainedTypes() != 2 ||
896       !ST->getContainedType(1)->isIntegerTy(32))
897     return EVT();
898   return EVT::getEVT(ST->getContainedType(0));
899 }
900 
901 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
902                                           const CallInst &CI,
903                                           MachineFunction &MF,
904                                           unsigned IntrID) const {
905   if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
906           AMDGPU::lookupRsrcIntrinsic(IntrID)) {
907     AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
908                                                   (Intrinsic::ID)IntrID);
909     if (Attr.hasFnAttribute(Attribute::ReadNone))
910       return false;
911 
912     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
913 
914     if (RsrcIntr->IsImage) {
915       Info.ptrVal = MFI->getImagePSV(
916         *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
917         CI.getArgOperand(RsrcIntr->RsrcArg));
918       Info.align.reset();
919     } else {
920       Info.ptrVal = MFI->getBufferPSV(
921         *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
922         CI.getArgOperand(RsrcIntr->RsrcArg));
923     }
924 
925     Info.flags = MachineMemOperand::MODereferenceable;
926     if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
927       Info.opc = ISD::INTRINSIC_W_CHAIN;
928       // TODO: Account for dmask reducing loaded size.
929       Info.memVT = memVTFromImageReturn(CI.getType());
930       Info.flags |= MachineMemOperand::MOLoad;
931     } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
932       Info.opc = ISD::INTRINSIC_VOID;
933       Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
934       Info.flags |= MachineMemOperand::MOStore;
935     } else {
936       // Atomic
937       Info.opc = ISD::INTRINSIC_W_CHAIN;
938       Info.memVT = MVT::getVT(CI.getType());
939       Info.flags = MachineMemOperand::MOLoad |
940                    MachineMemOperand::MOStore |
941                    MachineMemOperand::MODereferenceable;
942 
943       // XXX - Should this be volatile without known ordering?
944       Info.flags |= MachineMemOperand::MOVolatile;
945     }
946     return true;
947   }
948 
949   switch (IntrID) {
950   case Intrinsic::amdgcn_atomic_inc:
951   case Intrinsic::amdgcn_atomic_dec:
952   case Intrinsic::amdgcn_ds_ordered_add:
953   case Intrinsic::amdgcn_ds_ordered_swap:
954   case Intrinsic::amdgcn_ds_fadd:
955   case Intrinsic::amdgcn_ds_fmin:
956   case Intrinsic::amdgcn_ds_fmax: {
957     Info.opc = ISD::INTRINSIC_W_CHAIN;
958     Info.memVT = MVT::getVT(CI.getType());
959     Info.ptrVal = CI.getOperand(0);
960     Info.align.reset();
961     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
962 
963     const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4));
964     if (!Vol->isZero())
965       Info.flags |= MachineMemOperand::MOVolatile;
966 
967     return true;
968   }
969   case Intrinsic::amdgcn_buffer_atomic_fadd: {
970     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
971 
972     Info.opc = ISD::INTRINSIC_VOID;
973     Info.memVT = MVT::getVT(CI.getOperand(0)->getType());
974     Info.ptrVal = MFI->getBufferPSV(
975       *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
976       CI.getArgOperand(1));
977     Info.align.reset();
978     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
979 
980     const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
981     if (!Vol || !Vol->isZero())
982       Info.flags |= MachineMemOperand::MOVolatile;
983 
984     return true;
985   }
986   case Intrinsic::amdgcn_global_atomic_fadd: {
987     Info.opc = ISD::INTRINSIC_VOID;
988     Info.memVT = MVT::getVT(CI.getOperand(0)->getType()
989                             ->getPointerElementType());
990     Info.ptrVal = CI.getOperand(0);
991     Info.align.reset();
992     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
993 
994     return true;
995   }
996   case Intrinsic::amdgcn_ds_append:
997   case Intrinsic::amdgcn_ds_consume: {
998     Info.opc = ISD::INTRINSIC_W_CHAIN;
999     Info.memVT = MVT::getVT(CI.getType());
1000     Info.ptrVal = CI.getOperand(0);
1001     Info.align.reset();
1002     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1003 
1004     const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1));
1005     if (!Vol->isZero())
1006       Info.flags |= MachineMemOperand::MOVolatile;
1007 
1008     return true;
1009   }
1010   case Intrinsic::amdgcn_ds_gws_init:
1011   case Intrinsic::amdgcn_ds_gws_barrier:
1012   case Intrinsic::amdgcn_ds_gws_sema_v:
1013   case Intrinsic::amdgcn_ds_gws_sema_br:
1014   case Intrinsic::amdgcn_ds_gws_sema_p:
1015   case Intrinsic::amdgcn_ds_gws_sema_release_all: {
1016     Info.opc = ISD::INTRINSIC_VOID;
1017 
1018     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1019     Info.ptrVal =
1020         MFI->getGWSPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());
1021 
1022     // This is an abstract access, but we need to specify a type and size.
1023     Info.memVT = MVT::i32;
1024     Info.size = 4;
1025     Info.align = Align(4);
1026 
1027     Info.flags = MachineMemOperand::MOStore;
1028     if (IntrID == Intrinsic::amdgcn_ds_gws_barrier)
1029       Info.flags = MachineMemOperand::MOLoad;
1030     return true;
1031   }
1032   default:
1033     return false;
1034   }
1035 }
1036 
1037 bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
1038                                             SmallVectorImpl<Value*> &Ops,
1039                                             Type *&AccessTy) const {
1040   switch (II->getIntrinsicID()) {
1041   case Intrinsic::amdgcn_atomic_inc:
1042   case Intrinsic::amdgcn_atomic_dec:
1043   case Intrinsic::amdgcn_ds_ordered_add:
1044   case Intrinsic::amdgcn_ds_ordered_swap:
1045   case Intrinsic::amdgcn_ds_fadd:
1046   case Intrinsic::amdgcn_ds_fmin:
1047   case Intrinsic::amdgcn_ds_fmax: {
1048     Value *Ptr = II->getArgOperand(0);
1049     AccessTy = II->getType();
1050     Ops.push_back(Ptr);
1051     return true;
1052   }
1053   default:
1054     return false;
1055   }
1056 }
1057 
1058 bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
1059   if (!Subtarget->hasFlatInstOffsets()) {
1060     // Flat instructions do not have offsets, and only have the register
1061     // address.
1062     return AM.BaseOffs == 0 && AM.Scale == 0;
1063   }
1064 
1065   return AM.Scale == 0 &&
1066          (AM.BaseOffs == 0 || Subtarget->getInstrInfo()->isLegalFLATOffset(
1067                                   AM.BaseOffs, AMDGPUAS::FLAT_ADDRESS,
1068                                   /*Signed=*/false));
1069 }
1070 
1071 bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
1072   if (Subtarget->hasFlatGlobalInsts())
1073     return AM.Scale == 0 &&
1074            (AM.BaseOffs == 0 || Subtarget->getInstrInfo()->isLegalFLATOffset(
1075                                     AM.BaseOffs, AMDGPUAS::GLOBAL_ADDRESS,
1076                                     /*Signed=*/true));
1077 
1078   if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
1079       // Assume the we will use FLAT for all global memory accesses
1080       // on VI.
1081       // FIXME: This assumption is currently wrong.  On VI we still use
1082       // MUBUF instructions for the r + i addressing mode.  As currently
1083       // implemented, the MUBUF instructions only work on buffer < 4GB.
1084       // It may be possible to support > 4GB buffers with MUBUF instructions,
1085       // by setting the stride value in the resource descriptor which would
1086       // increase the size limit to (stride * 4GB).  However, this is risky,
1087       // because it has never been validated.
1088     return isLegalFlatAddressingMode(AM);
1089   }
1090 
1091   return isLegalMUBUFAddressingMode(AM);
1092 }
1093 
1094 bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
1095   // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
1096   // additionally can do r + r + i with addr64. 32-bit has more addressing
1097   // mode options. Depending on the resource constant, it can also do
1098   // (i64 r0) + (i32 r1) * (i14 i).
1099   //
1100   // Private arrays end up using a scratch buffer most of the time, so also
1101   // assume those use MUBUF instructions. Scratch loads / stores are currently
1102   // implemented as mubuf instructions with offen bit set, so slightly
1103   // different than the normal addr64.
1104   if (!isUInt<12>(AM.BaseOffs))
1105     return false;
1106 
1107   // FIXME: Since we can split immediate into soffset and immediate offset,
1108   // would it make sense to allow any immediate?
1109 
1110   switch (AM.Scale) {
1111   case 0: // r + i or just i, depending on HasBaseReg.
1112     return true;
1113   case 1:
1114     return true; // We have r + r or r + i.
1115   case 2:
1116     if (AM.HasBaseReg) {
1117       // Reject 2 * r + r.
1118       return false;
1119     }
1120 
1121     // Allow 2 * r as r + r
1122     // Or  2 * r + i is allowed as r + r + i.
1123     return true;
1124   default: // Don't allow n * r
1125     return false;
1126   }
1127 }
1128 
1129 bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
1130                                              const AddrMode &AM, Type *Ty,
1131                                              unsigned AS, Instruction *I) const {
1132   // No global is ever allowed as a base.
1133   if (AM.BaseGV)
1134     return false;
1135 
1136   if (AS == AMDGPUAS::GLOBAL_ADDRESS)
1137     return isLegalGlobalAddressingMode(AM);
1138 
1139   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
1140       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
1141       AS == AMDGPUAS::BUFFER_FAT_POINTER) {
1142     // If the offset isn't a multiple of 4, it probably isn't going to be
1143     // correctly aligned.
1144     // FIXME: Can we get the real alignment here?
1145     if (AM.BaseOffs % 4 != 0)
1146       return isLegalMUBUFAddressingMode(AM);
1147 
1148     // There are no SMRD extloads, so if we have to do a small type access we
1149     // will use a MUBUF load.
1150     // FIXME?: We also need to do this if unaligned, but we don't know the
1151     // alignment here.
1152     if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
1153       return isLegalGlobalAddressingMode(AM);
1154 
1155     if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
1156       // SMRD instructions have an 8-bit, dword offset on SI.
1157       if (!isUInt<8>(AM.BaseOffs / 4))
1158         return false;
1159     } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
1160       // On CI+, this can also be a 32-bit literal constant offset. If it fits
1161       // in 8-bits, it can use a smaller encoding.
1162       if (!isUInt<32>(AM.BaseOffs / 4))
1163         return false;
1164     } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
1165       // On VI, these use the SMEM format and the offset is 20-bit in bytes.
1166       if (!isUInt<20>(AM.BaseOffs))
1167         return false;
1168     } else
1169       llvm_unreachable("unhandled generation");
1170 
1171     if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1172       return true;
1173 
1174     if (AM.Scale == 1 && AM.HasBaseReg)
1175       return true;
1176 
1177     return false;
1178 
1179   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
1180     return isLegalMUBUFAddressingMode(AM);
1181   } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
1182              AS == AMDGPUAS::REGION_ADDRESS) {
1183     // Basic, single offset DS instructions allow a 16-bit unsigned immediate
1184     // field.
1185     // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
1186     // an 8-bit dword offset but we don't know the alignment here.
1187     if (!isUInt<16>(AM.BaseOffs))
1188       return false;
1189 
1190     if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1191       return true;
1192 
1193     if (AM.Scale == 1 && AM.HasBaseReg)
1194       return true;
1195 
1196     return false;
1197   } else if (AS == AMDGPUAS::FLAT_ADDRESS ||
1198              AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) {
1199     // For an unknown address space, this usually means that this is for some
1200     // reason being used for pure arithmetic, and not based on some addressing
1201     // computation. We don't have instructions that compute pointers with any
1202     // addressing modes, so treat them as having no offset like flat
1203     // instructions.
1204     return isLegalFlatAddressingMode(AM);
1205   } else {
1206     llvm_unreachable("unhandled address space");
1207   }
1208 }
1209 
1210 bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
1211                                         const SelectionDAG &DAG) const {
1212   if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
1213     return (MemVT.getSizeInBits() <= 4 * 32);
1214   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
1215     unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
1216     return (MemVT.getSizeInBits() <= MaxPrivateBits);
1217   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
1218     return (MemVT.getSizeInBits() <= 2 * 32);
1219   }
1220   return true;
1221 }
1222 
1223 bool SITargetLowering::allowsMisalignedMemoryAccessesImpl(
1224     unsigned Size, unsigned AddrSpace, unsigned Align,
1225     MachineMemOperand::Flags Flags, bool *IsFast) const {
1226   if (IsFast)
1227     *IsFast = false;
1228 
1229   if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1230       AddrSpace == AMDGPUAS::REGION_ADDRESS) {
1231     // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
1232     // aligned, 8 byte access in a single operation using ds_read2/write2_b32
1233     // with adjacent offsets.
1234     bool AlignedBy4 = (Align % 4 == 0);
1235     if (IsFast)
1236       *IsFast = AlignedBy4;
1237 
1238     return AlignedBy4;
1239   }
1240 
1241   // FIXME: We have to be conservative here and assume that flat operations
1242   // will access scratch.  If we had access to the IR function, then we
1243   // could determine if any private memory was used in the function.
1244   if (!Subtarget->hasUnalignedScratchAccess() &&
1245       (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
1246        AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
1247     bool AlignedBy4 = Align >= 4;
1248     if (IsFast)
1249       *IsFast = AlignedBy4;
1250 
1251     return AlignedBy4;
1252   }
1253 
1254   if (Subtarget->hasUnalignedBufferAccess()) {
1255     // If we have an uniform constant load, it still requires using a slow
1256     // buffer instruction if unaligned.
1257     if (IsFast) {
1258       // Accesses can really be issued as 1-byte aligned or 4-byte aligned, so
1259       // 2-byte alignment is worse than 1 unless doing a 2-byte accesss.
1260       *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
1261                  AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
1262         Align >= 4 : Align != 2;
1263     }
1264 
1265     return true;
1266   }
1267 
1268   // Smaller than dword value must be aligned.
1269   if (Size < 32)
1270     return false;
1271 
1272   // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
1273   // byte-address are ignored, thus forcing Dword alignment.
1274   // This applies to private, global, and constant memory.
1275   if (IsFast)
1276     *IsFast = true;
1277 
1278   return Size >= 32 && Align >= 4;
1279 }
1280 
1281 bool SITargetLowering::allowsMisalignedMemoryAccesses(
1282     EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
1283     bool *IsFast) const {
1284   if (IsFast)
1285     *IsFast = false;
1286 
1287   // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
1288   // which isn't a simple VT.
1289   // Until MVT is extended to handle this, simply check for the size and
1290   // rely on the condition below: allow accesses if the size is a multiple of 4.
1291   if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
1292                            VT.getStoreSize() > 16)) {
1293     return false;
1294   }
1295 
1296   return allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AddrSpace,
1297                                             Align, Flags, IsFast);
1298 }
1299 
1300 EVT SITargetLowering::getOptimalMemOpType(
1301     const MemOp &Op, const AttributeList &FuncAttributes) const {
1302   // FIXME: Should account for address space here.
1303 
1304   // The default fallback uses the private pointer size as a guess for a type to
1305   // use. Make sure we switch these to 64-bit accesses.
1306 
1307   if (Op.size() >= 16 &&
1308       Op.isDstAligned(Align(4))) // XXX: Should only do for global
1309     return MVT::v4i32;
1310 
1311   if (Op.size() >= 8 && Op.isDstAligned(Align(4)))
1312     return MVT::v2i32;
1313 
1314   // Use the default.
1315   return MVT::Other;
1316 }
1317 
1318 bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1319                                            unsigned DestAS) const {
1320   return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
1321 }
1322 
1323 bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
1324   const MemSDNode *MemNode = cast<MemSDNode>(N);
1325   const Value *Ptr = MemNode->getMemOperand()->getValue();
1326   const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
1327   return I && I->getMetadata("amdgpu.noclobber");
1328 }
1329 
1330 bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS,
1331                                            unsigned DestAS) const {
1332   // Flat -> private/local is a simple truncate.
1333   // Flat -> global is no-op
1334   if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
1335     return true;
1336 
1337   return isNoopAddrSpaceCast(SrcAS, DestAS);
1338 }
1339 
1340 bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
1341   const MemSDNode *MemNode = cast<MemSDNode>(N);
1342 
1343   return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
1344 }
1345 
1346 TargetLoweringBase::LegalizeTypeAction
1347 SITargetLowering::getPreferredVectorAction(MVT VT) const {
1348   int NumElts = VT.getVectorNumElements();
1349   if (NumElts != 1 && VT.getScalarType().bitsLE(MVT::i16))
1350     return VT.isPow2VectorType() ? TypeSplitVector : TypeWidenVector;
1351   return TargetLoweringBase::getPreferredVectorAction(VT);
1352 }
1353 
1354 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
1355                                                          Type *Ty) const {
1356   // FIXME: Could be smarter if called for vector constants.
1357   return true;
1358 }
1359 
1360 bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
1361   if (Subtarget->has16BitInsts() && VT == MVT::i16) {
1362     switch (Op) {
1363     case ISD::LOAD:
1364     case ISD::STORE:
1365 
1366     // These operations are done with 32-bit instructions anyway.
1367     case ISD::AND:
1368     case ISD::OR:
1369     case ISD::XOR:
1370     case ISD::SELECT:
1371       // TODO: Extensions?
1372       return true;
1373     default:
1374       return false;
1375     }
1376   }
1377 
1378   // SimplifySetCC uses this function to determine whether or not it should
1379   // create setcc with i1 operands.  We don't have instructions for i1 setcc.
1380   if (VT == MVT::i1 && Op == ISD::SETCC)
1381     return false;
1382 
1383   return TargetLowering::isTypeDesirableForOp(Op, VT);
1384 }
1385 
1386 SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1387                                                    const SDLoc &SL,
1388                                                    SDValue Chain,
1389                                                    uint64_t Offset) const {
1390   const DataLayout &DL = DAG.getDataLayout();
1391   MachineFunction &MF = DAG.getMachineFunction();
1392   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1393 
1394   const ArgDescriptor *InputPtrReg;
1395   const TargetRegisterClass *RC;
1396 
1397   std::tie(InputPtrReg, RC)
1398     = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
1399 
1400   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
1401   MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
1402   SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
1403     MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1404 
1405   return DAG.getObjectPtrOffset(SL, BasePtr, Offset);
1406 }
1407 
1408 SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1409                                             const SDLoc &SL) const {
1410   uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
1411                                                FIRST_IMPLICIT);
1412   return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1413 }
1414 
1415 SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1416                                          const SDLoc &SL, SDValue Val,
1417                                          bool Signed,
1418                                          const ISD::InputArg *Arg) const {
1419   // First, if it is a widened vector, narrow it.
1420   if (VT.isVector() &&
1421       VT.getVectorNumElements() != MemVT.getVectorNumElements()) {
1422     EVT NarrowedVT =
1423         EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(),
1424                          VT.getVectorNumElements());
1425     Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val,
1426                       DAG.getConstant(0, SL, MVT::i32));
1427   }
1428 
1429   // Then convert the vector elements or scalar value.
1430   if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1431       VT.bitsLT(MemVT)) {
1432     unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1433     Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1434   }
1435 
1436   if (MemVT.isFloatingPoint())
1437     Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
1438   else if (Signed)
1439     Val = DAG.getSExtOrTrunc(Val, SL, VT);
1440   else
1441     Val = DAG.getZExtOrTrunc(Val, SL, VT);
1442 
1443   return Val;
1444 }
1445 
1446 SDValue SITargetLowering::lowerKernargMemParameter(
1447   SelectionDAG &DAG, EVT VT, EVT MemVT,
1448   const SDLoc &SL, SDValue Chain,
1449   uint64_t Offset, unsigned Align, bool Signed,
1450   const ISD::InputArg *Arg) const {
1451   MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
1452 
1453   // Try to avoid using an extload by loading earlier than the argument address,
1454   // and extracting the relevant bits. The load should hopefully be merged with
1455   // the previous argument.
1456   if (MemVT.getStoreSize() < 4 && Align < 4) {
1457     // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
1458     int64_t AlignDownOffset = alignDown(Offset, 4);
1459     int64_t OffsetDiff = Offset - AlignDownOffset;
1460 
1461     EVT IntVT = MemVT.changeTypeToInteger();
1462 
1463     // TODO: If we passed in the base kernel offset we could have a better
1464     // alignment than 4, but we don't really need it.
1465     SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
1466     SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4,
1467                                MachineMemOperand::MODereferenceable |
1468                                MachineMemOperand::MOInvariant);
1469 
1470     SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
1471     SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
1472 
1473     SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
1474     ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
1475     ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);
1476 
1477 
1478     return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
1479   }
1480 
1481   SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1482   SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
1483                              MachineMemOperand::MODereferenceable |
1484                              MachineMemOperand::MOInvariant);
1485 
1486   SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
1487   return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
1488 }
1489 
1490 SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1491                                               const SDLoc &SL, SDValue Chain,
1492                                               const ISD::InputArg &Arg) const {
1493   MachineFunction &MF = DAG.getMachineFunction();
1494   MachineFrameInfo &MFI = MF.getFrameInfo();
1495 
1496   if (Arg.Flags.isByVal()) {
1497     unsigned Size = Arg.Flags.getByValSize();
1498     int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1499     return DAG.getFrameIndex(FrameIdx, MVT::i32);
1500   }
1501 
1502   unsigned ArgOffset = VA.getLocMemOffset();
1503   unsigned ArgSize = VA.getValVT().getStoreSize();
1504 
1505   int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1506 
1507   // Create load nodes to retrieve arguments from the stack.
1508   SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1509   SDValue ArgValue;
1510 
1511   // For NON_EXTLOAD, generic code in getLoad assert(ValVT == MemVT)
1512   ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
1513   MVT MemVT = VA.getValVT();
1514 
1515   switch (VA.getLocInfo()) {
1516   default:
1517     break;
1518   case CCValAssign::BCvt:
1519     MemVT = VA.getLocVT();
1520     break;
1521   case CCValAssign::SExt:
1522     ExtType = ISD::SEXTLOAD;
1523     break;
1524   case CCValAssign::ZExt:
1525     ExtType = ISD::ZEXTLOAD;
1526     break;
1527   case CCValAssign::AExt:
1528     ExtType = ISD::EXTLOAD;
1529     break;
1530   }
1531 
1532   ArgValue = DAG.getExtLoad(
1533     ExtType, SL, VA.getLocVT(), Chain, FIN,
1534     MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
1535     MemVT);
1536   return ArgValue;
1537 }
1538 
1539 SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1540   const SIMachineFunctionInfo &MFI,
1541   EVT VT,
1542   AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
1543   const ArgDescriptor *Reg;
1544   const TargetRegisterClass *RC;
1545 
1546   std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1547   return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1548 }
1549 
1550 static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
1551                                    CallingConv::ID CallConv,
1552                                    ArrayRef<ISD::InputArg> Ins,
1553                                    BitVector &Skipped,
1554                                    FunctionType *FType,
1555                                    SIMachineFunctionInfo *Info) {
1556   for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
1557     const ISD::InputArg *Arg = &Ins[I];
1558 
1559     assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
1560            "vector type argument should have been split");
1561 
1562     // First check if it's a PS input addr.
1563     if (CallConv == CallingConv::AMDGPU_PS &&
1564         !Arg->Flags.isInReg() && PSInputNum <= 15) {
1565       bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);
1566 
1567       // Inconveniently only the first part of the split is marked as isSplit,
1568       // so skip to the end. We only want to increment PSInputNum once for the
1569       // entire split argument.
1570       if (Arg->Flags.isSplit()) {
1571         while (!Arg->Flags.isSplitEnd()) {
1572           assert((!Arg->VT.isVector() ||
1573                   Arg->VT.getScalarSizeInBits() == 16) &&
1574                  "unexpected vector split in ps argument type");
1575           if (!SkipArg)
1576             Splits.push_back(*Arg);
1577           Arg = &Ins[++I];
1578         }
1579       }
1580 
1581       if (SkipArg) {
1582         // We can safely skip PS inputs.
1583         Skipped.set(Arg->getOrigArgIndex());
1584         ++PSInputNum;
1585         continue;
1586       }
1587 
1588       Info->markPSInputAllocated(PSInputNum);
1589       if (Arg->Used)
1590         Info->markPSInputEnabled(PSInputNum);
1591 
1592       ++PSInputNum;
1593     }
1594 
1595     Splits.push_back(*Arg);
1596   }
1597 }
1598 
1599 // Allocate special inputs passed in VGPRs.
1600 void SITargetLowering::allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1601                                                       MachineFunction &MF,
1602                                                       const SIRegisterInfo &TRI,
1603                                                       SIMachineFunctionInfo &Info) const {
1604   const LLT S32 = LLT::scalar(32);
1605   MachineRegisterInfo &MRI = MF.getRegInfo();
1606 
1607   if (Info.hasWorkItemIDX()) {
1608     Register Reg = AMDGPU::VGPR0;
1609     MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1610 
1611     CCInfo.AllocateReg(Reg);
1612     Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
1613   }
1614 
1615   if (Info.hasWorkItemIDY()) {
1616     Register Reg = AMDGPU::VGPR1;
1617     MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1618 
1619     CCInfo.AllocateReg(Reg);
1620     Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
1621   }
1622 
1623   if (Info.hasWorkItemIDZ()) {
1624     Register Reg = AMDGPU::VGPR2;
1625     MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
1626 
1627     CCInfo.AllocateReg(Reg);
1628     Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
1629   }
1630 }
1631 
1632 // Try to allocate a VGPR at the end of the argument list, or if no argument
1633 // VGPRs are left allocating a stack slot.
1634 // If \p Mask is is given it indicates bitfield position in the register.
1635 // If \p Arg is given use it with new ]p Mask instead of allocating new.
1636 static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u,
1637                                          ArgDescriptor Arg = ArgDescriptor()) {
1638   if (Arg.isSet())
1639     return ArgDescriptor::createArg(Arg, Mask);
1640 
1641   ArrayRef<MCPhysReg> ArgVGPRs
1642     = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1643   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1644   if (RegIdx == ArgVGPRs.size()) {
1645     // Spill to stack required.
1646     int64_t Offset = CCInfo.AllocateStack(4, 4);
1647 
1648     return ArgDescriptor::createStack(Offset, Mask);
1649   }
1650 
1651   unsigned Reg = ArgVGPRs[RegIdx];
1652   Reg = CCInfo.AllocateReg(Reg);
1653   assert(Reg != AMDGPU::NoRegister);
1654 
1655   MachineFunction &MF = CCInfo.getMachineFunction();
1656   Register LiveInVReg = MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1657   MF.getRegInfo().setType(LiveInVReg, LLT::scalar(32));
1658   return ArgDescriptor::createRegister(Reg, Mask);
1659 }
1660 
1661 static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
1662                                              const TargetRegisterClass *RC,
1663                                              unsigned NumArgRegs) {
1664   ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
1665   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1666   if (RegIdx == ArgSGPRs.size())
1667     report_fatal_error("ran out of SGPRs for arguments");
1668 
1669   unsigned Reg = ArgSGPRs[RegIdx];
1670   Reg = CCInfo.AllocateReg(Reg);
1671   assert(Reg != AMDGPU::NoRegister);
1672 
1673   MachineFunction &MF = CCInfo.getMachineFunction();
1674   MF.addLiveIn(Reg, RC);
1675   return ArgDescriptor::createRegister(Reg);
1676 }
1677 
1678 static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
1679   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1680 }
1681 
1682 static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
1683   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1684 }
1685 
1686 void SITargetLowering::allocateSpecialInputVGPRs(CCState &CCInfo,
1687                                                  MachineFunction &MF,
1688                                                  const SIRegisterInfo &TRI,
1689                                                  SIMachineFunctionInfo &Info) const {
1690   const unsigned Mask = 0x3ff;
1691   ArgDescriptor Arg;
1692 
1693   if (Info.hasWorkItemIDX()) {
1694     Arg = allocateVGPR32Input(CCInfo, Mask);
1695     Info.setWorkItemIDX(Arg);
1696   }
1697 
1698   if (Info.hasWorkItemIDY()) {
1699     Arg = allocateVGPR32Input(CCInfo, Mask << 10, Arg);
1700     Info.setWorkItemIDY(Arg);
1701   }
1702 
1703   if (Info.hasWorkItemIDZ())
1704     Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo, Mask << 20, Arg));
1705 }
1706 
1707 void SITargetLowering::allocateSpecialInputSGPRs(
1708   CCState &CCInfo,
1709   MachineFunction &MF,
1710   const SIRegisterInfo &TRI,
1711   SIMachineFunctionInfo &Info) const {
1712   auto &ArgInfo = Info.getArgInfo();
1713 
1714   // TODO: Unify handling with private memory pointers.
1715 
1716   if (Info.hasDispatchPtr())
1717     ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1718 
1719   if (Info.hasQueuePtr())
1720     ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1721 
1722   if (Info.hasKernargSegmentPtr())
1723     ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);
1724 
1725   if (Info.hasDispatchID())
1726     ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1727 
1728   // flat_scratch_init is not applicable for non-kernel functions.
1729 
1730   if (Info.hasWorkGroupIDX())
1731     ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1732 
1733   if (Info.hasWorkGroupIDY())
1734     ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1735 
1736   if (Info.hasWorkGroupIDZ())
1737     ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
1738 
1739   if (Info.hasImplicitArgPtr())
1740     ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
1741 }
1742 
1743 // Allocate special inputs passed in user SGPRs.
1744 void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo,
1745                                             MachineFunction &MF,
1746                                             const SIRegisterInfo &TRI,
1747                                             SIMachineFunctionInfo &Info) const {
1748   if (Info.hasImplicitBufferPtr()) {
1749     unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1750     MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1751     CCInfo.AllocateReg(ImplicitBufferPtrReg);
1752   }
1753 
1754   // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1755   if (Info.hasPrivateSegmentBuffer()) {
1756     unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1757     MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1758     CCInfo.AllocateReg(PrivateSegmentBufferReg);
1759   }
1760 
1761   if (Info.hasDispatchPtr()) {
1762     unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1763     MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1764     CCInfo.AllocateReg(DispatchPtrReg);
1765   }
1766 
1767   if (Info.hasQueuePtr()) {
1768     unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1769     MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1770     CCInfo.AllocateReg(QueuePtrReg);
1771   }
1772 
1773   if (Info.hasKernargSegmentPtr()) {
1774     MachineRegisterInfo &MRI = MF.getRegInfo();
1775     Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
1776     CCInfo.AllocateReg(InputPtrReg);
1777 
1778     Register VReg = MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1779     MRI.setType(VReg, LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
1780   }
1781 
1782   if (Info.hasDispatchID()) {
1783     unsigned DispatchIDReg = Info.addDispatchID(TRI);
1784     MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1785     CCInfo.AllocateReg(DispatchIDReg);
1786   }
1787 
1788   if (Info.hasFlatScratchInit()) {
1789     unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1790     MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1791     CCInfo.AllocateReg(FlatScratchInitReg);
1792   }
1793 
1794   // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1795   // these from the dispatch pointer.
1796 }
1797 
1798 // Allocate special input registers that are initialized per-wave.
1799 void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo,
1800                                            MachineFunction &MF,
1801                                            SIMachineFunctionInfo &Info,
1802                                            CallingConv::ID CallConv,
1803                                            bool IsShader) const {
1804   if (Info.hasWorkGroupIDX()) {
1805     unsigned Reg = Info.addWorkGroupIDX();
1806     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
1807     CCInfo.AllocateReg(Reg);
1808   }
1809 
1810   if (Info.hasWorkGroupIDY()) {
1811     unsigned Reg = Info.addWorkGroupIDY();
1812     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
1813     CCInfo.AllocateReg(Reg);
1814   }
1815 
1816   if (Info.hasWorkGroupIDZ()) {
1817     unsigned Reg = Info.addWorkGroupIDZ();
1818     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
1819     CCInfo.AllocateReg(Reg);
1820   }
1821 
1822   if (Info.hasWorkGroupInfo()) {
1823     unsigned Reg = Info.addWorkGroupInfo();
1824     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
1825     CCInfo.AllocateReg(Reg);
1826   }
1827 
1828   if (Info.hasPrivateSegmentWaveByteOffset()) {
1829     // Scratch wave offset passed in system SGPR.
1830     unsigned PrivateSegmentWaveByteOffsetReg;
1831 
1832     if (IsShader) {
1833       PrivateSegmentWaveByteOffsetReg =
1834         Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
1835 
1836       // This is true if the scratch wave byte offset doesn't have a fixed
1837       // location.
1838       if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1839         PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1840         Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1841       }
1842     } else
1843       PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1844 
1845     MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1846     CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1847   }
1848 }
1849 
1850 static void reservePrivateMemoryRegs(const TargetMachine &TM,
1851                                      MachineFunction &MF,
1852                                      const SIRegisterInfo &TRI,
1853                                      SIMachineFunctionInfo &Info) {
1854   // Now that we've figured out where the scratch register inputs are, see if
1855   // should reserve the arguments and use them directly.
1856   MachineFrameInfo &MFI = MF.getFrameInfo();
1857   bool HasStackObjects = MFI.hasStackObjects();
1858   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1859 
1860   // Record that we know we have non-spill stack objects so we don't need to
1861   // check all stack objects later.
1862   if (HasStackObjects)
1863     Info.setHasNonSpillStackObjects(true);
1864 
1865   // Everything live out of a block is spilled with fast regalloc, so it's
1866   // almost certain that spilling will be required.
1867   if (TM.getOptLevel() == CodeGenOpt::None)
1868     HasStackObjects = true;
1869 
1870   // For now assume stack access is needed in any callee functions, so we need
1871   // the scratch registers to pass in.
1872   bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1873 
1874   if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) {
1875     // If we have stack objects, we unquestionably need the private buffer
1876     // resource. For the Code Object V2 ABI, this will be the first 4 user
1877     // SGPR inputs. We can reserve those and use them directly.
1878 
1879     Register PrivateSegmentBufferReg =
1880         Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
1881     Info.setScratchRSrcReg(PrivateSegmentBufferReg);
1882   } else {
1883     unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
1884     // We tentatively reserve the last registers (skipping the last registers
1885     // which may contain VCC, FLAT_SCR, and XNACK). After register allocation,
1886     // we'll replace these with the ones immediately after those which were
1887     // really allocated. In the prologue copies will be inserted from the
1888     // argument to these reserved registers.
1889 
1890     // Without HSA, relocations are used for the scratch pointer and the
1891     // buffer resource setup is always inserted in the prologue. Scratch wave
1892     // offset is still in an input SGPR.
1893     Info.setScratchRSrcReg(ReservedBufferReg);
1894   }
1895 
1896   // hasFP should be accurate for kernels even before the frame is finalized.
1897   if (ST.getFrameLowering()->hasFP(MF)) {
1898     MachineRegisterInfo &MRI = MF.getRegInfo();
1899 
1900     // Try to use s32 as the SP, but move it if it would interfere with input
1901     // arguments. This won't work with calls though.
1902     //
1903     // FIXME: Move SP to avoid any possible inputs, or find a way to spill input
1904     // registers.
1905     if (!MRI.isLiveIn(AMDGPU::SGPR32)) {
1906       Info.setStackPtrOffsetReg(AMDGPU::SGPR32);
1907     } else {
1908       assert(AMDGPU::isShader(MF.getFunction().getCallingConv()));
1909 
1910       if (MFI.hasCalls())
1911         report_fatal_error("call in graphics shader with too many input SGPRs");
1912 
1913       for (unsigned Reg : AMDGPU::SGPR_32RegClass) {
1914         if (!MRI.isLiveIn(Reg)) {
1915           Info.setStackPtrOffsetReg(Reg);
1916           break;
1917         }
1918       }
1919 
1920       if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG)
1921         report_fatal_error("failed to find register for SP");
1922     }
1923 
1924     if (MFI.hasCalls()) {
1925       Info.setScratchWaveOffsetReg(AMDGPU::SGPR33);
1926       Info.setFrameOffsetReg(AMDGPU::SGPR33);
1927     } else {
1928       unsigned ReservedOffsetReg =
1929         TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1930       Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1931       Info.setFrameOffsetReg(ReservedOffsetReg);
1932     }
1933   } else if (RequiresStackAccess) {
1934     assert(!MFI.hasCalls());
1935     // We know there are accesses and they will be done relative to SP, so just
1936     // pin it to the input.
1937     //
1938     // FIXME: Should not do this if inline asm is reading/writing these
1939     // registers.
1940     Register PreloadedSP = Info.getPreloadedReg(
1941         AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
1942 
1943     Info.setStackPtrOffsetReg(PreloadedSP);
1944     Info.setScratchWaveOffsetReg(PreloadedSP);
1945     Info.setFrameOffsetReg(PreloadedSP);
1946   } else {
1947     assert(!MFI.hasCalls());
1948 
1949     // There may not be stack access at all. There may still be spills, or
1950     // access of a constant pointer (in which cases an extra copy will be
1951     // emitted in the prolog).
1952     unsigned ReservedOffsetReg
1953       = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1954     Info.setStackPtrOffsetReg(ReservedOffsetReg);
1955     Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1956     Info.setFrameOffsetReg(ReservedOffsetReg);
1957   }
1958 }
1959 
1960 bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
1961   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1962   return !Info->isEntryFunction();
1963 }
1964 
1965 void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
1966 
1967 }
1968 
1969 void SITargetLowering::insertCopiesSplitCSR(
1970   MachineBasicBlock *Entry,
1971   const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1972   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1973 
1974   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1975   if (!IStart)
1976     return;
1977 
1978   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1979   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1980   MachineBasicBlock::iterator MBBI = Entry->begin();
1981   for (const MCPhysReg *I = IStart; *I; ++I) {
1982     const TargetRegisterClass *RC = nullptr;
1983     if (AMDGPU::SReg_64RegClass.contains(*I))
1984       RC = &AMDGPU::SGPR_64RegClass;
1985     else if (AMDGPU::SReg_32RegClass.contains(*I))
1986       RC = &AMDGPU::SGPR_32RegClass;
1987     else
1988       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
1989 
1990     Register NewVR = MRI->createVirtualRegister(RC);
1991     // Create copy from CSR to a virtual register.
1992     Entry->addLiveIn(*I);
1993     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
1994       .addReg(*I);
1995 
1996     // Insert the copy-back instructions right before the terminator.
1997     for (auto *Exit : Exits)
1998       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
1999               TII->get(TargetOpcode::COPY), *I)
2000         .addReg(NewVR);
2001   }
2002 }
2003 
2004 SDValue SITargetLowering::LowerFormalArguments(
2005     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
2006     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2007     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2008   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2009 
2010   MachineFunction &MF = DAG.getMachineFunction();
2011   const Function &Fn = MF.getFunction();
2012   FunctionType *FType = MF.getFunction().getFunctionType();
2013   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2014 
2015   if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
2016     DiagnosticInfoUnsupported NoGraphicsHSA(
2017         Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
2018     DAG.getContext()->diagnose(NoGraphicsHSA);
2019     return DAG.getEntryNode();
2020   }
2021 
2022   SmallVector<ISD::InputArg, 16> Splits;
2023   SmallVector<CCValAssign, 16> ArgLocs;
2024   BitVector Skipped(Ins.size());
2025   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
2026                  *DAG.getContext());
2027 
2028   bool IsShader = AMDGPU::isShader(CallConv);
2029   bool IsKernel = AMDGPU::isKernel(CallConv);
2030   bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
2031 
2032   if (IsShader) {
2033     processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
2034 
2035     // At least one interpolation mode must be enabled or else the GPU will
2036     // hang.
2037     //
2038     // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
2039     // set PSInputAddr, the user wants to enable some bits after the compilation
2040     // based on run-time states. Since we can't know what the final PSInputEna
2041     // will look like, so we shouldn't do anything here and the user should take
2042     // responsibility for the correct programming.
2043     //
2044     // Otherwise, the following restrictions apply:
2045     // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
2046     // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
2047     //   enabled too.
2048     if (CallConv == CallingConv::AMDGPU_PS) {
2049       if ((Info->getPSInputAddr() & 0x7F) == 0 ||
2050            ((Info->getPSInputAddr() & 0xF) == 0 &&
2051             Info->isPSInputAllocated(11))) {
2052         CCInfo.AllocateReg(AMDGPU::VGPR0);
2053         CCInfo.AllocateReg(AMDGPU::VGPR1);
2054         Info->markPSInputAllocated(0);
2055         Info->markPSInputEnabled(0);
2056       }
2057       if (Subtarget->isAmdPalOS()) {
2058         // For isAmdPalOS, the user does not enable some bits after compilation
2059         // based on run-time states; the register values being generated here are
2060         // the final ones set in hardware. Therefore we need to apply the
2061         // workaround to PSInputAddr and PSInputEnable together.  (The case where
2062         // a bit is set in PSInputAddr but not PSInputEnable is where the
2063         // frontend set up an input arg for a particular interpolation mode, but
2064         // nothing uses that input arg. Really we should have an earlier pass
2065         // that removes such an arg.)
2066         unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
2067         if ((PsInputBits & 0x7F) == 0 ||
2068             ((PsInputBits & 0xF) == 0 &&
2069              (PsInputBits >> 11 & 1)))
2070           Info->markPSInputEnabled(
2071               countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
2072       }
2073     }
2074 
2075     assert(!Info->hasDispatchPtr() &&
2076            !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
2077            !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
2078            !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
2079            !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
2080            !Info->hasWorkItemIDZ());
2081   } else if (IsKernel) {
2082     assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
2083   } else {
2084     Splits.append(Ins.begin(), Ins.end());
2085   }
2086 
2087   if (IsEntryFunc) {
2088     allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
2089     allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
2090   }
2091 
2092   if (IsKernel) {
2093     analyzeFormalArgumentsCompute(CCInfo, Ins);
2094   } else {
2095     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
2096     CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
2097   }
2098 
2099   SmallVector<SDValue, 16> Chains;
2100 
2101   // FIXME: This is the minimum kernel argument alignment. We should improve
2102   // this to the maximum alignment of the arguments.
2103   //
2104   // FIXME: Alignment of explicit arguments totally broken with non-0 explicit
2105   // kern arg offset.
2106   const unsigned KernelArgBaseAlign = 16;
2107 
2108    for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
2109     const ISD::InputArg &Arg = Ins[i];
2110     if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
2111       InVals.push_back(DAG.getUNDEF(Arg.VT));
2112       continue;
2113     }
2114 
2115     CCValAssign &VA = ArgLocs[ArgIdx++];
2116     MVT VT = VA.getLocVT();
2117 
2118     if (IsEntryFunc && VA.isMemLoc()) {
2119       VT = Ins[i].VT;
2120       EVT MemVT = VA.getLocVT();
2121 
2122       const uint64_t Offset = VA.getLocMemOffset();
2123       unsigned Align = MinAlign(KernelArgBaseAlign, Offset);
2124 
2125       SDValue Arg = lowerKernargMemParameter(
2126         DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]);
2127       Chains.push_back(Arg.getValue(1));
2128 
2129       auto *ParamTy =
2130         dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
2131       if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
2132           ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
2133                       ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) {
2134         // On SI local pointers are just offsets into LDS, so they are always
2135         // less than 16-bits.  On CI and newer they could potentially be
2136         // real pointers, so we can't guarantee their size.
2137         Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
2138                           DAG.getValueType(MVT::i16));
2139       }
2140 
2141       InVals.push_back(Arg);
2142       continue;
2143     } else if (!IsEntryFunc && VA.isMemLoc()) {
2144       SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
2145       InVals.push_back(Val);
2146       if (!Arg.Flags.isByVal())
2147         Chains.push_back(Val.getValue(1));
2148       continue;
2149     }
2150 
2151     assert(VA.isRegLoc() && "Parameter must be in a register!");
2152 
2153     Register Reg = VA.getLocReg();
2154     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2155     EVT ValVT = VA.getValVT();
2156 
2157     Reg = MF.addLiveIn(Reg, RC);
2158     SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
2159 
2160     if (Arg.Flags.isSRet()) {
2161       // The return object should be reasonably addressable.
2162 
2163       // FIXME: This helps when the return is a real sret. If it is a
2164       // automatically inserted sret (i.e. CanLowerReturn returns false), an
2165       // extra copy is inserted in SelectionDAGBuilder which obscures this.
2166       unsigned NumBits
2167         = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex();
2168       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2169         DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
2170     }
2171 
2172     // If this is an 8 or 16-bit value, it is really passed promoted
2173     // to 32 bits. Insert an assert[sz]ext to capture this, then
2174     // truncate to the right size.
2175     switch (VA.getLocInfo()) {
2176     case CCValAssign::Full:
2177       break;
2178     case CCValAssign::BCvt:
2179       Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
2180       break;
2181     case CCValAssign::SExt:
2182       Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
2183                         DAG.getValueType(ValVT));
2184       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2185       break;
2186     case CCValAssign::ZExt:
2187       Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2188                         DAG.getValueType(ValVT));
2189       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2190       break;
2191     case CCValAssign::AExt:
2192       Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2193       break;
2194     default:
2195       llvm_unreachable("Unknown loc info!");
2196     }
2197 
2198     InVals.push_back(Val);
2199   }
2200 
2201   if (!IsEntryFunc) {
2202     // Special inputs come after user arguments.
2203     allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
2204   }
2205 
2206   // Start adding system SGPRs.
2207   if (IsEntryFunc) {
2208     allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
2209   } else {
2210     CCInfo.AllocateReg(Info->getScratchRSrcReg());
2211     CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
2212     CCInfo.AllocateReg(Info->getFrameOffsetReg());
2213     allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
2214   }
2215 
2216   auto &ArgUsageInfo =
2217     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2218   ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
2219 
2220   unsigned StackArgSize = CCInfo.getNextStackOffset();
2221   Info->setBytesInStackArgArea(StackArgSize);
2222 
2223   return Chains.empty() ? Chain :
2224     DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
2225 }
2226 
2227 // TODO: If return values can't fit in registers, we should return as many as
2228 // possible in registers before passing on stack.
2229 bool SITargetLowering::CanLowerReturn(
2230   CallingConv::ID CallConv,
2231   MachineFunction &MF, bool IsVarArg,
2232   const SmallVectorImpl<ISD::OutputArg> &Outs,
2233   LLVMContext &Context) const {
2234   // Replacing returns with sret/stack usage doesn't make sense for shaders.
2235   // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
2236   // for shaders. Vector types should be explicitly handled by CC.
2237   if (AMDGPU::isEntryFunctionCC(CallConv))
2238     return true;
2239 
2240   SmallVector<CCValAssign, 16> RVLocs;
2241   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2242   return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
2243 }
2244 
2245 SDValue
2246 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2247                               bool isVarArg,
2248                               const SmallVectorImpl<ISD::OutputArg> &Outs,
2249                               const SmallVectorImpl<SDValue> &OutVals,
2250                               const SDLoc &DL, SelectionDAG &DAG) const {
2251   MachineFunction &MF = DAG.getMachineFunction();
2252   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2253 
2254   if (AMDGPU::isKernel(CallConv)) {
2255     return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
2256                                              OutVals, DL, DAG);
2257   }
2258 
2259   bool IsShader = AMDGPU::isShader(CallConv);
2260 
2261   Info->setIfReturnsVoid(Outs.empty());
2262   bool IsWaveEnd = Info->returnsVoid() && IsShader;
2263 
2264   // CCValAssign - represent the assignment of the return value to a location.
2265   SmallVector<CCValAssign, 48> RVLocs;
2266   SmallVector<ISD::OutputArg, 48> Splits;
2267 
2268   // CCState - Info about the registers and stack slots.
2269   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2270                  *DAG.getContext());
2271 
2272   // Analyze outgoing return values.
2273   CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2274 
2275   SDValue Flag;
2276   SmallVector<SDValue, 48> RetOps;
2277   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2278 
2279   // Add return address for callable functions.
2280   if (!Info->isEntryFunction()) {
2281     const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2282     SDValue ReturnAddrReg = CreateLiveInRegister(
2283       DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2284 
2285     SDValue ReturnAddrVirtualReg = DAG.getRegister(
2286         MF.getRegInfo().createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass),
2287         MVT::i64);
2288     Chain =
2289         DAG.getCopyToReg(Chain, DL, ReturnAddrVirtualReg, ReturnAddrReg, Flag);
2290     Flag = Chain.getValue(1);
2291     RetOps.push_back(ReturnAddrVirtualReg);
2292   }
2293 
2294   // Copy the result values into the output registers.
2295   for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
2296        ++I, ++RealRVLocIdx) {
2297     CCValAssign &VA = RVLocs[I];
2298     assert(VA.isRegLoc() && "Can only return in registers!");
2299     // TODO: Partially return in registers if return values don't fit.
2300     SDValue Arg = OutVals[RealRVLocIdx];
2301 
2302     // Copied from other backends.
2303     switch (VA.getLocInfo()) {
2304     case CCValAssign::Full:
2305       break;
2306     case CCValAssign::BCvt:
2307       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2308       break;
2309     case CCValAssign::SExt:
2310       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2311       break;
2312     case CCValAssign::ZExt:
2313       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2314       break;
2315     case CCValAssign::AExt:
2316       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2317       break;
2318     default:
2319       llvm_unreachable("Unknown loc info!");
2320     }
2321 
2322     Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2323     Flag = Chain.getValue(1);
2324     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2325   }
2326 
2327   // FIXME: Does sret work properly?
2328   if (!Info->isEntryFunction()) {
2329     const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2330     const MCPhysReg *I =
2331       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2332     if (I) {
2333       for (; *I; ++I) {
2334         if (AMDGPU::SReg_64RegClass.contains(*I))
2335           RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2336         else if (AMDGPU::SReg_32RegClass.contains(*I))
2337           RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2338         else
2339           llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2340       }
2341     }
2342   }
2343 
2344   // Update chain and glue.
2345   RetOps[0] = Chain;
2346   if (Flag.getNode())
2347     RetOps.push_back(Flag);
2348 
2349   unsigned Opc = AMDGPUISD::ENDPGM;
2350   if (!IsWaveEnd)
2351     Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
2352   return DAG.getNode(Opc, DL, MVT::Other, RetOps);
2353 }
2354 
2355 SDValue SITargetLowering::LowerCallResult(
2356     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2357     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2358     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2359     SDValue ThisVal) const {
2360   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2361 
2362   // Assign locations to each value returned by this call.
2363   SmallVector<CCValAssign, 16> RVLocs;
2364   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2365                  *DAG.getContext());
2366   CCInfo.AnalyzeCallResult(Ins, RetCC);
2367 
2368   // Copy all of the result registers out of their specified physreg.
2369   for (unsigned i = 0; i != RVLocs.size(); ++i) {
2370     CCValAssign VA = RVLocs[i];
2371     SDValue Val;
2372 
2373     if (VA.isRegLoc()) {
2374       Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2375       Chain = Val.getValue(1);
2376       InFlag = Val.getValue(2);
2377     } else if (VA.isMemLoc()) {
2378       report_fatal_error("TODO: return values in memory");
2379     } else
2380       llvm_unreachable("unknown argument location type");
2381 
2382     switch (VA.getLocInfo()) {
2383     case CCValAssign::Full:
2384       break;
2385     case CCValAssign::BCvt:
2386       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2387       break;
2388     case CCValAssign::ZExt:
2389       Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2390                         DAG.getValueType(VA.getValVT()));
2391       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2392       break;
2393     case CCValAssign::SExt:
2394       Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2395                         DAG.getValueType(VA.getValVT()));
2396       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2397       break;
2398     case CCValAssign::AExt:
2399       Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2400       break;
2401     default:
2402       llvm_unreachable("Unknown loc info!");
2403     }
2404 
2405     InVals.push_back(Val);
2406   }
2407 
2408   return Chain;
2409 }
2410 
2411 // Add code to pass special inputs required depending on used features separate
2412 // from the explicit user arguments present in the IR.
2413 void SITargetLowering::passSpecialInputs(
2414     CallLoweringInfo &CLI,
2415     CCState &CCInfo,
2416     const SIMachineFunctionInfo &Info,
2417     SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2418     SmallVectorImpl<SDValue> &MemOpChains,
2419     SDValue Chain) const {
2420   // If we don't have a call site, this was a call inserted by
2421   // legalization. These can never use special inputs.
2422   if (!CLI.CS)
2423     return;
2424 
2425   const Function *CalleeFunc = CLI.CS.getCalledFunction();
2426   assert(CalleeFunc);
2427 
2428   SelectionDAG &DAG = CLI.DAG;
2429   const SDLoc &DL = CLI.DL;
2430 
2431   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2432 
2433   auto &ArgUsageInfo =
2434     DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2435   const AMDGPUFunctionArgInfo &CalleeArgInfo
2436     = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2437 
2438   const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2439 
2440   // TODO: Unify with private memory register handling. This is complicated by
2441   // the fact that at least in kernels, the input argument is not necessarily
2442   // in the same location as the input.
2443   AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
2444     AMDGPUFunctionArgInfo::DISPATCH_PTR,
2445     AMDGPUFunctionArgInfo::QUEUE_PTR,
2446     AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
2447     AMDGPUFunctionArgInfo::DISPATCH_ID,
2448     AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
2449     AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
2450     AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
2451     AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
2452   };
2453 
2454   for (auto InputID : InputRegs) {
2455     const ArgDescriptor *OutgoingArg;
2456     const TargetRegisterClass *ArgRC;
2457 
2458     std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2459     if (!OutgoingArg)
2460       continue;
2461 
2462     const ArgDescriptor *IncomingArg;
2463     const TargetRegisterClass *IncomingArgRC;
2464     std::tie(IncomingArg, IncomingArgRC)
2465       = CallerArgInfo.getPreloadedValue(InputID);
2466     assert(IncomingArgRC == ArgRC);
2467 
2468     // All special arguments are ints for now.
2469     EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
2470     SDValue InputReg;
2471 
2472     if (IncomingArg) {
2473       InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2474     } else {
2475       // The implicit arg ptr is special because it doesn't have a corresponding
2476       // input for kernels, and is computed from the kernarg segment pointer.
2477       assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2478       InputReg = getImplicitArgPtr(DAG, DL);
2479     }
2480 
2481     if (OutgoingArg->isRegister()) {
2482       RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2483     } else {
2484       unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4);
2485       SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2486                                               SpecialArgOffset);
2487       MemOpChains.push_back(ArgStore);
2488     }
2489   }
2490 
2491   // Pack workitem IDs into a single register or pass it as is if already
2492   // packed.
2493   const ArgDescriptor *OutgoingArg;
2494   const TargetRegisterClass *ArgRC;
2495 
2496   std::tie(OutgoingArg, ArgRC) =
2497     CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
2498   if (!OutgoingArg)
2499     std::tie(OutgoingArg, ArgRC) =
2500       CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
2501   if (!OutgoingArg)
2502     std::tie(OutgoingArg, ArgRC) =
2503       CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
2504   if (!OutgoingArg)
2505     return;
2506 
2507   const ArgDescriptor *IncomingArgX
2508     = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X).first;
2509   const ArgDescriptor *IncomingArgY
2510     = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y).first;
2511   const ArgDescriptor *IncomingArgZ
2512     = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z).first;
2513 
2514   SDValue InputReg;
2515   SDLoc SL;
2516 
2517   // If incoming ids are not packed we need to pack them.
2518   if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo.WorkItemIDX)
2519     InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX);
2520 
2521   if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo.WorkItemIDY) {
2522     SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY);
2523     Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y,
2524                     DAG.getShiftAmountConstant(10, MVT::i32, SL));
2525     InputReg = InputReg.getNode() ?
2526                  DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Y) : Y;
2527   }
2528 
2529   if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo.WorkItemIDZ) {
2530     SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ);
2531     Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z,
2532                     DAG.getShiftAmountConstant(20, MVT::i32, SL));
2533     InputReg = InputReg.getNode() ?
2534                  DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Z) : Z;
2535   }
2536 
2537   if (!InputReg.getNode()) {
2538     // Workitem ids are already packed, any of present incoming arguments
2539     // will carry all required fields.
2540     ArgDescriptor IncomingArg = ArgDescriptor::createArg(
2541       IncomingArgX ? *IncomingArgX :
2542       IncomingArgY ? *IncomingArgY :
2543                      *IncomingArgZ, ~0u);
2544     InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg);
2545   }
2546 
2547   if (OutgoingArg->isRegister()) {
2548     RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2549   } else {
2550     unsigned SpecialArgOffset = CCInfo.AllocateStack(4, 4);
2551     SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2552                                             SpecialArgOffset);
2553     MemOpChains.push_back(ArgStore);
2554   }
2555 }
2556 
2557 static bool canGuaranteeTCO(CallingConv::ID CC) {
2558   return CC == CallingConv::Fast;
2559 }
2560 
2561 /// Return true if we might ever do TCO for calls with this calling convention.
2562 static bool mayTailCallThisCC(CallingConv::ID CC) {
2563   switch (CC) {
2564   case CallingConv::C:
2565     return true;
2566   default:
2567     return canGuaranteeTCO(CC);
2568   }
2569 }
2570 
2571 bool SITargetLowering::isEligibleForTailCallOptimization(
2572     SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2573     const SmallVectorImpl<ISD::OutputArg> &Outs,
2574     const SmallVectorImpl<SDValue> &OutVals,
2575     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2576   if (!mayTailCallThisCC(CalleeCC))
2577     return false;
2578 
2579   MachineFunction &MF = DAG.getMachineFunction();
2580   const Function &CallerF = MF.getFunction();
2581   CallingConv::ID CallerCC = CallerF.getCallingConv();
2582   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2583   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2584 
2585   // Kernels aren't callable, and don't have a live in return address so it
2586   // doesn't make sense to do a tail call with entry functions.
2587   if (!CallerPreserved)
2588     return false;
2589 
2590   bool CCMatch = CallerCC == CalleeCC;
2591 
2592   if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
2593     if (canGuaranteeTCO(CalleeCC) && CCMatch)
2594       return true;
2595     return false;
2596   }
2597 
2598   // TODO: Can we handle var args?
2599   if (IsVarArg)
2600     return false;
2601 
2602   for (const Argument &Arg : CallerF.args()) {
2603     if (Arg.hasByValAttr())
2604       return false;
2605   }
2606 
2607   LLVMContext &Ctx = *DAG.getContext();
2608 
2609   // Check that the call results are passed in the same way.
2610   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2611                                   CCAssignFnForCall(CalleeCC, IsVarArg),
2612                                   CCAssignFnForCall(CallerCC, IsVarArg)))
2613     return false;
2614 
2615   // The callee has to preserve all registers the caller needs to preserve.
2616   if (!CCMatch) {
2617     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2618     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2619       return false;
2620   }
2621 
2622   // Nothing more to check if the callee is taking no arguments.
2623   if (Outs.empty())
2624     return true;
2625 
2626   SmallVector<CCValAssign, 16> ArgLocs;
2627   CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2628 
2629   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2630 
2631   const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2632   // If the stack arguments for this call do not fit into our own save area then
2633   // the call cannot be made tail.
2634   // TODO: Is this really necessary?
2635   if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2636     return false;
2637 
2638   const MachineRegisterInfo &MRI = MF.getRegInfo();
2639   return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2640 }
2641 
2642 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2643   if (!CI->isTailCall())
2644     return false;
2645 
2646   const Function *ParentFn = CI->getParent()->getParent();
2647   if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2648     return false;
2649   return true;
2650 }
2651 
2652 // The wave scratch offset register is used as the global base pointer.
2653 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2654                                     SmallVectorImpl<SDValue> &InVals) const {
2655   SelectionDAG &DAG = CLI.DAG;
2656   const SDLoc &DL = CLI.DL;
2657   SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2658   SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2659   SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2660   SDValue Chain = CLI.Chain;
2661   SDValue Callee = CLI.Callee;
2662   bool &IsTailCall = CLI.IsTailCall;
2663   CallingConv::ID CallConv = CLI.CallConv;
2664   bool IsVarArg = CLI.IsVarArg;
2665   bool IsSibCall = false;
2666   bool IsThisReturn = false;
2667   MachineFunction &MF = DAG.getMachineFunction();
2668 
2669   if (Callee.isUndef() || isNullConstant(Callee)) {
2670     if (!CLI.IsTailCall) {
2671       for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
2672         InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
2673     }
2674 
2675     return Chain;
2676   }
2677 
2678   if (IsVarArg) {
2679     return lowerUnhandledCall(CLI, InVals,
2680                               "unsupported call to variadic function ");
2681   }
2682 
2683   if (!CLI.CS.getInstruction())
2684     report_fatal_error("unsupported libcall legalization");
2685 
2686   if (!CLI.CS.getCalledFunction()) {
2687     return lowerUnhandledCall(CLI, InVals,
2688                               "unsupported indirect call to function ");
2689   }
2690 
2691   if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2692     return lowerUnhandledCall(CLI, InVals,
2693                               "unsupported required tail call to function ");
2694   }
2695 
2696   if (AMDGPU::isShader(MF.getFunction().getCallingConv())) {
2697     // Note the issue is with the CC of the calling function, not of the call
2698     // itself.
2699     return lowerUnhandledCall(CLI, InVals,
2700                           "unsupported call from graphics shader of function ");
2701   }
2702 
2703   if (IsTailCall) {
2704     IsTailCall = isEligibleForTailCallOptimization(
2705       Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2706     if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2707       report_fatal_error("failed to perform tail call elimination on a call "
2708                          "site marked musttail");
2709     }
2710 
2711     bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2712 
2713     // A sibling call is one where we're under the usual C ABI and not planning
2714     // to change that but can still do a tail call:
2715     if (!TailCallOpt && IsTailCall)
2716       IsSibCall = true;
2717 
2718     if (IsTailCall)
2719       ++NumTailCalls;
2720   }
2721 
2722   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2723 
2724   // Analyze operands of the call, assigning locations to each operand.
2725   SmallVector<CCValAssign, 16> ArgLocs;
2726   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2727   CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
2728 
2729   CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2730 
2731   // Get a count of how many bytes are to be pushed on the stack.
2732   unsigned NumBytes = CCInfo.getNextStackOffset();
2733 
2734   if (IsSibCall) {
2735     // Since we're not changing the ABI to make this a tail call, the memory
2736     // operands are already available in the caller's incoming argument space.
2737     NumBytes = 0;
2738   }
2739 
2740   // FPDiff is the byte offset of the call's argument area from the callee's.
2741   // Stores to callee stack arguments will be placed in FixedStackSlots offset
2742   // by this amount for a tail call. In a sibling call it must be 0 because the
2743   // caller will deallocate the entire stack and the callee still expects its
2744   // arguments to begin at SP+0. Completely unused for non-tail calls.
2745   int32_t FPDiff = 0;
2746   MachineFrameInfo &MFI = MF.getFrameInfo();
2747   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2748 
2749   // Adjust the stack pointer for the new arguments...
2750   // These operations are automatically eliminated by the prolog/epilog pass
2751   if (!IsSibCall) {
2752     Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2753 
2754     SmallVector<SDValue, 4> CopyFromChains;
2755 
2756     // In the HSA case, this should be an identity copy.
2757     SDValue ScratchRSrcReg
2758       = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2759     RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
2760     CopyFromChains.push_back(ScratchRSrcReg.getValue(1));
2761     Chain = DAG.getTokenFactor(DL, CopyFromChains);
2762   }
2763 
2764   SmallVector<SDValue, 8> MemOpChains;
2765   MVT PtrVT = MVT::i32;
2766 
2767   // Walk the register/memloc assignments, inserting copies/loads.
2768   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2769     CCValAssign &VA = ArgLocs[i];
2770     SDValue Arg = OutVals[i];
2771 
2772     // Promote the value if needed.
2773     switch (VA.getLocInfo()) {
2774     case CCValAssign::Full:
2775       break;
2776     case CCValAssign::BCvt:
2777       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2778       break;
2779     case CCValAssign::ZExt:
2780       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2781       break;
2782     case CCValAssign::SExt:
2783       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2784       break;
2785     case CCValAssign::AExt:
2786       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2787       break;
2788     case CCValAssign::FPExt:
2789       Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2790       break;
2791     default:
2792       llvm_unreachable("Unknown loc info!");
2793     }
2794 
2795     if (VA.isRegLoc()) {
2796       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2797     } else {
2798       assert(VA.isMemLoc());
2799 
2800       SDValue DstAddr;
2801       MachinePointerInfo DstInfo;
2802 
2803       unsigned LocMemOffset = VA.getLocMemOffset();
2804       int32_t Offset = LocMemOffset;
2805 
2806       SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT);
2807       MaybeAlign Alignment;
2808 
2809       if (IsTailCall) {
2810         ISD::ArgFlagsTy Flags = Outs[i].Flags;
2811         unsigned OpSize = Flags.isByVal() ?
2812           Flags.getByValSize() : VA.getValVT().getStoreSize();
2813 
2814         // FIXME: We can have better than the minimum byval required alignment.
2815         Alignment =
2816             Flags.isByVal()
2817                 ? Flags.getNonZeroByValAlign()
2818                 : commonAlignment(Subtarget->getStackAlignment(), Offset);
2819 
2820         Offset = Offset + FPDiff;
2821         int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2822 
2823         DstAddr = DAG.getFrameIndex(FI, PtrVT);
2824         DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2825 
2826         // Make sure any stack arguments overlapping with where we're storing
2827         // are loaded before this eventual operation. Otherwise they'll be
2828         // clobbered.
2829 
2830         // FIXME: Why is this really necessary? This seems to just result in a
2831         // lot of code to copy the stack and write them back to the same
2832         // locations, which are supposed to be immutable?
2833         Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2834       } else {
2835         DstAddr = PtrOff;
2836         DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
2837         Alignment =
2838             commonAlignment(Subtarget->getStackAlignment(), LocMemOffset);
2839       }
2840 
2841       if (Outs[i].Flags.isByVal()) {
2842         SDValue SizeNode =
2843             DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2844         SDValue Cpy =
2845             DAG.getMemcpy(Chain, DL, DstAddr, Arg, SizeNode,
2846                           Outs[i].Flags.getNonZeroByValAlign(),
2847                           /*isVol = */ false, /*AlwaysInline = */ true,
2848                           /*isTailCall = */ false, DstInfo,
2849                           MachinePointerInfo(AMDGPUAS::PRIVATE_ADDRESS));
2850 
2851         MemOpChains.push_back(Cpy);
2852       } else {
2853         SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo,
2854                                      Alignment ? Alignment->value() : 0);
2855         MemOpChains.push_back(Store);
2856       }
2857     }
2858   }
2859 
2860   // Copy special input registers after user input arguments.
2861   passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain);
2862 
2863   if (!MemOpChains.empty())
2864     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2865 
2866   // Build a sequence of copy-to-reg nodes chained together with token chain
2867   // and flag operands which copy the outgoing args into the appropriate regs.
2868   SDValue InFlag;
2869   for (auto &RegToPass : RegsToPass) {
2870     Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2871                              RegToPass.second, InFlag);
2872     InFlag = Chain.getValue(1);
2873   }
2874 
2875 
2876   SDValue PhysReturnAddrReg;
2877   if (IsTailCall) {
2878     // Since the return is being combined with the call, we need to pass on the
2879     // return address.
2880 
2881     const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2882     SDValue ReturnAddrReg = CreateLiveInRegister(
2883       DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2884 
2885     PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2886                                         MVT::i64);
2887     Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2888     InFlag = Chain.getValue(1);
2889   }
2890 
2891   // We don't usually want to end the call-sequence here because we would tidy
2892   // the frame up *after* the call, however in the ABI-changing tail-call case
2893   // we've carefully laid out the parameters so that when sp is reset they'll be
2894   // in the correct location.
2895   if (IsTailCall && !IsSibCall) {
2896     Chain = DAG.getCALLSEQ_END(Chain,
2897                                DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2898                                DAG.getTargetConstant(0, DL, MVT::i32),
2899                                InFlag, DL);
2900     InFlag = Chain.getValue(1);
2901   }
2902 
2903   std::vector<SDValue> Ops;
2904   Ops.push_back(Chain);
2905   Ops.push_back(Callee);
2906   // Add a redundant copy of the callee global which will not be legalized, as
2907   // we need direct access to the callee later.
2908   GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Callee);
2909   const GlobalValue *GV = GSD->getGlobal();
2910   Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64));
2911 
2912   if (IsTailCall) {
2913     // Each tail call may have to adjust the stack by a different amount, so
2914     // this information must travel along with the operation for eventual
2915     // consumption by emitEpilogue.
2916     Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
2917 
2918     Ops.push_back(PhysReturnAddrReg);
2919   }
2920 
2921   // Add argument registers to the end of the list so that they are known live
2922   // into the call.
2923   for (auto &RegToPass : RegsToPass) {
2924     Ops.push_back(DAG.getRegister(RegToPass.first,
2925                                   RegToPass.second.getValueType()));
2926   }
2927 
2928   // Add a register mask operand representing the call-preserved registers.
2929 
2930   auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
2931   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2932   assert(Mask && "Missing call preserved mask for calling convention");
2933   Ops.push_back(DAG.getRegisterMask(Mask));
2934 
2935   if (InFlag.getNode())
2936     Ops.push_back(InFlag);
2937 
2938   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2939 
2940   // If we're doing a tall call, use a TC_RETURN here rather than an
2941   // actual call instruction.
2942   if (IsTailCall) {
2943     MFI.setHasTailCall();
2944     return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
2945   }
2946 
2947   // Returns a chain and a flag for retval copy to use.
2948   SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2949   Chain = Call.getValue(0);
2950   InFlag = Call.getValue(1);
2951 
2952   uint64_t CalleePopBytes = NumBytes;
2953   Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
2954                              DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2955                              InFlag, DL);
2956   if (!Ins.empty())
2957     InFlag = Chain.getValue(1);
2958 
2959   // Handle result values, copying them out of physregs into vregs that we
2960   // return.
2961   return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2962                          InVals, IsThisReturn,
2963                          IsThisReturn ? OutVals[0] : SDValue());
2964 }
2965 
2966 Register SITargetLowering::getRegisterByName(const char* RegName, LLT VT,
2967                                              const MachineFunction &MF) const {
2968   Register Reg = StringSwitch<Register>(RegName)
2969     .Case("m0", AMDGPU::M0)
2970     .Case("exec", AMDGPU::EXEC)
2971     .Case("exec_lo", AMDGPU::EXEC_LO)
2972     .Case("exec_hi", AMDGPU::EXEC_HI)
2973     .Case("flat_scratch", AMDGPU::FLAT_SCR)
2974     .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2975     .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2976     .Default(Register());
2977 
2978   if (Reg == AMDGPU::NoRegister) {
2979     report_fatal_error(Twine("invalid register name \""
2980                              + StringRef(RegName)  + "\"."));
2981 
2982   }
2983 
2984   if (!Subtarget->hasFlatScrRegister() &&
2985        Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
2986     report_fatal_error(Twine("invalid register \""
2987                              + StringRef(RegName)  + "\" for subtarget."));
2988   }
2989 
2990   switch (Reg) {
2991   case AMDGPU::M0:
2992   case AMDGPU::EXEC_LO:
2993   case AMDGPU::EXEC_HI:
2994   case AMDGPU::FLAT_SCR_LO:
2995   case AMDGPU::FLAT_SCR_HI:
2996     if (VT.getSizeInBits() == 32)
2997       return Reg;
2998     break;
2999   case AMDGPU::EXEC:
3000   case AMDGPU::FLAT_SCR:
3001     if (VT.getSizeInBits() == 64)
3002       return Reg;
3003     break;
3004   default:
3005     llvm_unreachable("missing register type checking");
3006   }
3007 
3008   report_fatal_error(Twine("invalid type for register \""
3009                            + StringRef(RegName) + "\"."));
3010 }
3011 
3012 // If kill is not the last instruction, split the block so kill is always a
3013 // proper terminator.
3014 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
3015                                                     MachineBasicBlock *BB) const {
3016   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3017 
3018   MachineBasicBlock::iterator SplitPoint(&MI);
3019   ++SplitPoint;
3020 
3021   if (SplitPoint == BB->end()) {
3022     // Don't bother with a new block.
3023     MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
3024     return BB;
3025   }
3026 
3027   MachineFunction *MF = BB->getParent();
3028   MachineBasicBlock *SplitBB
3029     = MF->CreateMachineBasicBlock(BB->getBasicBlock());
3030 
3031   MF->insert(++MachineFunction::iterator(BB), SplitBB);
3032   SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
3033 
3034   SplitBB->transferSuccessorsAndUpdatePHIs(BB);
3035   BB->addSuccessor(SplitBB);
3036 
3037   MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
3038   return SplitBB;
3039 }
3040 
3041 // Split block \p MBB at \p MI, as to insert a loop. If \p InstInLoop is true,
3042 // \p MI will be the only instruction in the loop body block. Otherwise, it will
3043 // be the first instruction in the remainder block.
3044 //
3045 /// \returns { LoopBody, Remainder }
3046 static std::pair<MachineBasicBlock *, MachineBasicBlock *>
3047 splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) {
3048   MachineFunction *MF = MBB.getParent();
3049   MachineBasicBlock::iterator I(&MI);
3050 
3051   // To insert the loop we need to split the block. Move everything after this
3052   // point to a new block, and insert a new empty block between the two.
3053   MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
3054   MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
3055   MachineFunction::iterator MBBI(MBB);
3056   ++MBBI;
3057 
3058   MF->insert(MBBI, LoopBB);
3059   MF->insert(MBBI, RemainderBB);
3060 
3061   LoopBB->addSuccessor(LoopBB);
3062   LoopBB->addSuccessor(RemainderBB);
3063 
3064   // Move the rest of the block into a new block.
3065   RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
3066 
3067   if (InstInLoop) {
3068     auto Next = std::next(I);
3069 
3070     // Move instruction to loop body.
3071     LoopBB->splice(LoopBB->begin(), &MBB, I, Next);
3072 
3073     // Move the rest of the block.
3074     RemainderBB->splice(RemainderBB->begin(), &MBB, Next, MBB.end());
3075   } else {
3076     RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
3077   }
3078 
3079   MBB.addSuccessor(LoopBB);
3080 
3081   return std::make_pair(LoopBB, RemainderBB);
3082 }
3083 
3084 /// Insert \p MI into a BUNDLE with an S_WAITCNT 0 immediately following it.
3085 void SITargetLowering::bundleInstWithWaitcnt(MachineInstr &MI) const {
3086   MachineBasicBlock *MBB = MI.getParent();
3087   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3088   auto I = MI.getIterator();
3089   auto E = std::next(I);
3090 
3091   BuildMI(*MBB, E, MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
3092     .addImm(0);
3093 
3094   MIBundleBuilder Bundler(*MBB, I, E);
3095   finalizeBundle(*MBB, Bundler.begin());
3096 }
3097 
3098 MachineBasicBlock *
3099 SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI,
3100                                          MachineBasicBlock *BB) const {
3101   const DebugLoc &DL = MI.getDebugLoc();
3102 
3103   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3104 
3105   MachineBasicBlock *LoopBB;
3106   MachineBasicBlock *RemainderBB;
3107   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3108 
3109   // Apparently kill flags are only valid if the def is in the same block?
3110   if (MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0))
3111     Src->setIsKill(false);
3112 
3113   std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, *BB, true);
3114 
3115   MachineBasicBlock::iterator I = LoopBB->end();
3116 
3117   const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg(
3118     AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1);
3119 
3120   // Clear TRAP_STS.MEM_VIOL
3121   BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::S_SETREG_IMM32_B32))
3122     .addImm(0)
3123     .addImm(EncodedReg);
3124 
3125   bundleInstWithWaitcnt(MI);
3126 
3127   Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3128 
3129   // Load and check TRAP_STS.MEM_VIOL
3130   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg)
3131     .addImm(EncodedReg);
3132 
3133   // FIXME: Do we need to use an isel pseudo that may clobber scc?
3134   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32))
3135     .addReg(Reg, RegState::Kill)
3136     .addImm(0);
3137   BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3138     .addMBB(LoopBB);
3139 
3140   return RemainderBB;
3141 }
3142 
3143 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
3144 // wavefront. If the value is uniform and just happens to be in a VGPR, this
3145 // will only do one iteration. In the worst case, this will loop 64 times.
3146 //
3147 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
3148 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
3149   const SIInstrInfo *TII,
3150   MachineRegisterInfo &MRI,
3151   MachineBasicBlock &OrigBB,
3152   MachineBasicBlock &LoopBB,
3153   const DebugLoc &DL,
3154   const MachineOperand &IdxReg,
3155   unsigned InitReg,
3156   unsigned ResultReg,
3157   unsigned PhiReg,
3158   unsigned InitSaveExecReg,
3159   int Offset,
3160   bool UseGPRIdxMode,
3161   bool IsIndirectSrc) {
3162   MachineFunction *MF = OrigBB.getParent();
3163   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3164   const SIRegisterInfo *TRI = ST.getRegisterInfo();
3165   MachineBasicBlock::iterator I = LoopBB.begin();
3166 
3167   const TargetRegisterClass *BoolRC = TRI->getBoolRC();
3168   Register PhiExec = MRI.createVirtualRegister(BoolRC);
3169   Register NewExec = MRI.createVirtualRegister(BoolRC);
3170   Register CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3171   Register CondReg = MRI.createVirtualRegister(BoolRC);
3172 
3173   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
3174     .addReg(InitReg)
3175     .addMBB(&OrigBB)
3176     .addReg(ResultReg)
3177     .addMBB(&LoopBB);
3178 
3179   BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
3180     .addReg(InitSaveExecReg)
3181     .addMBB(&OrigBB)
3182     .addReg(NewExec)
3183     .addMBB(&LoopBB);
3184 
3185   // Read the next variant <- also loop target.
3186   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
3187     .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
3188 
3189   // Compare the just read M0 value to all possible Idx values.
3190   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
3191     .addReg(CurrentIdxReg)
3192     .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
3193 
3194   // Update EXEC, save the original EXEC value to VCC.
3195   BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32
3196                                                 : AMDGPU::S_AND_SAVEEXEC_B64),
3197           NewExec)
3198     .addReg(CondReg, RegState::Kill);
3199 
3200   MRI.setSimpleHint(NewExec, CondReg);
3201 
3202   if (UseGPRIdxMode) {
3203     unsigned IdxReg;
3204     if (Offset == 0) {
3205       IdxReg = CurrentIdxReg;
3206     } else {
3207       IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3208       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
3209         .addReg(CurrentIdxReg, RegState::Kill)
3210         .addImm(Offset);
3211     }
3212     unsigned IdxMode = IsIndirectSrc ?
3213       AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
3214     MachineInstr *SetOn =
3215       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3216       .addReg(IdxReg, RegState::Kill)
3217       .addImm(IdxMode);
3218     SetOn->getOperand(3).setIsUndef();
3219   } else {
3220     // Move index from VCC into M0
3221     if (Offset == 0) {
3222       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3223         .addReg(CurrentIdxReg, RegState::Kill);
3224     } else {
3225       BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
3226         .addReg(CurrentIdxReg, RegState::Kill)
3227         .addImm(Offset);
3228     }
3229   }
3230 
3231   // Update EXEC, switch all done bits to 0 and all todo bits to 1.
3232   unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3233   MachineInstr *InsertPt =
3234     BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term
3235                                                   : AMDGPU::S_XOR_B64_term), Exec)
3236       .addReg(Exec)
3237       .addReg(NewExec);
3238 
3239   // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
3240   // s_cbranch_scc0?
3241 
3242   // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
3243   BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
3244     .addMBB(&LoopBB);
3245 
3246   return InsertPt->getIterator();
3247 }
3248 
3249 // This has slightly sub-optimal regalloc when the source vector is killed by
3250 // the read. The register allocator does not understand that the kill is
3251 // per-workitem, so is kept alive for the whole loop so we end up not re-using a
3252 // subregister from it, using 1 more VGPR than necessary. This was saved when
3253 // this was expanded after register allocation.
3254 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
3255                                                   MachineBasicBlock &MBB,
3256                                                   MachineInstr &MI,
3257                                                   unsigned InitResultReg,
3258                                                   unsigned PhiReg,
3259                                                   int Offset,
3260                                                   bool UseGPRIdxMode,
3261                                                   bool IsIndirectSrc) {
3262   MachineFunction *MF = MBB.getParent();
3263   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3264   const SIRegisterInfo *TRI = ST.getRegisterInfo();
3265   MachineRegisterInfo &MRI = MF->getRegInfo();
3266   const DebugLoc &DL = MI.getDebugLoc();
3267   MachineBasicBlock::iterator I(&MI);
3268 
3269   const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
3270   Register DstReg = MI.getOperand(0).getReg();
3271   Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
3272   Register TmpExec = MRI.createVirtualRegister(BoolXExecRC);
3273   unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3274   unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
3275 
3276   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
3277 
3278   // Save the EXEC mask
3279   BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec)
3280     .addReg(Exec);
3281 
3282   MachineBasicBlock *LoopBB;
3283   MachineBasicBlock *RemainderBB;
3284   std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, MBB, false);
3285 
3286   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3287 
3288   auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
3289                                       InitResultReg, DstReg, PhiReg, TmpExec,
3290                                       Offset, UseGPRIdxMode, IsIndirectSrc);
3291 
3292   MachineBasicBlock::iterator First = RemainderBB->begin();
3293   BuildMI(*RemainderBB, First, DL, TII->get(MovExecOpc), Exec)
3294     .addReg(SaveExec);
3295 
3296   return InsPt;
3297 }
3298 
3299 // Returns subreg index, offset
3300 static std::pair<unsigned, int>
3301 computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
3302                             const TargetRegisterClass *SuperRC,
3303                             unsigned VecReg,
3304                             int Offset) {
3305   int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
3306 
3307   // Skip out of bounds offsets, or else we would end up using an undefined
3308   // register.
3309   if (Offset >= NumElts || Offset < 0)
3310     return std::make_pair(AMDGPU::sub0, Offset);
3311 
3312   return std::make_pair(SIRegisterInfo::getSubRegFromChannel(Offset), 0);
3313 }
3314 
3315 // Return true if the index is an SGPR and was set.
3316 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
3317                                  MachineRegisterInfo &MRI,
3318                                  MachineInstr &MI,
3319                                  int Offset,
3320                                  bool UseGPRIdxMode,
3321                                  bool IsIndirectSrc) {
3322   MachineBasicBlock *MBB = MI.getParent();
3323   const DebugLoc &DL = MI.getDebugLoc();
3324   MachineBasicBlock::iterator I(&MI);
3325 
3326   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3327   const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
3328 
3329   assert(Idx->getReg() != AMDGPU::NoRegister);
3330 
3331   if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
3332     return false;
3333 
3334   if (UseGPRIdxMode) {
3335     unsigned IdxMode = IsIndirectSrc ?
3336       AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
3337     if (Offset == 0) {
3338       MachineInstr *SetOn =
3339           BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3340               .add(*Idx)
3341               .addImm(IdxMode);
3342 
3343       SetOn->getOperand(3).setIsUndef();
3344     } else {
3345       Register Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3346       BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
3347           .add(*Idx)
3348           .addImm(Offset);
3349       MachineInstr *SetOn =
3350         BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3351         .addReg(Tmp, RegState::Kill)
3352         .addImm(IdxMode);
3353 
3354       SetOn->getOperand(3).setIsUndef();
3355     }
3356 
3357     return true;
3358   }
3359 
3360   if (Offset == 0) {
3361     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3362       .add(*Idx);
3363   } else {
3364     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
3365       .add(*Idx)
3366       .addImm(Offset);
3367   }
3368 
3369   return true;
3370 }
3371 
3372 // Control flow needs to be inserted if indexing with a VGPR.
3373 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
3374                                           MachineBasicBlock &MBB,
3375                                           const GCNSubtarget &ST) {
3376   const SIInstrInfo *TII = ST.getInstrInfo();
3377   const SIRegisterInfo &TRI = TII->getRegisterInfo();
3378   MachineFunction *MF = MBB.getParent();
3379   MachineRegisterInfo &MRI = MF->getRegInfo();
3380 
3381   Register Dst = MI.getOperand(0).getReg();
3382   Register SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
3383   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3384 
3385   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
3386 
3387   unsigned SubReg;
3388   std::tie(SubReg, Offset)
3389     = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
3390 
3391   const bool UseGPRIdxMode = ST.useVGPRIndexMode();
3392 
3393   if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
3394     MachineBasicBlock::iterator I(&MI);
3395     const DebugLoc &DL = MI.getDebugLoc();
3396 
3397     if (UseGPRIdxMode) {
3398       // TODO: Look at the uses to avoid the copy. This may require rescheduling
3399       // to avoid interfering with other uses, so probably requires a new
3400       // optimization pass.
3401       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3402         .addReg(SrcReg, RegState::Undef, SubReg)
3403         .addReg(SrcReg, RegState::Implicit)
3404         .addReg(AMDGPU::M0, RegState::Implicit);
3405       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3406     } else {
3407       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3408         .addReg(SrcReg, RegState::Undef, SubReg)
3409         .addReg(SrcReg, RegState::Implicit);
3410     }
3411 
3412     MI.eraseFromParent();
3413 
3414     return &MBB;
3415   }
3416 
3417   const DebugLoc &DL = MI.getDebugLoc();
3418   MachineBasicBlock::iterator I(&MI);
3419 
3420   Register PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3421   Register InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3422 
3423   BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
3424 
3425   auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
3426                               Offset, UseGPRIdxMode, true);
3427   MachineBasicBlock *LoopBB = InsPt->getParent();
3428 
3429   if (UseGPRIdxMode) {
3430     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3431       .addReg(SrcReg, RegState::Undef, SubReg)
3432       .addReg(SrcReg, RegState::Implicit)
3433       .addReg(AMDGPU::M0, RegState::Implicit);
3434     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3435   } else {
3436     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3437       .addReg(SrcReg, RegState::Undef, SubReg)
3438       .addReg(SrcReg, RegState::Implicit);
3439   }
3440 
3441   MI.eraseFromParent();
3442 
3443   return LoopBB;
3444 }
3445 
3446 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3447                                           MachineBasicBlock &MBB,
3448                                           const GCNSubtarget &ST) {
3449   const SIInstrInfo *TII = ST.getInstrInfo();
3450   const SIRegisterInfo &TRI = TII->getRegisterInfo();
3451   MachineFunction *MF = MBB.getParent();
3452   MachineRegisterInfo &MRI = MF->getRegInfo();
3453 
3454   Register Dst = MI.getOperand(0).getReg();
3455   const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3456   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3457   const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3458   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3459   const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3460 
3461   // This can be an immediate, but will be folded later.
3462   assert(Val->getReg());
3463 
3464   unsigned SubReg;
3465   std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3466                                                          SrcVec->getReg(),
3467                                                          Offset);
3468   const bool UseGPRIdxMode = ST.useVGPRIndexMode();
3469 
3470   if (Idx->getReg() == AMDGPU::NoRegister) {
3471     MachineBasicBlock::iterator I(&MI);
3472     const DebugLoc &DL = MI.getDebugLoc();
3473 
3474     assert(Offset == 0);
3475 
3476     BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
3477         .add(*SrcVec)
3478         .add(*Val)
3479         .addImm(SubReg);
3480 
3481     MI.eraseFromParent();
3482     return &MBB;
3483   }
3484 
3485   const MCInstrDesc &MovRelDesc
3486     = TII->getIndirectRegWritePseudo(TRI.getRegSizeInBits(*VecRC), 32, false);
3487 
3488   if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
3489     MachineBasicBlock::iterator I(&MI);
3490     const DebugLoc &DL = MI.getDebugLoc();
3491     BuildMI(MBB, I, DL, MovRelDesc, Dst)
3492       .addReg(SrcVec->getReg())
3493       .add(*Val)
3494       .addImm(SubReg);
3495     if (UseGPRIdxMode)
3496       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3497 
3498     MI.eraseFromParent();
3499     return &MBB;
3500   }
3501 
3502   if (Val->isReg())
3503     MRI.clearKillFlags(Val->getReg());
3504 
3505   const DebugLoc &DL = MI.getDebugLoc();
3506 
3507   Register PhiReg = MRI.createVirtualRegister(VecRC);
3508 
3509   auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
3510                               Offset, UseGPRIdxMode, false);
3511   MachineBasicBlock *LoopBB = InsPt->getParent();
3512 
3513   BuildMI(*LoopBB, InsPt, DL, MovRelDesc, Dst)
3514     .addReg(PhiReg)
3515     .add(*Val)
3516     .addImm(AMDGPU::sub0);
3517   if (UseGPRIdxMode)
3518     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3519 
3520   MI.eraseFromParent();
3521   return LoopBB;
3522 }
3523 
3524 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3525   MachineInstr &MI, MachineBasicBlock *BB) const {
3526 
3527   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3528   MachineFunction *MF = BB->getParent();
3529   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3530 
3531   if (TII->isMIMG(MI)) {
3532     if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3533       report_fatal_error("missing mem operand from MIMG instruction");
3534     }
3535     // Add a memoperand for mimg instructions so that they aren't assumed to
3536     // be ordered memory instuctions.
3537 
3538     return BB;
3539   }
3540 
3541   switch (MI.getOpcode()) {
3542   case AMDGPU::S_ADD_U64_PSEUDO:
3543   case AMDGPU::S_SUB_U64_PSEUDO: {
3544     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3545     const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3546     const SIRegisterInfo *TRI = ST.getRegisterInfo();
3547     const TargetRegisterClass *BoolRC = TRI->getBoolRC();
3548     const DebugLoc &DL = MI.getDebugLoc();
3549 
3550     MachineOperand &Dest = MI.getOperand(0);
3551     MachineOperand &Src0 = MI.getOperand(1);
3552     MachineOperand &Src1 = MI.getOperand(2);
3553 
3554     Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3555     Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3556 
3557     MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3558      Src0, BoolRC, AMDGPU::sub0,
3559      &AMDGPU::SReg_32RegClass);
3560     MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3561       Src0, BoolRC, AMDGPU::sub1,
3562       &AMDGPU::SReg_32RegClass);
3563 
3564     MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3565       Src1, BoolRC, AMDGPU::sub0,
3566       &AMDGPU::SReg_32RegClass);
3567     MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3568       Src1, BoolRC, AMDGPU::sub1,
3569       &AMDGPU::SReg_32RegClass);
3570 
3571     bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3572 
3573     unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3574     unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3575     BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3576       .add(Src0Sub0)
3577       .add(Src1Sub0);
3578     BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3579       .add(Src0Sub1)
3580       .add(Src1Sub1);
3581     BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3582       .addReg(DestSub0)
3583       .addImm(AMDGPU::sub0)
3584       .addReg(DestSub1)
3585       .addImm(AMDGPU::sub1);
3586     MI.eraseFromParent();
3587     return BB;
3588   }
3589   case AMDGPU::SI_INIT_M0: {
3590     BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
3591             TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3592         .add(MI.getOperand(0));
3593     MI.eraseFromParent();
3594     return BB;
3595   }
3596   case AMDGPU::SI_INIT_EXEC:
3597     // This should be before all vector instructions.
3598     BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3599             AMDGPU::EXEC)
3600         .addImm(MI.getOperand(0).getImm());
3601     MI.eraseFromParent();
3602     return BB;
3603 
3604   case AMDGPU::SI_INIT_EXEC_LO:
3605     // This should be before all vector instructions.
3606     BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B32),
3607             AMDGPU::EXEC_LO)
3608         .addImm(MI.getOperand(0).getImm());
3609     MI.eraseFromParent();
3610     return BB;
3611 
3612   case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3613     // Extract the thread count from an SGPR input and set EXEC accordingly.
3614     // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3615     //
3616     // S_BFE_U32 count, input, {shift, 7}
3617     // S_BFM_B64 exec, count, 0
3618     // S_CMP_EQ_U32 count, 64
3619     // S_CMOV_B64 exec, -1
3620     MachineInstr *FirstMI = &*BB->begin();
3621     MachineRegisterInfo &MRI = MF->getRegInfo();
3622     Register InputReg = MI.getOperand(0).getReg();
3623     Register CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3624     bool Found = false;
3625 
3626     // Move the COPY of the input reg to the beginning, so that we can use it.
3627     for (auto I = BB->begin(); I != &MI; I++) {
3628       if (I->getOpcode() != TargetOpcode::COPY ||
3629           I->getOperand(0).getReg() != InputReg)
3630         continue;
3631 
3632       if (I == FirstMI) {
3633         FirstMI = &*++BB->begin();
3634       } else {
3635         I->removeFromParent();
3636         BB->insert(FirstMI, &*I);
3637       }
3638       Found = true;
3639       break;
3640     }
3641     assert(Found);
3642     (void)Found;
3643 
3644     // This should be before all vector instructions.
3645     unsigned Mask = (getSubtarget()->getWavefrontSize() << 1) - 1;
3646     bool isWave32 = getSubtarget()->isWave32();
3647     unsigned Exec = isWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3648     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3649         .addReg(InputReg)
3650         .addImm((MI.getOperand(1).getImm() & Mask) | 0x70000);
3651     BuildMI(*BB, FirstMI, DebugLoc(),
3652             TII->get(isWave32 ? AMDGPU::S_BFM_B32 : AMDGPU::S_BFM_B64),
3653             Exec)
3654         .addReg(CountReg)
3655         .addImm(0);
3656     BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3657         .addReg(CountReg, RegState::Kill)
3658         .addImm(getSubtarget()->getWavefrontSize());
3659     BuildMI(*BB, FirstMI, DebugLoc(),
3660             TII->get(isWave32 ? AMDGPU::S_CMOV_B32 : AMDGPU::S_CMOV_B64),
3661             Exec)
3662         .addImm(-1);
3663     MI.eraseFromParent();
3664     return BB;
3665   }
3666 
3667   case AMDGPU::GET_GROUPSTATICSIZE: {
3668     assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA ||
3669            getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL);
3670     DebugLoc DL = MI.getDebugLoc();
3671     BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
3672         .add(MI.getOperand(0))
3673         .addImm(MFI->getLDSSize());
3674     MI.eraseFromParent();
3675     return BB;
3676   }
3677   case AMDGPU::SI_INDIRECT_SRC_V1:
3678   case AMDGPU::SI_INDIRECT_SRC_V2:
3679   case AMDGPU::SI_INDIRECT_SRC_V4:
3680   case AMDGPU::SI_INDIRECT_SRC_V8:
3681   case AMDGPU::SI_INDIRECT_SRC_V16:
3682     return emitIndirectSrc(MI, *BB, *getSubtarget());
3683   case AMDGPU::SI_INDIRECT_DST_V1:
3684   case AMDGPU::SI_INDIRECT_DST_V2:
3685   case AMDGPU::SI_INDIRECT_DST_V4:
3686   case AMDGPU::SI_INDIRECT_DST_V8:
3687   case AMDGPU::SI_INDIRECT_DST_V16:
3688     return emitIndirectDst(MI, *BB, *getSubtarget());
3689   case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3690   case AMDGPU::SI_KILL_I1_PSEUDO:
3691     return splitKillBlock(MI, BB);
3692   case AMDGPU::V_CNDMASK_B64_PSEUDO: {
3693     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3694     const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3695     const SIRegisterInfo *TRI = ST.getRegisterInfo();
3696 
3697     Register Dst = MI.getOperand(0).getReg();
3698     Register Src0 = MI.getOperand(1).getReg();
3699     Register Src1 = MI.getOperand(2).getReg();
3700     const DebugLoc &DL = MI.getDebugLoc();
3701     Register SrcCond = MI.getOperand(3).getReg();
3702 
3703     Register DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3704     Register DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3705     const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
3706     Register SrcCondCopy = MRI.createVirtualRegister(CondRC);
3707 
3708     BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3709       .addReg(SrcCond);
3710     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3711       .addImm(0)
3712       .addReg(Src0, 0, AMDGPU::sub0)
3713       .addImm(0)
3714       .addReg(Src1, 0, AMDGPU::sub0)
3715       .addReg(SrcCondCopy);
3716     BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3717       .addImm(0)
3718       .addReg(Src0, 0, AMDGPU::sub1)
3719       .addImm(0)
3720       .addReg(Src1, 0, AMDGPU::sub1)
3721       .addReg(SrcCondCopy);
3722 
3723     BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3724       .addReg(DstLo)
3725       .addImm(AMDGPU::sub0)
3726       .addReg(DstHi)
3727       .addImm(AMDGPU::sub1);
3728     MI.eraseFromParent();
3729     return BB;
3730   }
3731   case AMDGPU::SI_BR_UNDEF: {
3732     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3733     const DebugLoc &DL = MI.getDebugLoc();
3734     MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3735                            .add(MI.getOperand(0));
3736     Br->getOperand(1).setIsUndef(true); // read undef SCC
3737     MI.eraseFromParent();
3738     return BB;
3739   }
3740   case AMDGPU::ADJCALLSTACKUP:
3741   case AMDGPU::ADJCALLSTACKDOWN: {
3742     const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3743     MachineInstrBuilder MIB(*MF, &MI);
3744 
3745     // Add an implicit use of the frame offset reg to prevent the restore copy
3746     // inserted after the call from being reorderd after stack operations in the
3747     // the caller's frame.
3748     MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
3749         .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit)
3750         .addReg(Info->getFrameOffsetReg(), RegState::Implicit);
3751     return BB;
3752   }
3753   case AMDGPU::SI_CALL_ISEL: {
3754     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3755     const DebugLoc &DL = MI.getDebugLoc();
3756 
3757     unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
3758 
3759     MachineInstrBuilder MIB;
3760     MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg);
3761 
3762     for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
3763       MIB.add(MI.getOperand(I));
3764 
3765     MIB.cloneMemRefs(MI);
3766     MI.eraseFromParent();
3767     return BB;
3768   }
3769   case AMDGPU::V_ADD_I32_e32:
3770   case AMDGPU::V_SUB_I32_e32:
3771   case AMDGPU::V_SUBREV_I32_e32: {
3772     // TODO: Define distinct V_*_I32_Pseudo instructions instead.
3773     const DebugLoc &DL = MI.getDebugLoc();
3774     unsigned Opc = MI.getOpcode();
3775 
3776     bool NeedClampOperand = false;
3777     if (TII->pseudoToMCOpcode(Opc) == -1) {
3778       Opc = AMDGPU::getVOPe64(Opc);
3779       NeedClampOperand = true;
3780     }
3781 
3782     auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg());
3783     if (TII->isVOP3(*I)) {
3784       const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3785       const SIRegisterInfo *TRI = ST.getRegisterInfo();
3786       I.addReg(TRI->getVCC(), RegState::Define);
3787     }
3788     I.add(MI.getOperand(1))
3789      .add(MI.getOperand(2));
3790     if (NeedClampOperand)
3791       I.addImm(0); // clamp bit for e64 encoding
3792 
3793     TII->legalizeOperands(*I);
3794 
3795     MI.eraseFromParent();
3796     return BB;
3797   }
3798   case AMDGPU::DS_GWS_INIT:
3799   case AMDGPU::DS_GWS_SEMA_V:
3800   case AMDGPU::DS_GWS_SEMA_BR:
3801   case AMDGPU::DS_GWS_SEMA_P:
3802   case AMDGPU::DS_GWS_SEMA_RELEASE_ALL:
3803   case AMDGPU::DS_GWS_BARRIER:
3804     // A s_waitcnt 0 is required to be the instruction immediately following.
3805     if (getSubtarget()->hasGWSAutoReplay()) {
3806       bundleInstWithWaitcnt(MI);
3807       return BB;
3808     }
3809 
3810     return emitGWSMemViolTestLoop(MI, BB);
3811   default:
3812     return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
3813   }
3814 }
3815 
3816 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3817   return isTypeLegal(VT.getScalarType());
3818 }
3819 
3820 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3821   // This currently forces unfolding various combinations of fsub into fma with
3822   // free fneg'd operands. As long as we have fast FMA (controlled by
3823   // isFMAFasterThanFMulAndFAdd), we should perform these.
3824 
3825   // When fma is quarter rate, for f64 where add / sub are at best half rate,
3826   // most of these combines appear to be cycle neutral but save on instruction
3827   // count / code size.
3828   return true;
3829 }
3830 
3831 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3832                                          EVT VT) const {
3833   if (!VT.isVector()) {
3834     return MVT::i1;
3835   }
3836   return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
3837 }
3838 
3839 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3840   // TODO: Should i16 be used always if legal? For now it would force VALU
3841   // shifts.
3842   return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
3843 }
3844 
3845 // Answering this is somewhat tricky and depends on the specific device which
3846 // have different rates for fma or all f64 operations.
3847 //
3848 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3849 // regardless of which device (although the number of cycles differs between
3850 // devices), so it is always profitable for f64.
3851 //
3852 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3853 // only on full rate devices. Normally, we should prefer selecting v_mad_f32
3854 // which we can always do even without fused FP ops since it returns the same
3855 // result as the separate operations and since it is always full
3856 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
3857 // however does not support denormals, so we do report fma as faster if we have
3858 // a fast fma device and require denormals.
3859 //
3860 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3861                                                   EVT VT) const {
3862   VT = VT.getScalarType();
3863 
3864   switch (VT.getSimpleVT().SimpleTy) {
3865   case MVT::f32: {
3866     // This is as fast on some subtargets. However, we always have full rate f32
3867     // mad available which returns the same result as the separate operations
3868     // which we should prefer over fma. We can't use this if we want to support
3869     // denormals, so only report this in these cases.
3870     if (hasFP32Denormals(MF))
3871       return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
3872 
3873     // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
3874     return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
3875   }
3876   case MVT::f64:
3877     return true;
3878   case MVT::f16:
3879     return Subtarget->has16BitInsts() && hasFP64FP16Denormals(MF);
3880   default:
3881     break;
3882   }
3883 
3884   return false;
3885 }
3886 
3887 bool SITargetLowering::isFMADLegalForFAddFSub(const SelectionDAG &DAG,
3888                                               const SDNode *N) const {
3889   // TODO: Check future ftz flag
3890   // v_mad_f32/v_mac_f32 do not support denormals.
3891   EVT VT = N->getValueType(0);
3892   if (VT == MVT::f32)
3893     return !hasFP32Denormals(DAG.getMachineFunction());
3894   if (VT == MVT::f16) {
3895     return Subtarget->hasMadF16() &&
3896            !hasFP64FP16Denormals(DAG.getMachineFunction());
3897   }
3898 
3899   return false;
3900 }
3901 
3902 //===----------------------------------------------------------------------===//
3903 // Custom DAG Lowering Operations
3904 //===----------------------------------------------------------------------===//
3905 
3906 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3907 // wider vector type is legal.
3908 SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
3909                                              SelectionDAG &DAG) const {
3910   unsigned Opc = Op.getOpcode();
3911   EVT VT = Op.getValueType();
3912   assert(VT == MVT::v4f16);
3913 
3914   SDValue Lo, Hi;
3915   std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3916 
3917   SDLoc SL(Op);
3918   SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
3919                              Op->getFlags());
3920   SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
3921                              Op->getFlags());
3922 
3923   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3924 }
3925 
3926 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3927 // wider vector type is legal.
3928 SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
3929                                               SelectionDAG &DAG) const {
3930   unsigned Opc = Op.getOpcode();
3931   EVT VT = Op.getValueType();
3932   assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3933 
3934   SDValue Lo0, Hi0;
3935   std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3936   SDValue Lo1, Hi1;
3937   std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3938 
3939   SDLoc SL(Op);
3940 
3941   SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
3942                              Op->getFlags());
3943   SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
3944                              Op->getFlags());
3945 
3946   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3947 }
3948 
3949 SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op,
3950                                               SelectionDAG &DAG) const {
3951   unsigned Opc = Op.getOpcode();
3952   EVT VT = Op.getValueType();
3953   assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3954 
3955   SDValue Lo0, Hi0;
3956   std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3957   SDValue Lo1, Hi1;
3958   std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3959   SDValue Lo2, Hi2;
3960   std::tie(Lo2, Hi2) = DAG.SplitVectorOperand(Op.getNode(), 2);
3961 
3962   SDLoc SL(Op);
3963 
3964   SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, Lo2,
3965                              Op->getFlags());
3966   SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, Hi2,
3967                              Op->getFlags());
3968 
3969   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3970 }
3971 
3972 
3973 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3974   switch (Op.getOpcode()) {
3975   default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
3976   case ISD::BRCOND: return LowerBRCOND(Op, DAG);
3977   case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
3978   case ISD::LOAD: {
3979     SDValue Result = LowerLOAD(Op, DAG);
3980     assert((!Result.getNode() ||
3981             Result.getNode()->getNumValues() == 2) &&
3982            "Load should return a value and a chain");
3983     return Result;
3984   }
3985 
3986   case ISD::FSIN:
3987   case ISD::FCOS:
3988     return LowerTrig(Op, DAG);
3989   case ISD::SELECT: return LowerSELECT(Op, DAG);
3990   case ISD::FDIV: return LowerFDIV(Op, DAG);
3991   case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
3992   case ISD::STORE: return LowerSTORE(Op, DAG);
3993   case ISD::GlobalAddress: {
3994     MachineFunction &MF = DAG.getMachineFunction();
3995     SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3996     return LowerGlobalAddress(MFI, Op, DAG);
3997   }
3998   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3999   case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
4000   case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
4001   case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
4002   case ISD::INSERT_SUBVECTOR:
4003     return lowerINSERT_SUBVECTOR(Op, DAG);
4004   case ISD::INSERT_VECTOR_ELT:
4005     return lowerINSERT_VECTOR_ELT(Op, DAG);
4006   case ISD::EXTRACT_VECTOR_ELT:
4007     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
4008   case ISD::VECTOR_SHUFFLE:
4009     return lowerVECTOR_SHUFFLE(Op, DAG);
4010   case ISD::BUILD_VECTOR:
4011     return lowerBUILD_VECTOR(Op, DAG);
4012   case ISD::FP_ROUND:
4013     return lowerFP_ROUND(Op, DAG);
4014   case ISD::TRAP:
4015     return lowerTRAP(Op, DAG);
4016   case ISD::DEBUGTRAP:
4017     return lowerDEBUGTRAP(Op, DAG);
4018   case ISD::FABS:
4019   case ISD::FNEG:
4020   case ISD::FCANONICALIZE:
4021     return splitUnaryVectorOp(Op, DAG);
4022   case ISD::FMINNUM:
4023   case ISD::FMAXNUM:
4024     return lowerFMINNUM_FMAXNUM(Op, DAG);
4025   case ISD::FMA:
4026     return splitTernaryVectorOp(Op, DAG);
4027   case ISD::SHL:
4028   case ISD::SRA:
4029   case ISD::SRL:
4030   case ISD::ADD:
4031   case ISD::SUB:
4032   case ISD::MUL:
4033   case ISD::SMIN:
4034   case ISD::SMAX:
4035   case ISD::UMIN:
4036   case ISD::UMAX:
4037   case ISD::FADD:
4038   case ISD::FMUL:
4039   case ISD::FMINNUM_IEEE:
4040   case ISD::FMAXNUM_IEEE:
4041     return splitBinaryVectorOp(Op, DAG);
4042   }
4043   return SDValue();
4044 }
4045 
4046 static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
4047                                        const SDLoc &DL,
4048                                        SelectionDAG &DAG, bool Unpacked) {
4049   if (!LoadVT.isVector())
4050     return Result;
4051 
4052   if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
4053     // Truncate to v2i16/v4i16.
4054     EVT IntLoadVT = LoadVT.changeTypeToInteger();
4055 
4056     // Workaround legalizer not scalarizing truncate after vector op
4057     // legalization byt not creating intermediate vector trunc.
4058     SmallVector<SDValue, 4> Elts;
4059     DAG.ExtractVectorElements(Result, Elts);
4060     for (SDValue &Elt : Elts)
4061       Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
4062 
4063     Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
4064 
4065     // Bitcast to original type (v2f16/v4f16).
4066     return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
4067   }
4068 
4069   // Cast back to the original packed type.
4070   return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
4071 }
4072 
4073 SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
4074                                               MemSDNode *M,
4075                                               SelectionDAG &DAG,
4076                                               ArrayRef<SDValue> Ops,
4077                                               bool IsIntrinsic) const {
4078   SDLoc DL(M);
4079 
4080   bool Unpacked = Subtarget->hasUnpackedD16VMem();
4081   EVT LoadVT = M->getValueType(0);
4082 
4083   EVT EquivLoadVT = LoadVT;
4084   if (Unpacked && LoadVT.isVector()) {
4085     EquivLoadVT = LoadVT.isVector() ?
4086       EVT::getVectorVT(*DAG.getContext(), MVT::i32,
4087                        LoadVT.getVectorNumElements()) : LoadVT;
4088   }
4089 
4090   // Change from v4f16/v2f16 to EquivLoadVT.
4091   SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
4092 
4093   SDValue Load
4094     = DAG.getMemIntrinsicNode(
4095       IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
4096       VTList, Ops, M->getMemoryVT(),
4097       M->getMemOperand());
4098   if (!Unpacked) // Just adjusted the opcode.
4099     return Load;
4100 
4101   SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);
4102 
4103   return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
4104 }
4105 
4106 SDValue SITargetLowering::lowerIntrinsicLoad(MemSDNode *M, bool IsFormat,
4107                                              SelectionDAG &DAG,
4108                                              ArrayRef<SDValue> Ops) const {
4109   SDLoc DL(M);
4110   EVT LoadVT = M->getValueType(0);
4111   EVT EltType = LoadVT.getScalarType();
4112   EVT IntVT = LoadVT.changeTypeToInteger();
4113 
4114   bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
4115 
4116   unsigned Opc =
4117       IsFormat ? AMDGPUISD::BUFFER_LOAD_FORMAT : AMDGPUISD::BUFFER_LOAD;
4118 
4119   if (IsD16) {
4120     return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, M, DAG, Ops);
4121   }
4122 
4123   // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
4124   if (!IsD16 && !LoadVT.isVector() && EltType.getSizeInBits() < 32)
4125     return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
4126 
4127   if (isTypeLegal(LoadVT)) {
4128     return getMemIntrinsicNode(Opc, DL, M->getVTList(), Ops, IntVT,
4129                                M->getMemOperand(), DAG);
4130   }
4131 
4132   EVT CastVT = getEquivalentMemType(*DAG.getContext(), LoadVT);
4133   SDVTList VTList = DAG.getVTList(CastVT, MVT::Other);
4134   SDValue MemNode = getMemIntrinsicNode(Opc, DL, VTList, Ops, CastVT,
4135                                         M->getMemOperand(), DAG);
4136   return DAG.getMergeValues(
4137       {DAG.getNode(ISD::BITCAST, DL, LoadVT, MemNode), MemNode.getValue(1)},
4138       DL);
4139 }
4140 
4141 static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
4142                                   SDNode *N, SelectionDAG &DAG) {
4143   EVT VT = N->getValueType(0);
4144   const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
4145   int CondCode = CD->getSExtValue();
4146   if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
4147       CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
4148     return DAG.getUNDEF(VT);
4149 
4150   ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
4151 
4152   SDValue LHS = N->getOperand(1);
4153   SDValue RHS = N->getOperand(2);
4154 
4155   SDLoc DL(N);
4156 
4157   EVT CmpVT = LHS.getValueType();
4158   if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
4159     unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
4160       ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
4161     LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
4162     RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
4163   }
4164 
4165   ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
4166 
4167   unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
4168   EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
4169 
4170   SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS,
4171                               DAG.getCondCode(CCOpcode));
4172   if (VT.bitsEq(CCVT))
4173     return SetCC;
4174   return DAG.getZExtOrTrunc(SetCC, DL, VT);
4175 }
4176 
4177 static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
4178                                   SDNode *N, SelectionDAG &DAG) {
4179   EVT VT = N->getValueType(0);
4180   const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
4181 
4182   int CondCode = CD->getSExtValue();
4183   if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
4184       CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) {
4185     return DAG.getUNDEF(VT);
4186   }
4187 
4188   SDValue Src0 = N->getOperand(1);
4189   SDValue Src1 = N->getOperand(2);
4190   EVT CmpVT = Src0.getValueType();
4191   SDLoc SL(N);
4192 
4193   if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
4194     Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
4195     Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
4196   }
4197 
4198   FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
4199   ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
4200   unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
4201   EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
4202   SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0,
4203                               Src1, DAG.getCondCode(CCOpcode));
4204   if (VT.bitsEq(CCVT))
4205     return SetCC;
4206   return DAG.getZExtOrTrunc(SetCC, SL, VT);
4207 }
4208 
4209 void SITargetLowering::ReplaceNodeResults(SDNode *N,
4210                                           SmallVectorImpl<SDValue> &Results,
4211                                           SelectionDAG &DAG) const {
4212   switch (N->getOpcode()) {
4213   case ISD::INSERT_VECTOR_ELT: {
4214     if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
4215       Results.push_back(Res);
4216     return;
4217   }
4218   case ISD::EXTRACT_VECTOR_ELT: {
4219     if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
4220       Results.push_back(Res);
4221     return;
4222   }
4223   case ISD::INTRINSIC_WO_CHAIN: {
4224     unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
4225     switch (IID) {
4226     case Intrinsic::amdgcn_cvt_pkrtz: {
4227       SDValue Src0 = N->getOperand(1);
4228       SDValue Src1 = N->getOperand(2);
4229       SDLoc SL(N);
4230       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
4231                                 Src0, Src1);
4232       Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
4233       return;
4234     }
4235     case Intrinsic::amdgcn_cvt_pknorm_i16:
4236     case Intrinsic::amdgcn_cvt_pknorm_u16:
4237     case Intrinsic::amdgcn_cvt_pk_i16:
4238     case Intrinsic::amdgcn_cvt_pk_u16: {
4239       SDValue Src0 = N->getOperand(1);
4240       SDValue Src1 = N->getOperand(2);
4241       SDLoc SL(N);
4242       unsigned Opcode;
4243 
4244       if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
4245         Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
4246       else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
4247         Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
4248       else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
4249         Opcode = AMDGPUISD::CVT_PK_I16_I32;
4250       else
4251         Opcode = AMDGPUISD::CVT_PK_U16_U32;
4252 
4253       EVT VT = N->getValueType(0);
4254       if (isTypeLegal(VT))
4255         Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
4256       else {
4257         SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
4258         Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
4259       }
4260       return;
4261     }
4262     }
4263     break;
4264   }
4265   case ISD::INTRINSIC_W_CHAIN: {
4266     if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
4267       if (Res.getOpcode() == ISD::MERGE_VALUES) {
4268         // FIXME: Hacky
4269         Results.push_back(Res.getOperand(0));
4270         Results.push_back(Res.getOperand(1));
4271       } else {
4272         Results.push_back(Res);
4273         Results.push_back(Res.getValue(1));
4274       }
4275       return;
4276     }
4277 
4278     break;
4279   }
4280   case ISD::SELECT: {
4281     SDLoc SL(N);
4282     EVT VT = N->getValueType(0);
4283     EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
4284     SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
4285     SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
4286 
4287     EVT SelectVT = NewVT;
4288     if (NewVT.bitsLT(MVT::i32)) {
4289       LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
4290       RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
4291       SelectVT = MVT::i32;
4292     }
4293 
4294     SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
4295                                     N->getOperand(0), LHS, RHS);
4296 
4297     if (NewVT != SelectVT)
4298       NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
4299     Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
4300     return;
4301   }
4302   case ISD::FNEG: {
4303     if (N->getValueType(0) != MVT::v2f16)
4304       break;
4305 
4306     SDLoc SL(N);
4307     SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
4308 
4309     SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
4310                              BC,
4311                              DAG.getConstant(0x80008000, SL, MVT::i32));
4312     Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
4313     return;
4314   }
4315   case ISD::FABS: {
4316     if (N->getValueType(0) != MVT::v2f16)
4317       break;
4318 
4319     SDLoc SL(N);
4320     SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
4321 
4322     SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
4323                              BC,
4324                              DAG.getConstant(0x7fff7fff, SL, MVT::i32));
4325     Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
4326     return;
4327   }
4328   default:
4329     break;
4330   }
4331 }
4332 
4333 /// Helper function for LowerBRCOND
4334 static SDNode *findUser(SDValue Value, unsigned Opcode) {
4335 
4336   SDNode *Parent = Value.getNode();
4337   for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
4338        I != E; ++I) {
4339 
4340     if (I.getUse().get() != Value)
4341       continue;
4342 
4343     if (I->getOpcode() == Opcode)
4344       return *I;
4345   }
4346   return nullptr;
4347 }
4348 
4349 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
4350   if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
4351     switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
4352     case Intrinsic::amdgcn_if:
4353       return AMDGPUISD::IF;
4354     case Intrinsic::amdgcn_else:
4355       return AMDGPUISD::ELSE;
4356     case Intrinsic::amdgcn_loop:
4357       return AMDGPUISD::LOOP;
4358     case Intrinsic::amdgcn_end_cf:
4359       llvm_unreachable("should not occur");
4360     default:
4361       return 0;
4362     }
4363   }
4364 
4365   // break, if_break, else_break are all only used as inputs to loop, not
4366   // directly as branch conditions.
4367   return 0;
4368 }
4369 
4370 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
4371   const Triple &TT = getTargetMachine().getTargetTriple();
4372   return (GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4373           GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4374          AMDGPU::shouldEmitConstantsToTextSection(TT);
4375 }
4376 
4377 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
4378   // FIXME: Either avoid relying on address space here or change the default
4379   // address space for functions to avoid the explicit check.
4380   return (GV->getValueType()->isFunctionTy() ||
4381           GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
4382           GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4383           GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4384          !shouldEmitFixup(GV) &&
4385          !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
4386 }
4387 
4388 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
4389   return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
4390 }
4391 
4392 bool SITargetLowering::shouldUseLDSConstAddress(const GlobalValue *GV) const {
4393   if (!GV->hasExternalLinkage())
4394     return true;
4395 
4396   const auto OS = getTargetMachine().getTargetTriple().getOS();
4397   return OS == Triple::AMDHSA || OS == Triple::AMDPAL;
4398 }
4399 
4400 /// This transforms the control flow intrinsics to get the branch destination as
4401 /// last parameter, also switches branch target with BR if the need arise
4402 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
4403                                       SelectionDAG &DAG) const {
4404   SDLoc DL(BRCOND);
4405 
4406   SDNode *Intr = BRCOND.getOperand(1).getNode();
4407   SDValue Target = BRCOND.getOperand(2);
4408   SDNode *BR = nullptr;
4409   SDNode *SetCC = nullptr;
4410 
4411   if (Intr->getOpcode() == ISD::SETCC) {
4412     // As long as we negate the condition everything is fine
4413     SetCC = Intr;
4414     Intr = SetCC->getOperand(0).getNode();
4415 
4416   } else {
4417     // Get the target from BR if we don't negate the condition
4418     BR = findUser(BRCOND, ISD::BR);
4419     Target = BR->getOperand(1);
4420   }
4421 
4422   // FIXME: This changes the types of the intrinsics instead of introducing new
4423   // nodes with the correct types.
4424   // e.g. llvm.amdgcn.loop
4425 
4426   // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
4427   // =>     t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
4428 
4429   unsigned CFNode = isCFIntrinsic(Intr);
4430   if (CFNode == 0) {
4431     // This is a uniform branch so we don't need to legalize.
4432     return BRCOND;
4433   }
4434 
4435   bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
4436                    Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
4437 
4438   assert(!SetCC ||
4439         (SetCC->getConstantOperandVal(1) == 1 &&
4440          cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
4441                                                              ISD::SETNE));
4442 
4443   // operands of the new intrinsic call
4444   SmallVector<SDValue, 4> Ops;
4445   if (HaveChain)
4446     Ops.push_back(BRCOND.getOperand(0));
4447 
4448   Ops.append(Intr->op_begin() + (HaveChain ?  2 : 1), Intr->op_end());
4449   Ops.push_back(Target);
4450 
4451   ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
4452 
4453   // build the new intrinsic call
4454   SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
4455 
4456   if (!HaveChain) {
4457     SDValue Ops[] =  {
4458       SDValue(Result, 0),
4459       BRCOND.getOperand(0)
4460     };
4461 
4462     Result = DAG.getMergeValues(Ops, DL).getNode();
4463   }
4464 
4465   if (BR) {
4466     // Give the branch instruction our target
4467     SDValue Ops[] = {
4468       BR->getOperand(0),
4469       BRCOND.getOperand(2)
4470     };
4471     SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
4472     DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
4473     BR = NewBR.getNode();
4474   }
4475 
4476   SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
4477 
4478   // Copy the intrinsic results to registers
4479   for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
4480     SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
4481     if (!CopyToReg)
4482       continue;
4483 
4484     Chain = DAG.getCopyToReg(
4485       Chain, DL,
4486       CopyToReg->getOperand(1),
4487       SDValue(Result, i - 1),
4488       SDValue());
4489 
4490     DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
4491   }
4492 
4493   // Remove the old intrinsic from the chain
4494   DAG.ReplaceAllUsesOfValueWith(
4495     SDValue(Intr, Intr->getNumValues() - 1),
4496     Intr->getOperand(0));
4497 
4498   return Chain;
4499 }
4500 
4501 SDValue SITargetLowering::LowerRETURNADDR(SDValue Op,
4502                                           SelectionDAG &DAG) const {
4503   MVT VT = Op.getSimpleValueType();
4504   SDLoc DL(Op);
4505   // Checking the depth
4506   if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0)
4507     return DAG.getConstant(0, DL, VT);
4508 
4509   MachineFunction &MF = DAG.getMachineFunction();
4510   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4511   // Check for kernel and shader functions
4512   if (Info->isEntryFunction())
4513     return DAG.getConstant(0, DL, VT);
4514 
4515   MachineFrameInfo &MFI = MF.getFrameInfo();
4516   // There is a call to @llvm.returnaddress in this function
4517   MFI.setReturnAddressIsTaken(true);
4518 
4519   const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
4520   // Get the return address reg and mark it as an implicit live-in
4521   unsigned Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF), getRegClassFor(VT, Op.getNode()->isDivergent()));
4522 
4523   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
4524 }
4525 
4526 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
4527                                             SDValue Op,
4528                                             const SDLoc &DL,
4529                                             EVT VT) const {
4530   return Op.getValueType().bitsLE(VT) ?
4531       DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
4532       DAG.getNode(ISD::FTRUNC, DL, VT, Op);
4533 }
4534 
4535 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
4536   assert(Op.getValueType() == MVT::f16 &&
4537          "Do not know how to custom lower FP_ROUND for non-f16 type");
4538 
4539   SDValue Src = Op.getOperand(0);
4540   EVT SrcVT = Src.getValueType();
4541   if (SrcVT != MVT::f64)
4542     return Op;
4543 
4544   SDLoc DL(Op);
4545 
4546   SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
4547   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
4548   return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
4549 }
4550 
4551 SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
4552                                                SelectionDAG &DAG) const {
4553   EVT VT = Op.getValueType();
4554   const MachineFunction &MF = DAG.getMachineFunction();
4555   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4556   bool IsIEEEMode = Info->getMode().IEEE;
4557 
4558   // FIXME: Assert during eslection that this is only selected for
4559   // ieee_mode. Currently a combine can produce the ieee version for non-ieee
4560   // mode functions, but this happens to be OK since it's only done in cases
4561   // where there is known no sNaN.
4562   if (IsIEEEMode)
4563     return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);
4564 
4565   if (VT == MVT::v4f16)
4566     return splitBinaryVectorOp(Op, DAG);
4567   return Op;
4568 }
4569 
4570 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
4571   SDLoc SL(Op);
4572   SDValue Chain = Op.getOperand(0);
4573 
4574   if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4575       !Subtarget->isTrapHandlerEnabled())
4576     return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
4577 
4578   MachineFunction &MF = DAG.getMachineFunction();
4579   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4580   unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4581   assert(UserSGPR != AMDGPU::NoRegister);
4582   SDValue QueuePtr = CreateLiveInRegister(
4583     DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4584   SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
4585   SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
4586                                    QueuePtr, SDValue());
4587   SDValue Ops[] = {
4588     ToReg,
4589     DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16),
4590     SGPR01,
4591     ToReg.getValue(1)
4592   };
4593   return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4594 }
4595 
4596 SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
4597   SDLoc SL(Op);
4598   SDValue Chain = Op.getOperand(0);
4599   MachineFunction &MF = DAG.getMachineFunction();
4600 
4601   if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4602       !Subtarget->isTrapHandlerEnabled()) {
4603     DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
4604                                      "debugtrap handler not supported",
4605                                      Op.getDebugLoc(),
4606                                      DS_Warning);
4607     LLVMContext &Ctx = MF.getFunction().getContext();
4608     Ctx.diagnose(NoTrap);
4609     return Chain;
4610   }
4611 
4612   SDValue Ops[] = {
4613     Chain,
4614     DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16)
4615   };
4616   return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4617 }
4618 
4619 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
4620                                              SelectionDAG &DAG) const {
4621   // FIXME: Use inline constants (src_{shared, private}_base) instead.
4622   if (Subtarget->hasApertureRegs()) {
4623     unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
4624         AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
4625         AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
4626     unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
4627         AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
4628         AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
4629     unsigned Encoding =
4630         AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
4631         Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
4632         WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
4633 
4634     SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
4635     SDValue ApertureReg = SDValue(
4636         DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
4637     SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
4638     return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
4639   }
4640 
4641   MachineFunction &MF = DAG.getMachineFunction();
4642   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4643   unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4644   assert(UserSGPR != AMDGPU::NoRegister);
4645 
4646   SDValue QueuePtr = CreateLiveInRegister(
4647     DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4648 
4649   // Offset into amd_queue_t for group_segment_aperture_base_hi /
4650   // private_segment_aperture_base_hi.
4651   uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
4652 
4653   SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
4654 
4655   // TODO: Use custom target PseudoSourceValue.
4656   // TODO: We should use the value from the IR intrinsic call, but it might not
4657   // be available and how do we get it?
4658   MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
4659   return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
4660                      MinAlign(64, StructOffset),
4661                      MachineMemOperand::MODereferenceable |
4662                          MachineMemOperand::MOInvariant);
4663 }
4664 
4665 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
4666                                              SelectionDAG &DAG) const {
4667   SDLoc SL(Op);
4668   const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
4669 
4670   SDValue Src = ASC->getOperand(0);
4671   SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
4672 
4673   const AMDGPUTargetMachine &TM =
4674     static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
4675 
4676   // flat -> local/private
4677   if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
4678     unsigned DestAS = ASC->getDestAddressSpace();
4679 
4680     if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
4681         DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
4682       unsigned NullVal = TM.getNullPointerValue(DestAS);
4683       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4684       SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
4685       SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
4686 
4687       return DAG.getNode(ISD::SELECT, SL, MVT::i32,
4688                          NonNull, Ptr, SegmentNullPtr);
4689     }
4690   }
4691 
4692   // local/private -> flat
4693   if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
4694     unsigned SrcAS = ASC->getSrcAddressSpace();
4695 
4696     if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
4697         SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
4698       unsigned NullVal = TM.getNullPointerValue(SrcAS);
4699       SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4700 
4701       SDValue NonNull
4702         = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
4703 
4704       SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
4705       SDValue CvtPtr
4706         = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
4707 
4708       return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
4709                          DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
4710                          FlatNullPtr);
4711     }
4712   }
4713 
4714   // global <-> flat are no-ops and never emitted.
4715 
4716   const MachineFunction &MF = DAG.getMachineFunction();
4717   DiagnosticInfoUnsupported InvalidAddrSpaceCast(
4718     MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
4719   DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4720 
4721   return DAG.getUNDEF(ASC->getValueType(0));
4722 }
4723 
4724 // This lowers an INSERT_SUBVECTOR by extracting the individual elements from
4725 // the small vector and inserting them into the big vector. That is better than
4726 // the default expansion of doing it via a stack slot. Even though the use of
4727 // the stack slot would be optimized away afterwards, the stack slot itself
4728 // remains.
4729 SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
4730                                                 SelectionDAG &DAG) const {
4731   SDValue Vec = Op.getOperand(0);
4732   SDValue Ins = Op.getOperand(1);
4733   SDValue Idx = Op.getOperand(2);
4734   EVT VecVT = Vec.getValueType();
4735   EVT InsVT = Ins.getValueType();
4736   EVT EltVT = VecVT.getVectorElementType();
4737   unsigned InsNumElts = InsVT.getVectorNumElements();
4738   unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
4739   SDLoc SL(Op);
4740 
4741   for (unsigned I = 0; I != InsNumElts; ++I) {
4742     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins,
4743                               DAG.getConstant(I, SL, MVT::i32));
4744     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt,
4745                       DAG.getConstant(IdxVal + I, SL, MVT::i32));
4746   }
4747   return Vec;
4748 }
4749 
4750 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4751                                                  SelectionDAG &DAG) const {
4752   SDValue Vec = Op.getOperand(0);
4753   SDValue InsVal = Op.getOperand(1);
4754   SDValue Idx = Op.getOperand(2);
4755   EVT VecVT = Vec.getValueType();
4756   EVT EltVT = VecVT.getVectorElementType();
4757   unsigned VecSize = VecVT.getSizeInBits();
4758   unsigned EltSize = EltVT.getSizeInBits();
4759 
4760 
4761   assert(VecSize <= 64);
4762 
4763   unsigned NumElts = VecVT.getVectorNumElements();
4764   SDLoc SL(Op);
4765   auto KIdx = dyn_cast<ConstantSDNode>(Idx);
4766 
4767   if (NumElts == 4 && EltSize == 16 && KIdx) {
4768     SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
4769 
4770     SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4771                                  DAG.getConstant(0, SL, MVT::i32));
4772     SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4773                                  DAG.getConstant(1, SL, MVT::i32));
4774 
4775     SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
4776     SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
4777 
4778     unsigned Idx = KIdx->getZExtValue();
4779     bool InsertLo = Idx < 2;
4780     SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
4781       InsertLo ? LoVec : HiVec,
4782       DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
4783       DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
4784 
4785     InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
4786 
4787     SDValue Concat = InsertLo ?
4788       DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
4789       DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
4790 
4791     return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
4792   }
4793 
4794   if (isa<ConstantSDNode>(Idx))
4795     return SDValue();
4796 
4797   MVT IntVT = MVT::getIntegerVT(VecSize);
4798 
4799   // Avoid stack access for dynamic indexing.
4800   // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
4801 
4802   // Create a congruent vector with the target value in each element so that
4803   // the required element can be masked and ORed into the target vector.
4804   SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT,
4805                                DAG.getSplatBuildVector(VecVT, SL, InsVal));
4806 
4807   assert(isPowerOf2_32(EltSize));
4808   SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4809 
4810   // Convert vector index to bit-index.
4811   SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
4812 
4813   SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4814   SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
4815                             DAG.getConstant(0xffff, SL, IntVT),
4816                             ScaledIdx);
4817 
4818   SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);
4819   SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT,
4820                             DAG.getNOT(SL, BFM, IntVT), BCVec);
4821 
4822   SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS);
4823   return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI);
4824 }
4825 
4826 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4827                                                   SelectionDAG &DAG) const {
4828   SDLoc SL(Op);
4829 
4830   EVT ResultVT = Op.getValueType();
4831   SDValue Vec = Op.getOperand(0);
4832   SDValue Idx = Op.getOperand(1);
4833   EVT VecVT = Vec.getValueType();
4834   unsigned VecSize = VecVT.getSizeInBits();
4835   EVT EltVT = VecVT.getVectorElementType();
4836   assert(VecSize <= 64);
4837 
4838   DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
4839 
4840   // Make sure we do any optimizations that will make it easier to fold
4841   // source modifiers before obscuring it with bit operations.
4842 
4843   // XXX - Why doesn't this get called when vector_shuffle is expanded?
4844   if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
4845     return Combined;
4846 
4847   unsigned EltSize = EltVT.getSizeInBits();
4848   assert(isPowerOf2_32(EltSize));
4849 
4850   MVT IntVT = MVT::getIntegerVT(VecSize);
4851   SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4852 
4853   // Convert vector index to bit-index (* EltSize)
4854   SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
4855 
4856   SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4857   SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx);
4858 
4859   if (ResultVT == MVT::f16) {
4860     SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt);
4861     return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4862   }
4863 
4864   return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT);
4865 }
4866 
4867 static bool elementPairIsContiguous(ArrayRef<int> Mask, int Elt) {
4868   assert(Elt % 2 == 0);
4869   return Mask[Elt + 1] == Mask[Elt] + 1 && (Mask[Elt] % 2 == 0);
4870 }
4871 
4872 SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
4873                                               SelectionDAG &DAG) const {
4874   SDLoc SL(Op);
4875   EVT ResultVT = Op.getValueType();
4876   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
4877 
4878   EVT PackVT = ResultVT.isInteger() ? MVT::v2i16 : MVT::v2f16;
4879   EVT EltVT = PackVT.getVectorElementType();
4880   int SrcNumElts = Op.getOperand(0).getValueType().getVectorNumElements();
4881 
4882   // vector_shuffle <0,1,6,7> lhs, rhs
4883   // -> concat_vectors (extract_subvector lhs, 0), (extract_subvector rhs, 2)
4884   //
4885   // vector_shuffle <6,7,2,3> lhs, rhs
4886   // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 2)
4887   //
4888   // vector_shuffle <6,7,0,1> lhs, rhs
4889   // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 0)
4890 
4891   // Avoid scalarizing when both halves are reading from consecutive elements.
4892   SmallVector<SDValue, 4> Pieces;
4893   for (int I = 0, N = ResultVT.getVectorNumElements(); I != N; I += 2) {
4894     if (elementPairIsContiguous(SVN->getMask(), I)) {
4895       const int Idx = SVN->getMaskElt(I);
4896       int VecIdx = Idx < SrcNumElts ? 0 : 1;
4897       int EltIdx = Idx < SrcNumElts ? Idx : Idx - SrcNumElts;
4898       SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL,
4899                                     PackVT, SVN->getOperand(VecIdx),
4900                                     DAG.getConstant(EltIdx, SL, MVT::i32));
4901       Pieces.push_back(SubVec);
4902     } else {
4903       const int Idx0 = SVN->getMaskElt(I);
4904       const int Idx1 = SVN->getMaskElt(I + 1);
4905       int VecIdx0 = Idx0 < SrcNumElts ? 0 : 1;
4906       int VecIdx1 = Idx1 < SrcNumElts ? 0 : 1;
4907       int EltIdx0 = Idx0 < SrcNumElts ? Idx0 : Idx0 - SrcNumElts;
4908       int EltIdx1 = Idx1 < SrcNumElts ? Idx1 : Idx1 - SrcNumElts;
4909 
4910       SDValue Vec0 = SVN->getOperand(VecIdx0);
4911       SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
4912                                  Vec0, DAG.getConstant(EltIdx0, SL, MVT::i32));
4913 
4914       SDValue Vec1 = SVN->getOperand(VecIdx1);
4915       SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
4916                                  Vec1, DAG.getConstant(EltIdx1, SL, MVT::i32));
4917       Pieces.push_back(DAG.getBuildVector(PackVT, SL, { Elt0, Elt1 }));
4918     }
4919   }
4920 
4921   return DAG.getNode(ISD::CONCAT_VECTORS, SL, ResultVT, Pieces);
4922 }
4923 
4924 SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
4925                                             SelectionDAG &DAG) const {
4926   SDLoc SL(Op);
4927   EVT VT = Op.getValueType();
4928 
4929   if (VT == MVT::v4i16 || VT == MVT::v4f16) {
4930     EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2);
4931 
4932     // Turn into pair of packed build_vectors.
4933     // TODO: Special case for constants that can be materialized with s_mov_b64.
4934     SDValue Lo = DAG.getBuildVector(HalfVT, SL,
4935                                     { Op.getOperand(0), Op.getOperand(1) });
4936     SDValue Hi = DAG.getBuildVector(HalfVT, SL,
4937                                     { Op.getOperand(2), Op.getOperand(3) });
4938 
4939     SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo);
4940     SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi);
4941 
4942     SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi });
4943     return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
4944   }
4945 
4946   assert(VT == MVT::v2f16 || VT == MVT::v2i16);
4947   assert(!Subtarget->hasVOP3PInsts() && "this should be legal");
4948 
4949   SDValue Lo = Op.getOperand(0);
4950   SDValue Hi = Op.getOperand(1);
4951 
4952   // Avoid adding defined bits with the zero_extend.
4953   if (Hi.isUndef()) {
4954     Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4955     SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo);
4956     return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo);
4957   }
4958 
4959   Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi);
4960   Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi);
4961 
4962   SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi,
4963                               DAG.getConstant(16, SL, MVT::i32));
4964   if (Lo.isUndef())
4965     return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi);
4966 
4967   Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4968   Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo);
4969 
4970   SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi);
4971   return DAG.getNode(ISD::BITCAST, SL, VT, Or);
4972 }
4973 
4974 bool
4975 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4976   // We can fold offsets for anything that doesn't require a GOT relocation.
4977   return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
4978           GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4979           GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4980          !shouldEmitGOTReloc(GA->getGlobal());
4981 }
4982 
4983 static SDValue
4984 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
4985                         const SDLoc &DL, unsigned Offset, EVT PtrVT,
4986                         unsigned GAFlags = SIInstrInfo::MO_NONE) {
4987   // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
4988   // lowered to the following code sequence:
4989   //
4990   // For constant address space:
4991   //   s_getpc_b64 s[0:1]
4992   //   s_add_u32 s0, s0, $symbol
4993   //   s_addc_u32 s1, s1, 0
4994   //
4995   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
4996   //   a fixup or relocation is emitted to replace $symbol with a literal
4997   //   constant, which is a pc-relative offset from the encoding of the $symbol
4998   //   operand to the global variable.
4999   //
5000   // For global address space:
5001   //   s_getpc_b64 s[0:1]
5002   //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
5003   //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
5004   //
5005   //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
5006   //   fixups or relocations are emitted to replace $symbol@*@lo and
5007   //   $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
5008   //   which is a 64-bit pc-relative offset from the encoding of the $symbol
5009   //   operand to the global variable.
5010   //
5011   // What we want here is an offset from the value returned by s_getpc
5012   // (which is the address of the s_add_u32 instruction) to the global
5013   // variable, but since the encoding of $symbol starts 4 bytes after the start
5014   // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
5015   // small. This requires us to add 4 to the global variable offset in order to
5016   // compute the correct address.
5017   SDValue PtrLo =
5018       DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags);
5019   SDValue PtrHi;
5020   if (GAFlags == SIInstrInfo::MO_NONE) {
5021     PtrHi = DAG.getTargetConstant(0, DL, MVT::i32);
5022   } else {
5023     PtrHi =
5024         DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags + 1);
5025   }
5026   return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
5027 }
5028 
5029 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
5030                                              SDValue Op,
5031                                              SelectionDAG &DAG) const {
5032   GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
5033   const GlobalValue *GV = GSD->getGlobal();
5034   if ((GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
5035        shouldUseLDSConstAddress(GV)) ||
5036       GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS ||
5037       GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS)
5038     return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
5039 
5040   SDLoc DL(GSD);
5041   EVT PtrVT = Op.getValueType();
5042 
5043   if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
5044     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, GSD->getOffset(),
5045                                             SIInstrInfo::MO_ABS32_LO);
5046     return DAG.getNode(AMDGPUISD::LDS, DL, MVT::i32, GA);
5047   }
5048 
5049   if (shouldEmitFixup(GV))
5050     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
5051   else if (shouldEmitPCReloc(GV))
5052     return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
5053                                    SIInstrInfo::MO_REL32);
5054 
5055   SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
5056                                             SIInstrInfo::MO_GOTPCREL32);
5057 
5058   Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
5059   PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
5060   const DataLayout &DataLayout = DAG.getDataLayout();
5061   unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
5062   MachinePointerInfo PtrInfo
5063     = MachinePointerInfo::getGOT(DAG.getMachineFunction());
5064 
5065   return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
5066                      MachineMemOperand::MODereferenceable |
5067                          MachineMemOperand::MOInvariant);
5068 }
5069 
5070 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
5071                                    const SDLoc &DL, SDValue V) const {
5072   // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
5073   // the destination register.
5074   //
5075   // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
5076   // so we will end up with redundant moves to m0.
5077   //
5078   // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
5079 
5080   // A Null SDValue creates a glue result.
5081   SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
5082                                   V, Chain);
5083   return SDValue(M0, 0);
5084 }
5085 
5086 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
5087                                                  SDValue Op,
5088                                                  MVT VT,
5089                                                  unsigned Offset) const {
5090   SDLoc SL(Op);
5091   SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
5092                                            DAG.getEntryNode(), Offset, 4, false);
5093   // The local size values will have the hi 16-bits as zero.
5094   return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
5095                      DAG.getValueType(VT));
5096 }
5097 
5098 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
5099                                         EVT VT) {
5100   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
5101                                       "non-hsa intrinsic with hsa target",
5102                                       DL.getDebugLoc());
5103   DAG.getContext()->diagnose(BadIntrin);
5104   return DAG.getUNDEF(VT);
5105 }
5106 
5107 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
5108                                          EVT VT) {
5109   DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
5110                                       "intrinsic not supported on subtarget",
5111                                       DL.getDebugLoc());
5112   DAG.getContext()->diagnose(BadIntrin);
5113   return DAG.getUNDEF(VT);
5114 }
5115 
5116 static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
5117                                     ArrayRef<SDValue> Elts) {
5118   assert(!Elts.empty());
5119   MVT Type;
5120   unsigned NumElts;
5121 
5122   if (Elts.size() == 1) {
5123     Type = MVT::f32;
5124     NumElts = 1;
5125   } else if (Elts.size() == 2) {
5126     Type = MVT::v2f32;
5127     NumElts = 2;
5128   } else if (Elts.size() == 3) {
5129     Type = MVT::v3f32;
5130     NumElts = 3;
5131   } else if (Elts.size() <= 4) {
5132     Type = MVT::v4f32;
5133     NumElts = 4;
5134   } else if (Elts.size() <= 8) {
5135     Type = MVT::v8f32;
5136     NumElts = 8;
5137   } else {
5138     assert(Elts.size() <= 16);
5139     Type = MVT::v16f32;
5140     NumElts = 16;
5141   }
5142 
5143   SmallVector<SDValue, 16> VecElts(NumElts);
5144   for (unsigned i = 0; i < Elts.size(); ++i) {
5145     SDValue Elt = Elts[i];
5146     if (Elt.getValueType() != MVT::f32)
5147       Elt = DAG.getBitcast(MVT::f32, Elt);
5148     VecElts[i] = Elt;
5149   }
5150   for (unsigned i = Elts.size(); i < NumElts; ++i)
5151     VecElts[i] = DAG.getUNDEF(MVT::f32);
5152 
5153   if (NumElts == 1)
5154     return VecElts[0];
5155   return DAG.getBuildVector(Type, DL, VecElts);
5156 }
5157 
5158 static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG,
5159                              SDValue *GLC, SDValue *SLC, SDValue *DLC) {
5160   auto CachePolicyConst = cast<ConstantSDNode>(CachePolicy.getNode());
5161 
5162   uint64_t Value = CachePolicyConst->getZExtValue();
5163   SDLoc DL(CachePolicy);
5164   if (GLC) {
5165     *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
5166     Value &= ~(uint64_t)0x1;
5167   }
5168   if (SLC) {
5169     *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
5170     Value &= ~(uint64_t)0x2;
5171   }
5172   if (DLC) {
5173     *DLC = DAG.getTargetConstant((Value & 0x4) ? 1 : 0, DL, MVT::i32);
5174     Value &= ~(uint64_t)0x4;
5175   }
5176 
5177   return Value == 0;
5178 }
5179 
5180 static SDValue padEltsToUndef(SelectionDAG &DAG, const SDLoc &DL, EVT CastVT,
5181                               SDValue Src, int ExtraElts) {
5182   EVT SrcVT = Src.getValueType();
5183 
5184   SmallVector<SDValue, 8> Elts;
5185 
5186   if (SrcVT.isVector())
5187     DAG.ExtractVectorElements(Src, Elts);
5188   else
5189     Elts.push_back(Src);
5190 
5191   SDValue Undef = DAG.getUNDEF(SrcVT.getScalarType());
5192   while (ExtraElts--)
5193     Elts.push_back(Undef);
5194 
5195   return DAG.getBuildVector(CastVT, DL, Elts);
5196 }
5197 
5198 // Re-construct the required return value for a image load intrinsic.
5199 // This is more complicated due to the optional use TexFailCtrl which means the required
5200 // return type is an aggregate
5201 static SDValue constructRetValue(SelectionDAG &DAG,
5202                                  MachineSDNode *Result,
5203                                  ArrayRef<EVT> ResultTypes,
5204                                  bool IsTexFail, bool Unpacked, bool IsD16,
5205                                  int DMaskPop, int NumVDataDwords,
5206                                  const SDLoc &DL, LLVMContext &Context) {
5207   // Determine the required return type. This is the same regardless of IsTexFail flag
5208   EVT ReqRetVT = ResultTypes[0];
5209   int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1;
5210   int NumDataDwords = (!IsD16 || (IsD16 && Unpacked)) ?
5211     ReqRetNumElts : (ReqRetNumElts + 1) / 2;
5212 
5213   int MaskPopDwords = (!IsD16 || (IsD16 && Unpacked)) ?
5214     DMaskPop : (DMaskPop + 1) / 2;
5215 
5216   MVT DataDwordVT = NumDataDwords == 1 ?
5217     MVT::i32 : MVT::getVectorVT(MVT::i32, NumDataDwords);
5218 
5219   MVT MaskPopVT = MaskPopDwords == 1 ?
5220     MVT::i32 : MVT::getVectorVT(MVT::i32, MaskPopDwords);
5221 
5222   SDValue Data(Result, 0);
5223   SDValue TexFail;
5224 
5225   if (IsTexFail) {
5226     SDValue ZeroIdx = DAG.getConstant(0, DL, MVT::i32);
5227     if (MaskPopVT.isVector()) {
5228       Data = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MaskPopVT,
5229                          SDValue(Result, 0), ZeroIdx);
5230     } else {
5231       Data = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MaskPopVT,
5232                          SDValue(Result, 0), ZeroIdx);
5233     }
5234 
5235     TexFail = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
5236                           SDValue(Result, 0),
5237                           DAG.getConstant(MaskPopDwords, DL, MVT::i32));
5238   }
5239 
5240   if (DataDwordVT.isVector())
5241     Data = padEltsToUndef(DAG, DL, DataDwordVT, Data,
5242                           NumDataDwords - MaskPopDwords);
5243 
5244   if (IsD16)
5245     Data = adjustLoadValueTypeImpl(Data, ReqRetVT, DL, DAG, Unpacked);
5246 
5247   if (!ReqRetVT.isVector())
5248     Data = DAG.getNode(ISD::TRUNCATE, DL, ReqRetVT.changeTypeToInteger(), Data);
5249 
5250   Data = DAG.getNode(ISD::BITCAST, DL, ReqRetVT, Data);
5251 
5252   if (TexFail)
5253     return DAG.getMergeValues({Data, TexFail, SDValue(Result, 1)}, DL);
5254 
5255   if (Result->getNumValues() == 1)
5256     return Data;
5257 
5258   return DAG.getMergeValues({Data, SDValue(Result, 1)}, DL);
5259 }
5260 
5261 static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE,
5262                          SDValue *LWE, bool &IsTexFail) {
5263   auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode());
5264 
5265   uint64_t Value = TexFailCtrlConst->getZExtValue();
5266   if (Value) {
5267     IsTexFail = true;
5268   }
5269 
5270   SDLoc DL(TexFailCtrlConst);
5271   *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
5272   Value &= ~(uint64_t)0x1;
5273   *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
5274   Value &= ~(uint64_t)0x2;
5275 
5276   return Value == 0;
5277 }
5278 
5279 SDValue SITargetLowering::lowerImage(SDValue Op,
5280                                      const AMDGPU::ImageDimIntrinsicInfo *Intr,
5281                                      SelectionDAG &DAG) const {
5282   SDLoc DL(Op);
5283   MachineFunction &MF = DAG.getMachineFunction();
5284   const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>();
5285   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
5286       AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
5287   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
5288   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
5289       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
5290   const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
5291       AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
5292   unsigned IntrOpcode = Intr->BaseOpcode;
5293   bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10;
5294 
5295   SmallVector<EVT, 3> ResultTypes(Op->value_begin(), Op->value_end());
5296   SmallVector<EVT, 3> OrigResultTypes(Op->value_begin(), Op->value_end());
5297   bool IsD16 = false;
5298   bool IsA16 = false;
5299   SDValue VData;
5300   int NumVDataDwords;
5301   bool AdjustRetType = false;
5302 
5303   unsigned AddrIdx; // Index of first address argument
5304   unsigned DMask;
5305   unsigned DMaskLanes = 0;
5306 
5307   if (BaseOpcode->Atomic) {
5308     VData = Op.getOperand(2);
5309 
5310     bool Is64Bit = VData.getValueType() == MVT::i64;
5311     if (BaseOpcode->AtomicX2) {
5312       SDValue VData2 = Op.getOperand(3);
5313       VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL,
5314                                  {VData, VData2});
5315       if (Is64Bit)
5316         VData = DAG.getBitcast(MVT::v4i32, VData);
5317 
5318       ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32;
5319       DMask = Is64Bit ? 0xf : 0x3;
5320       NumVDataDwords = Is64Bit ? 4 : 2;
5321       AddrIdx = 4;
5322     } else {
5323       DMask = Is64Bit ? 0x3 : 0x1;
5324       NumVDataDwords = Is64Bit ? 2 : 1;
5325       AddrIdx = 3;
5326     }
5327   } else {
5328     unsigned DMaskIdx = BaseOpcode->Store ? 3 : isa<MemSDNode>(Op) ? 2 : 1;
5329     auto DMaskConst = cast<ConstantSDNode>(Op.getOperand(DMaskIdx));
5330     DMask = DMaskConst->getZExtValue();
5331     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
5332 
5333     if (BaseOpcode->Store) {
5334       VData = Op.getOperand(2);
5335 
5336       MVT StoreVT = VData.getSimpleValueType();
5337       if (StoreVT.getScalarType() == MVT::f16) {
5338         if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
5339           return Op; // D16 is unsupported for this instruction
5340 
5341         IsD16 = true;
5342         VData = handleD16VData(VData, DAG);
5343       }
5344 
5345       NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32;
5346     } else {
5347       // Work out the num dwords based on the dmask popcount and underlying type
5348       // and whether packing is supported.
5349       MVT LoadVT = ResultTypes[0].getSimpleVT();
5350       if (LoadVT.getScalarType() == MVT::f16) {
5351         if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
5352           return Op; // D16 is unsupported for this instruction
5353 
5354         IsD16 = true;
5355       }
5356 
5357       // Confirm that the return type is large enough for the dmask specified
5358       if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) ||
5359           (!LoadVT.isVector() && DMaskLanes > 1))
5360           return Op;
5361 
5362       if (IsD16 && !Subtarget->hasUnpackedD16VMem())
5363         NumVDataDwords = (DMaskLanes + 1) / 2;
5364       else
5365         NumVDataDwords = DMaskLanes;
5366 
5367       AdjustRetType = true;
5368     }
5369 
5370     AddrIdx = DMaskIdx + 1;
5371   }
5372 
5373   unsigned NumGradients = BaseOpcode->Gradients ? DimInfo->NumGradients : 0;
5374   unsigned NumCoords = BaseOpcode->Coordinates ? DimInfo->NumCoords : 0;
5375   unsigned NumLCM = BaseOpcode->LodOrClampOrMip ? 1 : 0;
5376   unsigned NumVAddrs = BaseOpcode->NumExtraArgs + NumGradients +
5377                        NumCoords + NumLCM;
5378   unsigned NumMIVAddrs = NumVAddrs;
5379 
5380   SmallVector<SDValue, 4> VAddrs;
5381 
5382   // Optimize _L to _LZ when _L is zero
5383   if (LZMappingInfo) {
5384     if (auto ConstantLod =
5385          dyn_cast<ConstantFPSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
5386       if (ConstantLod->isZero() || ConstantLod->isNegative()) {
5387         IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
5388         NumMIVAddrs--;               // remove 'lod'
5389       }
5390     }
5391   }
5392 
5393   // Optimize _mip away, when 'lod' is zero
5394   if (MIPMappingInfo) {
5395     if (auto ConstantLod =
5396          dyn_cast<ConstantSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
5397       if (ConstantLod->isNullValue()) {
5398         IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
5399         NumMIVAddrs--;               // remove 'lod'
5400       }
5401     }
5402   }
5403 
5404   // Check for 16 bit addresses and pack if true.
5405   unsigned DimIdx = AddrIdx + BaseOpcode->NumExtraArgs;
5406   MVT VAddrVT = Op.getOperand(DimIdx).getSimpleValueType();
5407   const MVT VAddrScalarVT = VAddrVT.getScalarType();
5408   if (((VAddrScalarVT == MVT::f16) || (VAddrScalarVT == MVT::i16))) {
5409     // Illegal to use a16 images
5410     if (!ST->hasFeature(AMDGPU::FeatureR128A16) && !ST->hasFeature(AMDGPU::FeatureGFX10A16))
5411       return Op;
5412 
5413     IsA16 = true;
5414     const MVT VectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16;
5415     for (unsigned i = AddrIdx; i < (AddrIdx + NumMIVAddrs); ++i) {
5416       SDValue AddrLo;
5417       // Push back extra arguments.
5418       if (i < DimIdx) {
5419         AddrLo = Op.getOperand(i);
5420       } else {
5421         // Dz/dh, dz/dv and the last odd coord are packed with undef. Also,
5422         // in 1D, derivatives dx/dh and dx/dv are packed with undef.
5423         if (((i + 1) >= (AddrIdx + NumMIVAddrs)) ||
5424             ((NumGradients / 2) % 2 == 1 &&
5425             (i == DimIdx + (NumGradients / 2) - 1 ||
5426              i == DimIdx + NumGradients - 1))) {
5427           AddrLo = Op.getOperand(i);
5428           if (AddrLo.getValueType() != MVT::i16)
5429             AddrLo = DAG.getBitcast(MVT::i16, Op.getOperand(i));
5430           AddrLo = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, AddrLo);
5431         } else {
5432           AddrLo = DAG.getBuildVector(VectorVT, DL,
5433                                       {Op.getOperand(i), Op.getOperand(i + 1)});
5434           i++;
5435         }
5436         AddrLo = DAG.getBitcast(MVT::f32, AddrLo);
5437       }
5438       VAddrs.push_back(AddrLo);
5439     }
5440   } else {
5441     for (unsigned i = 0; i < NumMIVAddrs; ++i)
5442       VAddrs.push_back(Op.getOperand(AddrIdx + i));
5443   }
5444 
5445   // If the register allocator cannot place the address registers contiguously
5446   // without introducing moves, then using the non-sequential address encoding
5447   // is always preferable, since it saves VALU instructions and is usually a
5448   // wash in terms of code size or even better.
5449   //
5450   // However, we currently have no way of hinting to the register allocator that
5451   // MIMG addresses should be placed contiguously when it is possible to do so,
5452   // so force non-NSA for the common 2-address case as a heuristic.
5453   //
5454   // SIShrinkInstructions will convert NSA encodings to non-NSA after register
5455   // allocation when possible.
5456   bool UseNSA =
5457       ST->hasFeature(AMDGPU::FeatureNSAEncoding) && VAddrs.size() >= 3;
5458   SDValue VAddr;
5459   if (!UseNSA)
5460     VAddr = getBuildDwordsVector(DAG, DL, VAddrs);
5461 
5462   SDValue True = DAG.getTargetConstant(1, DL, MVT::i1);
5463   SDValue False = DAG.getTargetConstant(0, DL, MVT::i1);
5464   unsigned CtrlIdx; // Index of texfailctrl argument
5465   SDValue Unorm;
5466   if (!BaseOpcode->Sampler) {
5467     Unorm = True;
5468     CtrlIdx = AddrIdx + NumVAddrs + 1;
5469   } else {
5470     auto UnormConst =
5471         cast<ConstantSDNode>(Op.getOperand(AddrIdx + NumVAddrs + 2));
5472 
5473     Unorm = UnormConst->getZExtValue() ? True : False;
5474     CtrlIdx = AddrIdx + NumVAddrs + 3;
5475   }
5476 
5477   SDValue TFE;
5478   SDValue LWE;
5479   SDValue TexFail = Op.getOperand(CtrlIdx);
5480   bool IsTexFail = false;
5481   if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail))
5482     return Op;
5483 
5484   if (IsTexFail) {
5485     if (!DMaskLanes) {
5486       // Expecting to get an error flag since TFC is on - and dmask is 0
5487       // Force dmask to be at least 1 otherwise the instruction will fail
5488       DMask = 0x1;
5489       DMaskLanes = 1;
5490       NumVDataDwords = 1;
5491     }
5492     NumVDataDwords += 1;
5493     AdjustRetType = true;
5494   }
5495 
5496   // Has something earlier tagged that the return type needs adjusting
5497   // This happens if the instruction is a load or has set TexFailCtrl flags
5498   if (AdjustRetType) {
5499     // NumVDataDwords reflects the true number of dwords required in the return type
5500     if (DMaskLanes == 0 && !BaseOpcode->Store) {
5501       // This is a no-op load. This can be eliminated
5502       SDValue Undef = DAG.getUNDEF(Op.getValueType());
5503       if (isa<MemSDNode>(Op))
5504         return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
5505       return Undef;
5506     }
5507 
5508     EVT NewVT = NumVDataDwords > 1 ?
5509                   EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumVDataDwords)
5510                 : MVT::i32;
5511 
5512     ResultTypes[0] = NewVT;
5513     if (ResultTypes.size() == 3) {
5514       // Original result was aggregate type used for TexFailCtrl results
5515       // The actual instruction returns as a vector type which has now been
5516       // created. Remove the aggregate result.
5517       ResultTypes.erase(&ResultTypes[1]);
5518     }
5519   }
5520 
5521   SDValue GLC;
5522   SDValue SLC;
5523   SDValue DLC;
5524   if (BaseOpcode->Atomic) {
5525     GLC = True; // TODO no-return optimization
5526     if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, nullptr, &SLC,
5527                           IsGFX10 ? &DLC : nullptr))
5528       return Op;
5529   } else {
5530     if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, &GLC, &SLC,
5531                           IsGFX10 ? &DLC : nullptr))
5532       return Op;
5533   }
5534 
5535   SmallVector<SDValue, 26> Ops;
5536   if (BaseOpcode->Store || BaseOpcode->Atomic)
5537     Ops.push_back(VData); // vdata
5538   if (UseNSA) {
5539     for (const SDValue &Addr : VAddrs)
5540       Ops.push_back(Addr);
5541   } else {
5542     Ops.push_back(VAddr);
5543   }
5544   Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs)); // rsrc
5545   if (BaseOpcode->Sampler)
5546     Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs + 1)); // sampler
5547   Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32));
5548   if (IsGFX10)
5549     Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32));
5550   Ops.push_back(Unorm);
5551   if (IsGFX10)
5552     Ops.push_back(DLC);
5553   Ops.push_back(GLC);
5554   Ops.push_back(SLC);
5555   Ops.push_back(IsA16 &&  // r128, a16 for gfx9
5556                 ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False);
5557   if (IsGFX10)
5558     Ops.push_back(IsA16 ? True : False);
5559   Ops.push_back(TFE);
5560   Ops.push_back(LWE);
5561   if (!IsGFX10)
5562     Ops.push_back(DimInfo->DA ? True : False);
5563   if (BaseOpcode->HasD16)
5564     Ops.push_back(IsD16 ? True : False);
5565   if (isa<MemSDNode>(Op))
5566     Ops.push_back(Op.getOperand(0)); // chain
5567 
5568   int NumVAddrDwords =
5569       UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32;
5570   int Opcode = -1;
5571 
5572   if (IsGFX10) {
5573     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
5574                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
5575                                           : AMDGPU::MIMGEncGfx10Default,
5576                                    NumVDataDwords, NumVAddrDwords);
5577   } else {
5578     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5579       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
5580                                      NumVDataDwords, NumVAddrDwords);
5581     if (Opcode == -1)
5582       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
5583                                      NumVDataDwords, NumVAddrDwords);
5584   }
5585   assert(Opcode != -1);
5586 
5587   MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops);
5588   if (auto MemOp = dyn_cast<MemSDNode>(Op)) {
5589     MachineMemOperand *MemRef = MemOp->getMemOperand();
5590     DAG.setNodeMemRefs(NewNode, {MemRef});
5591   }
5592 
5593   if (BaseOpcode->AtomicX2) {
5594     SmallVector<SDValue, 1> Elt;
5595     DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1);
5596     return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL);
5597   } else if (!BaseOpcode->Store) {
5598     return constructRetValue(DAG, NewNode,
5599                              OrigResultTypes, IsTexFail,
5600                              Subtarget->hasUnpackedD16VMem(), IsD16,
5601                              DMaskLanes, NumVDataDwords, DL,
5602                              *DAG.getContext());
5603   }
5604 
5605   return SDValue(NewNode, 0);
5606 }
5607 
5608 SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
5609                                        SDValue Offset, SDValue CachePolicy,
5610                                        SelectionDAG &DAG) const {
5611   MachineFunction &MF = DAG.getMachineFunction();
5612 
5613   const DataLayout &DataLayout = DAG.getDataLayout();
5614   unsigned Align =
5615       DataLayout.getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
5616 
5617   MachineMemOperand *MMO = MF.getMachineMemOperand(
5618       MachinePointerInfo(),
5619       MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
5620           MachineMemOperand::MOInvariant,
5621       VT.getStoreSize(), Align);
5622 
5623   if (!Offset->isDivergent()) {
5624     SDValue Ops[] = {
5625         Rsrc,
5626         Offset, // Offset
5627         CachePolicy
5628     };
5629 
5630     // Widen vec3 load to vec4.
5631     if (VT.isVector() && VT.getVectorNumElements() == 3) {
5632       EVT WidenedVT =
5633           EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
5634       auto WidenedOp = DAG.getMemIntrinsicNode(
5635           AMDGPUISD::SBUFFER_LOAD, DL, DAG.getVTList(WidenedVT), Ops, WidenedVT,
5636           MF.getMachineMemOperand(MMO, 0, WidenedVT.getStoreSize()));
5637       auto Subvector = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, WidenedOp,
5638                                    DAG.getVectorIdxConstant(0, DL));
5639       return Subvector;
5640     }
5641 
5642     return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL,
5643                                    DAG.getVTList(VT), Ops, VT, MMO);
5644   }
5645 
5646   // We have a divergent offset. Emit a MUBUF buffer load instead. We can
5647   // assume that the buffer is unswizzled.
5648   SmallVector<SDValue, 4> Loads;
5649   unsigned NumLoads = 1;
5650   MVT LoadVT = VT.getSimpleVT();
5651   unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1;
5652   assert((LoadVT.getScalarType() == MVT::i32 ||
5653           LoadVT.getScalarType() == MVT::f32));
5654 
5655   if (NumElts == 8 || NumElts == 16) {
5656     NumLoads = NumElts / 4;
5657     LoadVT = MVT::getVectorVT(LoadVT.getScalarType(), 4);
5658   }
5659 
5660   SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue});
5661   SDValue Ops[] = {
5662       DAG.getEntryNode(),                               // Chain
5663       Rsrc,                                             // rsrc
5664       DAG.getConstant(0, DL, MVT::i32),                 // vindex
5665       {},                                               // voffset
5666       {},                                               // soffset
5667       {},                                               // offset
5668       CachePolicy,                                      // cachepolicy
5669       DAG.getTargetConstant(0, DL, MVT::i1),            // idxen
5670   };
5671 
5672   // Use the alignment to ensure that the required offsets will fit into the
5673   // immediate offsets.
5674   setBufferOffsets(Offset, DAG, &Ops[3], NumLoads > 1 ? 16 * NumLoads : 4);
5675 
5676   uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
5677   for (unsigned i = 0; i < NumLoads; ++i) {
5678     Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32);
5679     Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, Ops,
5680                                         LoadVT, MMO, DAG));
5681   }
5682 
5683   if (NumElts == 8 || NumElts == 16)
5684     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads);
5685 
5686   return Loads[0];
5687 }
5688 
5689 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
5690                                                   SelectionDAG &DAG) const {
5691   MachineFunction &MF = DAG.getMachineFunction();
5692   auto MFI = MF.getInfo<SIMachineFunctionInfo>();
5693 
5694   EVT VT = Op.getValueType();
5695   SDLoc DL(Op);
5696   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5697 
5698   // TODO: Should this propagate fast-math-flags?
5699 
5700   switch (IntrinsicID) {
5701   case Intrinsic::amdgcn_implicit_buffer_ptr: {
5702     if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction()))
5703       return emitNonHSAIntrinsicError(DAG, DL, VT);
5704     return getPreloadedValue(DAG, *MFI, VT,
5705                              AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
5706   }
5707   case Intrinsic::amdgcn_dispatch_ptr:
5708   case Intrinsic::amdgcn_queue_ptr: {
5709     if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) {
5710       DiagnosticInfoUnsupported BadIntrin(
5711           MF.getFunction(), "unsupported hsa intrinsic without hsa target",
5712           DL.getDebugLoc());
5713       DAG.getContext()->diagnose(BadIntrin);
5714       return DAG.getUNDEF(VT);
5715     }
5716 
5717     auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
5718       AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
5719     return getPreloadedValue(DAG, *MFI, VT, RegID);
5720   }
5721   case Intrinsic::amdgcn_implicitarg_ptr: {
5722     if (MFI->isEntryFunction())
5723       return getImplicitArgPtr(DAG, DL);
5724     return getPreloadedValue(DAG, *MFI, VT,
5725                              AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
5726   }
5727   case Intrinsic::amdgcn_kernarg_segment_ptr: {
5728     return getPreloadedValue(DAG, *MFI, VT,
5729                              AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
5730   }
5731   case Intrinsic::amdgcn_dispatch_id: {
5732     return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
5733   }
5734   case Intrinsic::amdgcn_rcp:
5735     return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
5736   case Intrinsic::amdgcn_rsq:
5737     return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
5738   case Intrinsic::amdgcn_rsq_legacy:
5739     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5740       return emitRemovedIntrinsicError(DAG, DL, VT);
5741 
5742     return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
5743   case Intrinsic::amdgcn_rcp_legacy:
5744     if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5745       return emitRemovedIntrinsicError(DAG, DL, VT);
5746     return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
5747   case Intrinsic::amdgcn_rsq_clamp: {
5748     if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
5749       return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
5750 
5751     Type *Type = VT.getTypeForEVT(*DAG.getContext());
5752     APFloat Max = APFloat::getLargest(Type->getFltSemantics());
5753     APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
5754 
5755     SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
5756     SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
5757                               DAG.getConstantFP(Max, DL, VT));
5758     return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
5759                        DAG.getConstantFP(Min, DL, VT));
5760   }
5761   case Intrinsic::r600_read_ngroups_x:
5762     if (Subtarget->isAmdHsaOS())
5763       return emitNonHSAIntrinsicError(DAG, DL, VT);
5764 
5765     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5766                                     SI::KernelInputOffsets::NGROUPS_X, 4, false);
5767   case Intrinsic::r600_read_ngroups_y:
5768     if (Subtarget->isAmdHsaOS())
5769       return emitNonHSAIntrinsicError(DAG, DL, VT);
5770 
5771     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5772                                     SI::KernelInputOffsets::NGROUPS_Y, 4, false);
5773   case Intrinsic::r600_read_ngroups_z:
5774     if (Subtarget->isAmdHsaOS())
5775       return emitNonHSAIntrinsicError(DAG, DL, VT);
5776 
5777     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5778                                     SI::KernelInputOffsets::NGROUPS_Z, 4, false);
5779   case Intrinsic::r600_read_global_size_x:
5780     if (Subtarget->isAmdHsaOS())
5781       return emitNonHSAIntrinsicError(DAG, DL, VT);
5782 
5783     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5784                                     SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false);
5785   case Intrinsic::r600_read_global_size_y:
5786     if (Subtarget->isAmdHsaOS())
5787       return emitNonHSAIntrinsicError(DAG, DL, VT);
5788 
5789     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5790                                     SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false);
5791   case Intrinsic::r600_read_global_size_z:
5792     if (Subtarget->isAmdHsaOS())
5793       return emitNonHSAIntrinsicError(DAG, DL, VT);
5794 
5795     return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
5796                                     SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false);
5797   case Intrinsic::r600_read_local_size_x:
5798     if (Subtarget->isAmdHsaOS())
5799       return emitNonHSAIntrinsicError(DAG, DL, VT);
5800 
5801     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5802                                   SI::KernelInputOffsets::LOCAL_SIZE_X);
5803   case Intrinsic::r600_read_local_size_y:
5804     if (Subtarget->isAmdHsaOS())
5805       return emitNonHSAIntrinsicError(DAG, DL, VT);
5806 
5807     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5808                                   SI::KernelInputOffsets::LOCAL_SIZE_Y);
5809   case Intrinsic::r600_read_local_size_z:
5810     if (Subtarget->isAmdHsaOS())
5811       return emitNonHSAIntrinsicError(DAG, DL, VT);
5812 
5813     return lowerImplicitZextParam(DAG, Op, MVT::i16,
5814                                   SI::KernelInputOffsets::LOCAL_SIZE_Z);
5815   case Intrinsic::amdgcn_workgroup_id_x:
5816     return getPreloadedValue(DAG, *MFI, VT,
5817                              AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
5818   case Intrinsic::amdgcn_workgroup_id_y:
5819     return getPreloadedValue(DAG, *MFI, VT,
5820                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
5821   case Intrinsic::amdgcn_workgroup_id_z:
5822     return getPreloadedValue(DAG, *MFI, VT,
5823                              AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
5824   case Intrinsic::amdgcn_workitem_id_x:
5825     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5826                           SDLoc(DAG.getEntryNode()),
5827                           MFI->getArgInfo().WorkItemIDX);
5828   case Intrinsic::amdgcn_workitem_id_y:
5829     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5830                           SDLoc(DAG.getEntryNode()),
5831                           MFI->getArgInfo().WorkItemIDY);
5832   case Intrinsic::amdgcn_workitem_id_z:
5833     return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5834                           SDLoc(DAG.getEntryNode()),
5835                           MFI->getArgInfo().WorkItemIDZ);
5836   case Intrinsic::amdgcn_wavefrontsize:
5837     return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(),
5838                            SDLoc(Op), MVT::i32);
5839   case Intrinsic::amdgcn_s_buffer_load: {
5840     bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10;
5841     SDValue GLC;
5842     SDValue DLC = DAG.getTargetConstant(0, DL, MVT::i1);
5843     if (!parseCachePolicy(Op.getOperand(3), DAG, &GLC, nullptr,
5844                           IsGFX10 ? &DLC : nullptr))
5845       return Op;
5846     return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5847                         DAG);
5848   }
5849   case Intrinsic::amdgcn_fdiv_fast:
5850     return lowerFDIV_FAST(Op, DAG);
5851   case Intrinsic::amdgcn_sin:
5852     return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
5853 
5854   case Intrinsic::amdgcn_cos:
5855     return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
5856 
5857   case Intrinsic::amdgcn_mul_u24:
5858     return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT, Op.getOperand(1), Op.getOperand(2));
5859   case Intrinsic::amdgcn_mul_i24:
5860     return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT, Op.getOperand(1), Op.getOperand(2));
5861 
5862   case Intrinsic::amdgcn_log_clamp: {
5863     if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
5864       return SDValue();
5865 
5866     DiagnosticInfoUnsupported BadIntrin(
5867       MF.getFunction(), "intrinsic not supported on subtarget",
5868       DL.getDebugLoc());
5869       DAG.getContext()->diagnose(BadIntrin);
5870       return DAG.getUNDEF(VT);
5871   }
5872   case Intrinsic::amdgcn_ldexp:
5873     return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
5874                        Op.getOperand(1), Op.getOperand(2));
5875 
5876   case Intrinsic::amdgcn_fract:
5877     return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
5878 
5879   case Intrinsic::amdgcn_class:
5880     return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
5881                        Op.getOperand(1), Op.getOperand(2));
5882   case Intrinsic::amdgcn_div_fmas:
5883     return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
5884                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5885                        Op.getOperand(4));
5886 
5887   case Intrinsic::amdgcn_div_fixup:
5888     return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
5889                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5890 
5891   case Intrinsic::amdgcn_trig_preop:
5892     return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
5893                        Op.getOperand(1), Op.getOperand(2));
5894   case Intrinsic::amdgcn_div_scale: {
5895     const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3));
5896 
5897     // Translate to the operands expected by the machine instruction. The
5898     // first parameter must be the same as the first instruction.
5899     SDValue Numerator = Op.getOperand(1);
5900     SDValue Denominator = Op.getOperand(2);
5901 
5902     // Note this order is opposite of the machine instruction's operations,
5903     // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
5904     // intrinsic has the numerator as the first operand to match a normal
5905     // division operation.
5906 
5907     SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
5908 
5909     return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
5910                        Denominator, Numerator);
5911   }
5912   case Intrinsic::amdgcn_icmp: {
5913     // There is a Pat that handles this variant, so return it as-is.
5914     if (Op.getOperand(1).getValueType() == MVT::i1 &&
5915         Op.getConstantOperandVal(2) == 0 &&
5916         Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE)
5917       return Op;
5918     return lowerICMPIntrinsic(*this, Op.getNode(), DAG);
5919   }
5920   case Intrinsic::amdgcn_fcmp: {
5921     return lowerFCMPIntrinsic(*this, Op.getNode(), DAG);
5922   }
5923   case Intrinsic::amdgcn_fmed3:
5924     return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
5925                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5926   case Intrinsic::amdgcn_fdot2:
5927     return DAG.getNode(AMDGPUISD::FDOT2, DL, VT,
5928                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5929                        Op.getOperand(4));
5930   case Intrinsic::amdgcn_fmul_legacy:
5931     return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
5932                        Op.getOperand(1), Op.getOperand(2));
5933   case Intrinsic::amdgcn_sffbh:
5934     return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
5935   case Intrinsic::amdgcn_sbfe:
5936     return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
5937                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5938   case Intrinsic::amdgcn_ubfe:
5939     return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
5940                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5941   case Intrinsic::amdgcn_cvt_pkrtz:
5942   case Intrinsic::amdgcn_cvt_pknorm_i16:
5943   case Intrinsic::amdgcn_cvt_pknorm_u16:
5944   case Intrinsic::amdgcn_cvt_pk_i16:
5945   case Intrinsic::amdgcn_cvt_pk_u16: {
5946     // FIXME: Stop adding cast if v2f16/v2i16 are legal.
5947     EVT VT = Op.getValueType();
5948     unsigned Opcode;
5949 
5950     if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
5951       Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
5952     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
5953       Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
5954     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
5955       Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
5956     else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
5957       Opcode = AMDGPUISD::CVT_PK_I16_I32;
5958     else
5959       Opcode = AMDGPUISD::CVT_PK_U16_U32;
5960 
5961     if (isTypeLegal(VT))
5962       return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2));
5963 
5964     SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
5965                                Op.getOperand(1), Op.getOperand(2));
5966     return DAG.getNode(ISD::BITCAST, DL, VT, Node);
5967   }
5968   case Intrinsic::amdgcn_fmad_ftz:
5969     return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1),
5970                        Op.getOperand(2), Op.getOperand(3));
5971 
5972   case Intrinsic::amdgcn_if_break:
5973     return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT,
5974                                       Op->getOperand(1), Op->getOperand(2)), 0);
5975 
5976   case Intrinsic::amdgcn_groupstaticsize: {
5977     Triple::OSType OS = getTargetMachine().getTargetTriple().getOS();
5978     if (OS == Triple::AMDHSA || OS == Triple::AMDPAL)
5979       return Op;
5980 
5981     const Module *M = MF.getFunction().getParent();
5982     const GlobalValue *GV =
5983         M->getNamedValue(Intrinsic::getName(Intrinsic::amdgcn_groupstaticsize));
5984     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0,
5985                                             SIInstrInfo::MO_ABS32_LO);
5986     return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0};
5987   }
5988   case Intrinsic::amdgcn_is_shared:
5989   case Intrinsic::amdgcn_is_private: {
5990     SDLoc SL(Op);
5991     unsigned AS = (IntrinsicID == Intrinsic::amdgcn_is_shared) ?
5992       AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
5993     SDValue Aperture = getSegmentAperture(AS, SL, DAG);
5994     SDValue SrcVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32,
5995                                  Op.getOperand(1));
5996 
5997     SDValue SrcHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, SrcVec,
5998                                 DAG.getConstant(1, SL, MVT::i32));
5999     return DAG.getSetCC(SL, MVT::i1, SrcHi, Aperture, ISD::SETEQ);
6000   }
6001   default:
6002     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6003             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
6004       return lowerImage(Op, ImageDimIntr, DAG);
6005 
6006     return Op;
6007   }
6008 }
6009 
6010 // This function computes an appropriate offset to pass to
6011 // MachineMemOperand::setOffset() based on the offset inputs to
6012 // an intrinsic.  If any of the offsets are non-contstant or
6013 // if VIndex is non-zero then this function returns 0.  Otherwise,
6014 // it returns the sum of VOffset, SOffset, and Offset.
6015 static unsigned getBufferOffsetForMMO(SDValue VOffset,
6016                                       SDValue SOffset,
6017                                       SDValue Offset,
6018                                       SDValue VIndex = SDValue()) {
6019 
6020   if (!isa<ConstantSDNode>(VOffset) || !isa<ConstantSDNode>(SOffset) ||
6021       !isa<ConstantSDNode>(Offset))
6022     return 0;
6023 
6024   if (VIndex) {
6025     if (!isa<ConstantSDNode>(VIndex) || !cast<ConstantSDNode>(VIndex)->isNullValue())
6026       return 0;
6027   }
6028 
6029   return cast<ConstantSDNode>(VOffset)->getSExtValue() +
6030          cast<ConstantSDNode>(SOffset)->getSExtValue() +
6031          cast<ConstantSDNode>(Offset)->getSExtValue();
6032 }
6033 
6034 static unsigned getDSShaderTypeValue(const MachineFunction &MF) {
6035   switch (MF.getFunction().getCallingConv()) {
6036   case CallingConv::AMDGPU_PS:
6037     return 1;
6038   case CallingConv::AMDGPU_VS:
6039     return 2;
6040   case CallingConv::AMDGPU_GS:
6041     return 3;
6042   case CallingConv::AMDGPU_HS:
6043   case CallingConv::AMDGPU_LS:
6044   case CallingConv::AMDGPU_ES:
6045     report_fatal_error("ds_ordered_count unsupported for this calling conv");
6046   case CallingConv::AMDGPU_CS:
6047   case CallingConv::AMDGPU_KERNEL:
6048   case CallingConv::C:
6049   case CallingConv::Fast:
6050   default:
6051     // Assume other calling conventions are various compute callable functions
6052     return 0;
6053   }
6054 }
6055 
6056 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
6057                                                  SelectionDAG &DAG) const {
6058   unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
6059   SDLoc DL(Op);
6060 
6061   switch (IntrID) {
6062   case Intrinsic::amdgcn_ds_ordered_add:
6063   case Intrinsic::amdgcn_ds_ordered_swap: {
6064     MemSDNode *M = cast<MemSDNode>(Op);
6065     SDValue Chain = M->getOperand(0);
6066     SDValue M0 = M->getOperand(2);
6067     SDValue Value = M->getOperand(3);
6068     unsigned IndexOperand = M->getConstantOperandVal(7);
6069     unsigned WaveRelease = M->getConstantOperandVal(8);
6070     unsigned WaveDone = M->getConstantOperandVal(9);
6071 
6072     unsigned OrderedCountIndex = IndexOperand & 0x3f;
6073     IndexOperand &= ~0x3f;
6074     unsigned CountDw = 0;
6075 
6076     if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) {
6077       CountDw = (IndexOperand >> 24) & 0xf;
6078       IndexOperand &= ~(0xf << 24);
6079 
6080       if (CountDw < 1 || CountDw > 4) {
6081         report_fatal_error(
6082             "ds_ordered_count: dword count must be between 1 and 4");
6083       }
6084     }
6085 
6086     if (IndexOperand)
6087       report_fatal_error("ds_ordered_count: bad index operand");
6088 
6089     if (WaveDone && !WaveRelease)
6090       report_fatal_error("ds_ordered_count: wave_done requires wave_release");
6091 
6092     unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
6093     unsigned ShaderType = getDSShaderTypeValue(DAG.getMachineFunction());
6094     unsigned Offset0 = OrderedCountIndex << 2;
6095     unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
6096                        (Instruction << 4);
6097 
6098     if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
6099       Offset1 |= (CountDw - 1) << 6;
6100 
6101     unsigned Offset = Offset0 | (Offset1 << 8);
6102 
6103     SDValue Ops[] = {
6104       Chain,
6105       Value,
6106       DAG.getTargetConstant(Offset, DL, MVT::i16),
6107       copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue
6108     };
6109     return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL,
6110                                    M->getVTList(), Ops, M->getMemoryVT(),
6111                                    M->getMemOperand());
6112   }
6113   case Intrinsic::amdgcn_ds_fadd: {
6114     MemSDNode *M = cast<MemSDNode>(Op);
6115     unsigned Opc;
6116     switch (IntrID) {
6117     case Intrinsic::amdgcn_ds_fadd:
6118       Opc = ISD::ATOMIC_LOAD_FADD;
6119       break;
6120     }
6121 
6122     return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(),
6123                          M->getOperand(0), M->getOperand(2), M->getOperand(3),
6124                          M->getMemOperand());
6125   }
6126   case Intrinsic::amdgcn_atomic_inc:
6127   case Intrinsic::amdgcn_atomic_dec:
6128   case Intrinsic::amdgcn_ds_fmin:
6129   case Intrinsic::amdgcn_ds_fmax: {
6130     MemSDNode *M = cast<MemSDNode>(Op);
6131     unsigned Opc;
6132     switch (IntrID) {
6133     case Intrinsic::amdgcn_atomic_inc:
6134       Opc = AMDGPUISD::ATOMIC_INC;
6135       break;
6136     case Intrinsic::amdgcn_atomic_dec:
6137       Opc = AMDGPUISD::ATOMIC_DEC;
6138       break;
6139     case Intrinsic::amdgcn_ds_fmin:
6140       Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
6141       break;
6142     case Intrinsic::amdgcn_ds_fmax:
6143       Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
6144       break;
6145     default:
6146       llvm_unreachable("Unknown intrinsic!");
6147     }
6148     SDValue Ops[] = {
6149       M->getOperand(0), // Chain
6150       M->getOperand(2), // Ptr
6151       M->getOperand(3)  // Value
6152     };
6153 
6154     return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
6155                                    M->getMemoryVT(), M->getMemOperand());
6156   }
6157   case Intrinsic::amdgcn_buffer_load:
6158   case Intrinsic::amdgcn_buffer_load_format: {
6159     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
6160     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6161     unsigned IdxEn = 1;
6162     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
6163       IdxEn = Idx->getZExtValue() != 0;
6164     SDValue Ops[] = {
6165       Op.getOperand(0), // Chain
6166       Op.getOperand(2), // rsrc
6167       Op.getOperand(3), // vindex
6168       SDValue(),        // voffset -- will be set by setBufferOffsets
6169       SDValue(),        // soffset -- will be set by setBufferOffsets
6170       SDValue(),        // offset -- will be set by setBufferOffsets
6171       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6172       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6173     };
6174 
6175     unsigned Offset = setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]);
6176     // We don't know the offset if vindex is non-zero, so clear it.
6177     if (IdxEn)
6178       Offset = 0;
6179 
6180     unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
6181         AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
6182 
6183     EVT VT = Op.getValueType();
6184     EVT IntVT = VT.changeTypeToInteger();
6185     auto *M = cast<MemSDNode>(Op);
6186     M->getMemOperand()->setOffset(Offset);
6187     EVT LoadVT = Op.getValueType();
6188 
6189     if (LoadVT.getScalarType() == MVT::f16)
6190       return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
6191                                  M, DAG, Ops);
6192 
6193     // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
6194     if (LoadVT.getScalarType() == MVT::i8 ||
6195         LoadVT.getScalarType() == MVT::i16)
6196       return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
6197 
6198     return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
6199                                M->getMemOperand(), DAG);
6200   }
6201   case Intrinsic::amdgcn_raw_buffer_load:
6202   case Intrinsic::amdgcn_raw_buffer_load_format: {
6203     const bool IsFormat = IntrID == Intrinsic::amdgcn_raw_buffer_load_format;
6204 
6205     auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
6206     SDValue Ops[] = {
6207       Op.getOperand(0), // Chain
6208       Op.getOperand(2), // rsrc
6209       DAG.getConstant(0, DL, MVT::i32), // vindex
6210       Offsets.first,    // voffset
6211       Op.getOperand(4), // soffset
6212       Offsets.second,   // offset
6213       Op.getOperand(5), // cachepolicy, swizzled buffer
6214       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6215     };
6216 
6217     auto *M = cast<MemSDNode>(Op);
6218     M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[3], Ops[4], Ops[5]));
6219     return lowerIntrinsicLoad(M, IsFormat, DAG, Ops);
6220   }
6221   case Intrinsic::amdgcn_struct_buffer_load:
6222   case Intrinsic::amdgcn_struct_buffer_load_format: {
6223     const bool IsFormat = IntrID == Intrinsic::amdgcn_struct_buffer_load_format;
6224 
6225     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6226     SDValue Ops[] = {
6227       Op.getOperand(0), // Chain
6228       Op.getOperand(2), // rsrc
6229       Op.getOperand(3), // vindex
6230       Offsets.first,    // voffset
6231       Op.getOperand(5), // soffset
6232       Offsets.second,   // offset
6233       Op.getOperand(6), // cachepolicy, swizzled buffer
6234       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6235     };
6236 
6237     auto *M = cast<MemSDNode>(Op);
6238     M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[3], Ops[4], Ops[5],
6239                                                         Ops[2]));
6240     return lowerIntrinsicLoad(cast<MemSDNode>(Op), IsFormat, DAG, Ops);
6241   }
6242   case Intrinsic::amdgcn_tbuffer_load: {
6243     MemSDNode *M = cast<MemSDNode>(Op);
6244     EVT LoadVT = Op.getValueType();
6245 
6246     unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6247     unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
6248     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
6249     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
6250     unsigned IdxEn = 1;
6251     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
6252       IdxEn = Idx->getZExtValue() != 0;
6253     SDValue Ops[] = {
6254       Op.getOperand(0),  // Chain
6255       Op.getOperand(2),  // rsrc
6256       Op.getOperand(3),  // vindex
6257       Op.getOperand(4),  // voffset
6258       Op.getOperand(5),  // soffset
6259       Op.getOperand(6),  // offset
6260       DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
6261       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6262       DAG.getTargetConstant(IdxEn, DL, MVT::i1) // idxen
6263     };
6264 
6265     if (LoadVT.getScalarType() == MVT::f16)
6266       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
6267                                  M, DAG, Ops);
6268     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
6269                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
6270                                DAG);
6271   }
6272   case Intrinsic::amdgcn_raw_tbuffer_load: {
6273     MemSDNode *M = cast<MemSDNode>(Op);
6274     EVT LoadVT = Op.getValueType();
6275     auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
6276 
6277     SDValue Ops[] = {
6278       Op.getOperand(0),  // Chain
6279       Op.getOperand(2),  // rsrc
6280       DAG.getConstant(0, DL, MVT::i32), // vindex
6281       Offsets.first,     // voffset
6282       Op.getOperand(4),  // soffset
6283       Offsets.second,    // offset
6284       Op.getOperand(5),  // format
6285       Op.getOperand(6),  // cachepolicy, swizzled buffer
6286       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6287     };
6288 
6289     if (LoadVT.getScalarType() == MVT::f16)
6290       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
6291                                  M, DAG, Ops);
6292     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
6293                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
6294                                DAG);
6295   }
6296   case Intrinsic::amdgcn_struct_tbuffer_load: {
6297     MemSDNode *M = cast<MemSDNode>(Op);
6298     EVT LoadVT = Op.getValueType();
6299     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6300 
6301     SDValue Ops[] = {
6302       Op.getOperand(0),  // Chain
6303       Op.getOperand(2),  // rsrc
6304       Op.getOperand(3),  // vindex
6305       Offsets.first,     // voffset
6306       Op.getOperand(5),  // soffset
6307       Offsets.second,    // offset
6308       Op.getOperand(6),  // format
6309       Op.getOperand(7),  // cachepolicy, swizzled buffer
6310       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6311     };
6312 
6313     if (LoadVT.getScalarType() == MVT::f16)
6314       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
6315                                  M, DAG, Ops);
6316     return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
6317                                Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
6318                                DAG);
6319   }
6320   case Intrinsic::amdgcn_buffer_atomic_swap:
6321   case Intrinsic::amdgcn_buffer_atomic_add:
6322   case Intrinsic::amdgcn_buffer_atomic_sub:
6323   case Intrinsic::amdgcn_buffer_atomic_smin:
6324   case Intrinsic::amdgcn_buffer_atomic_umin:
6325   case Intrinsic::amdgcn_buffer_atomic_smax:
6326   case Intrinsic::amdgcn_buffer_atomic_umax:
6327   case Intrinsic::amdgcn_buffer_atomic_and:
6328   case Intrinsic::amdgcn_buffer_atomic_or:
6329   case Intrinsic::amdgcn_buffer_atomic_xor: {
6330     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6331     unsigned IdxEn = 1;
6332     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6333       IdxEn = Idx->getZExtValue() != 0;
6334     SDValue Ops[] = {
6335       Op.getOperand(0), // Chain
6336       Op.getOperand(2), // vdata
6337       Op.getOperand(3), // rsrc
6338       Op.getOperand(4), // vindex
6339       SDValue(),        // voffset -- will be set by setBufferOffsets
6340       SDValue(),        // soffset -- will be set by setBufferOffsets
6341       SDValue(),        // offset -- will be set by setBufferOffsets
6342       DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
6343       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6344     };
6345     unsigned Offset = setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
6346     // We don't know the offset if vindex is non-zero, so clear it.
6347     if (IdxEn)
6348       Offset = 0;
6349     EVT VT = Op.getValueType();
6350 
6351     auto *M = cast<MemSDNode>(Op);
6352     M->getMemOperand()->setOffset(Offset);
6353     unsigned Opcode = 0;
6354 
6355     switch (IntrID) {
6356     case Intrinsic::amdgcn_buffer_atomic_swap:
6357       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6358       break;
6359     case Intrinsic::amdgcn_buffer_atomic_add:
6360       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6361       break;
6362     case Intrinsic::amdgcn_buffer_atomic_sub:
6363       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6364       break;
6365     case Intrinsic::amdgcn_buffer_atomic_smin:
6366       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6367       break;
6368     case Intrinsic::amdgcn_buffer_atomic_umin:
6369       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6370       break;
6371     case Intrinsic::amdgcn_buffer_atomic_smax:
6372       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6373       break;
6374     case Intrinsic::amdgcn_buffer_atomic_umax:
6375       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6376       break;
6377     case Intrinsic::amdgcn_buffer_atomic_and:
6378       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6379       break;
6380     case Intrinsic::amdgcn_buffer_atomic_or:
6381       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6382       break;
6383     case Intrinsic::amdgcn_buffer_atomic_xor:
6384       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6385       break;
6386     default:
6387       llvm_unreachable("unhandled atomic opcode");
6388     }
6389 
6390     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6391                                    M->getMemOperand());
6392   }
6393   case Intrinsic::amdgcn_raw_buffer_atomic_swap:
6394   case Intrinsic::amdgcn_raw_buffer_atomic_add:
6395   case Intrinsic::amdgcn_raw_buffer_atomic_sub:
6396   case Intrinsic::amdgcn_raw_buffer_atomic_smin:
6397   case Intrinsic::amdgcn_raw_buffer_atomic_umin:
6398   case Intrinsic::amdgcn_raw_buffer_atomic_smax:
6399   case Intrinsic::amdgcn_raw_buffer_atomic_umax:
6400   case Intrinsic::amdgcn_raw_buffer_atomic_and:
6401   case Intrinsic::amdgcn_raw_buffer_atomic_or:
6402   case Intrinsic::amdgcn_raw_buffer_atomic_xor:
6403   case Intrinsic::amdgcn_raw_buffer_atomic_inc:
6404   case Intrinsic::amdgcn_raw_buffer_atomic_dec: {
6405     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6406     SDValue Ops[] = {
6407       Op.getOperand(0), // Chain
6408       Op.getOperand(2), // vdata
6409       Op.getOperand(3), // rsrc
6410       DAG.getConstant(0, DL, MVT::i32), // vindex
6411       Offsets.first,    // voffset
6412       Op.getOperand(5), // soffset
6413       Offsets.second,   // offset
6414       Op.getOperand(6), // cachepolicy
6415       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6416     };
6417     EVT VT = Op.getValueType();
6418 
6419     auto *M = cast<MemSDNode>(Op);
6420     M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6]));
6421     unsigned Opcode = 0;
6422 
6423     switch (IntrID) {
6424     case Intrinsic::amdgcn_raw_buffer_atomic_swap:
6425       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6426       break;
6427     case Intrinsic::amdgcn_raw_buffer_atomic_add:
6428       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6429       break;
6430     case Intrinsic::amdgcn_raw_buffer_atomic_sub:
6431       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6432       break;
6433     case Intrinsic::amdgcn_raw_buffer_atomic_smin:
6434       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6435       break;
6436     case Intrinsic::amdgcn_raw_buffer_atomic_umin:
6437       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6438       break;
6439     case Intrinsic::amdgcn_raw_buffer_atomic_smax:
6440       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6441       break;
6442     case Intrinsic::amdgcn_raw_buffer_atomic_umax:
6443       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6444       break;
6445     case Intrinsic::amdgcn_raw_buffer_atomic_and:
6446       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6447       break;
6448     case Intrinsic::amdgcn_raw_buffer_atomic_or:
6449       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6450       break;
6451     case Intrinsic::amdgcn_raw_buffer_atomic_xor:
6452       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6453       break;
6454     case Intrinsic::amdgcn_raw_buffer_atomic_inc:
6455       Opcode = AMDGPUISD::BUFFER_ATOMIC_INC;
6456       break;
6457     case Intrinsic::amdgcn_raw_buffer_atomic_dec:
6458       Opcode = AMDGPUISD::BUFFER_ATOMIC_DEC;
6459       break;
6460     default:
6461       llvm_unreachable("unhandled atomic opcode");
6462     }
6463 
6464     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6465                                    M->getMemOperand());
6466   }
6467   case Intrinsic::amdgcn_struct_buffer_atomic_swap:
6468   case Intrinsic::amdgcn_struct_buffer_atomic_add:
6469   case Intrinsic::amdgcn_struct_buffer_atomic_sub:
6470   case Intrinsic::amdgcn_struct_buffer_atomic_smin:
6471   case Intrinsic::amdgcn_struct_buffer_atomic_umin:
6472   case Intrinsic::amdgcn_struct_buffer_atomic_smax:
6473   case Intrinsic::amdgcn_struct_buffer_atomic_umax:
6474   case Intrinsic::amdgcn_struct_buffer_atomic_and:
6475   case Intrinsic::amdgcn_struct_buffer_atomic_or:
6476   case Intrinsic::amdgcn_struct_buffer_atomic_xor:
6477   case Intrinsic::amdgcn_struct_buffer_atomic_inc:
6478   case Intrinsic::amdgcn_struct_buffer_atomic_dec: {
6479     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6480     SDValue Ops[] = {
6481       Op.getOperand(0), // Chain
6482       Op.getOperand(2), // vdata
6483       Op.getOperand(3), // rsrc
6484       Op.getOperand(4), // vindex
6485       Offsets.first,    // voffset
6486       Op.getOperand(6), // soffset
6487       Offsets.second,   // offset
6488       Op.getOperand(7), // cachepolicy
6489       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6490     };
6491     EVT VT = Op.getValueType();
6492 
6493     auto *M = cast<MemSDNode>(Op);
6494     M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6],
6495                                                         Ops[3]));
6496     unsigned Opcode = 0;
6497 
6498     switch (IntrID) {
6499     case Intrinsic::amdgcn_struct_buffer_atomic_swap:
6500       Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6501       break;
6502     case Intrinsic::amdgcn_struct_buffer_atomic_add:
6503       Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6504       break;
6505     case Intrinsic::amdgcn_struct_buffer_atomic_sub:
6506       Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6507       break;
6508     case Intrinsic::amdgcn_struct_buffer_atomic_smin:
6509       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6510       break;
6511     case Intrinsic::amdgcn_struct_buffer_atomic_umin:
6512       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6513       break;
6514     case Intrinsic::amdgcn_struct_buffer_atomic_smax:
6515       Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6516       break;
6517     case Intrinsic::amdgcn_struct_buffer_atomic_umax:
6518       Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6519       break;
6520     case Intrinsic::amdgcn_struct_buffer_atomic_and:
6521       Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6522       break;
6523     case Intrinsic::amdgcn_struct_buffer_atomic_or:
6524       Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6525       break;
6526     case Intrinsic::amdgcn_struct_buffer_atomic_xor:
6527       Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6528       break;
6529     case Intrinsic::amdgcn_struct_buffer_atomic_inc:
6530       Opcode = AMDGPUISD::BUFFER_ATOMIC_INC;
6531       break;
6532     case Intrinsic::amdgcn_struct_buffer_atomic_dec:
6533       Opcode = AMDGPUISD::BUFFER_ATOMIC_DEC;
6534       break;
6535     default:
6536       llvm_unreachable("unhandled atomic opcode");
6537     }
6538 
6539     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6540                                    M->getMemOperand());
6541   }
6542   case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
6543     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6544     unsigned IdxEn = 1;
6545     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(5)))
6546       IdxEn = Idx->getZExtValue() != 0;
6547     SDValue Ops[] = {
6548       Op.getOperand(0), // Chain
6549       Op.getOperand(2), // src
6550       Op.getOperand(3), // cmp
6551       Op.getOperand(4), // rsrc
6552       Op.getOperand(5), // vindex
6553       SDValue(),        // voffset -- will be set by setBufferOffsets
6554       SDValue(),        // soffset -- will be set by setBufferOffsets
6555       SDValue(),        // offset -- will be set by setBufferOffsets
6556       DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
6557       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6558     };
6559     unsigned Offset = setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]);
6560     // We don't know the offset if vindex is non-zero, so clear it.
6561     if (IdxEn)
6562       Offset = 0;
6563     EVT VT = Op.getValueType();
6564     auto *M = cast<MemSDNode>(Op);
6565     M->getMemOperand()->setOffset(Offset);
6566 
6567     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6568                                    Op->getVTList(), Ops, VT, M->getMemOperand());
6569   }
6570   case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: {
6571     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6572     SDValue Ops[] = {
6573       Op.getOperand(0), // Chain
6574       Op.getOperand(2), // src
6575       Op.getOperand(3), // cmp
6576       Op.getOperand(4), // rsrc
6577       DAG.getConstant(0, DL, MVT::i32), // vindex
6578       Offsets.first,    // voffset
6579       Op.getOperand(6), // soffset
6580       Offsets.second,   // offset
6581       Op.getOperand(7), // cachepolicy
6582       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6583     };
6584     EVT VT = Op.getValueType();
6585     auto *M = cast<MemSDNode>(Op);
6586     M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[5], Ops[6], Ops[7]));
6587 
6588     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6589                                    Op->getVTList(), Ops, VT, M->getMemOperand());
6590   }
6591   case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: {
6592     auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG);
6593     SDValue Ops[] = {
6594       Op.getOperand(0), // Chain
6595       Op.getOperand(2), // src
6596       Op.getOperand(3), // cmp
6597       Op.getOperand(4), // rsrc
6598       Op.getOperand(5), // vindex
6599       Offsets.first,    // voffset
6600       Op.getOperand(7), // soffset
6601       Offsets.second,   // offset
6602       Op.getOperand(8), // cachepolicy
6603       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6604     };
6605     EVT VT = Op.getValueType();
6606     auto *M = cast<MemSDNode>(Op);
6607     M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[5], Ops[6], Ops[7],
6608                                                         Ops[4]));
6609 
6610     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6611                                    Op->getVTList(), Ops, VT, M->getMemOperand());
6612   }
6613 
6614   default:
6615     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6616             AMDGPU::getImageDimIntrinsicInfo(IntrID))
6617       return lowerImage(Op, ImageDimIntr, DAG);
6618 
6619     return SDValue();
6620   }
6621 }
6622 
6623 // Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to
6624 // dwordx4 if on SI.
6625 SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL,
6626                                               SDVTList VTList,
6627                                               ArrayRef<SDValue> Ops, EVT MemVT,
6628                                               MachineMemOperand *MMO,
6629                                               SelectionDAG &DAG) const {
6630   EVT VT = VTList.VTs[0];
6631   EVT WidenedVT = VT;
6632   EVT WidenedMemVT = MemVT;
6633   if (!Subtarget->hasDwordx3LoadStores() &&
6634       (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) {
6635     WidenedVT = EVT::getVectorVT(*DAG.getContext(),
6636                                  WidenedVT.getVectorElementType(), 4);
6637     WidenedMemVT = EVT::getVectorVT(*DAG.getContext(),
6638                                     WidenedMemVT.getVectorElementType(), 4);
6639     MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16);
6640   }
6641 
6642   assert(VTList.NumVTs == 2);
6643   SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]);
6644 
6645   auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops,
6646                                        WidenedMemVT, MMO);
6647   if (WidenedVT != VT) {
6648     auto Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp,
6649                                DAG.getVectorIdxConstant(0, DL));
6650     NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL);
6651   }
6652   return NewOp;
6653 }
6654 
6655 SDValue SITargetLowering::handleD16VData(SDValue VData,
6656                                          SelectionDAG &DAG) const {
6657   EVT StoreVT = VData.getValueType();
6658 
6659   // No change for f16 and legal vector D16 types.
6660   if (!StoreVT.isVector())
6661     return VData;
6662 
6663   SDLoc DL(VData);
6664   assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16");
6665 
6666   if (Subtarget->hasUnpackedD16VMem()) {
6667     // We need to unpack the packed data to store.
6668     EVT IntStoreVT = StoreVT.changeTypeToInteger();
6669     SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
6670 
6671     EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
6672                                         StoreVT.getVectorNumElements());
6673     SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
6674     return DAG.UnrollVectorOp(ZExt.getNode());
6675   }
6676 
6677   assert(isTypeLegal(StoreVT));
6678   return VData;
6679 }
6680 
6681 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
6682                                               SelectionDAG &DAG) const {
6683   SDLoc DL(Op);
6684   SDValue Chain = Op.getOperand(0);
6685   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
6686   MachineFunction &MF = DAG.getMachineFunction();
6687 
6688   switch (IntrinsicID) {
6689   case Intrinsic::amdgcn_exp_compr: {
6690     SDValue Src0 = Op.getOperand(4);
6691     SDValue Src1 = Op.getOperand(5);
6692     // Hack around illegal type on SI by directly selecting it.
6693     if (isTypeLegal(Src0.getValueType()))
6694       return SDValue();
6695 
6696     const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
6697     SDValue Undef = DAG.getUNDEF(MVT::f32);
6698     const SDValue Ops[] = {
6699       Op.getOperand(2), // tgt
6700       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), // src0
6701       DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), // src1
6702       Undef, // src2
6703       Undef, // src3
6704       Op.getOperand(7), // vm
6705       DAG.getTargetConstant(1, DL, MVT::i1), // compr
6706       Op.getOperand(3), // en
6707       Op.getOperand(0) // Chain
6708     };
6709 
6710     unsigned Opc = Done->isNullValue() ? AMDGPU::EXP : AMDGPU::EXP_DONE;
6711     return SDValue(DAG.getMachineNode(Opc, DL, Op->getVTList(), Ops), 0);
6712   }
6713   case Intrinsic::amdgcn_s_barrier: {
6714     if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
6715       const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
6716       unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
6717       if (WGSize <= ST.getWavefrontSize())
6718         return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
6719                                           Op.getOperand(0)), 0);
6720     }
6721     return SDValue();
6722   };
6723   case Intrinsic::amdgcn_tbuffer_store: {
6724     SDValue VData = Op.getOperand(2);
6725     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6726     if (IsD16)
6727       VData = handleD16VData(VData, DAG);
6728     unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
6729     unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
6730     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
6731     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue();
6732     unsigned IdxEn = 1;
6733     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6734       IdxEn = Idx->getZExtValue() != 0;
6735     SDValue Ops[] = {
6736       Chain,
6737       VData,             // vdata
6738       Op.getOperand(3),  // rsrc
6739       Op.getOperand(4),  // vindex
6740       Op.getOperand(5),  // voffset
6741       Op.getOperand(6),  // soffset
6742       Op.getOperand(7),  // offset
6743       DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
6744       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6745       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idexen
6746     };
6747     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6748                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6749     MemSDNode *M = cast<MemSDNode>(Op);
6750     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6751                                    M->getMemoryVT(), M->getMemOperand());
6752   }
6753 
6754   case Intrinsic::amdgcn_struct_tbuffer_store: {
6755     SDValue VData = Op.getOperand(2);
6756     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6757     if (IsD16)
6758       VData = handleD16VData(VData, DAG);
6759     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6760     SDValue Ops[] = {
6761       Chain,
6762       VData,             // vdata
6763       Op.getOperand(3),  // rsrc
6764       Op.getOperand(4),  // vindex
6765       Offsets.first,     // voffset
6766       Op.getOperand(6),  // soffset
6767       Offsets.second,    // offset
6768       Op.getOperand(7),  // format
6769       Op.getOperand(8),  // cachepolicy, swizzled buffer
6770       DAG.getTargetConstant(1, DL, MVT::i1), // idexen
6771     };
6772     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6773                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6774     MemSDNode *M = cast<MemSDNode>(Op);
6775     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6776                                    M->getMemoryVT(), M->getMemOperand());
6777   }
6778 
6779   case Intrinsic::amdgcn_raw_tbuffer_store: {
6780     SDValue VData = Op.getOperand(2);
6781     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6782     if (IsD16)
6783       VData = handleD16VData(VData, DAG);
6784     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6785     SDValue Ops[] = {
6786       Chain,
6787       VData,             // vdata
6788       Op.getOperand(3),  // rsrc
6789       DAG.getConstant(0, DL, MVT::i32), // vindex
6790       Offsets.first,     // voffset
6791       Op.getOperand(5),  // soffset
6792       Offsets.second,    // offset
6793       Op.getOperand(6),  // format
6794       Op.getOperand(7),  // cachepolicy, swizzled buffer
6795       DAG.getTargetConstant(0, DL, MVT::i1), // idexen
6796     };
6797     unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6798                            AMDGPUISD::TBUFFER_STORE_FORMAT;
6799     MemSDNode *M = cast<MemSDNode>(Op);
6800     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6801                                    M->getMemoryVT(), M->getMemOperand());
6802   }
6803 
6804   case Intrinsic::amdgcn_buffer_store:
6805   case Intrinsic::amdgcn_buffer_store_format: {
6806     SDValue VData = Op.getOperand(2);
6807     bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6808     if (IsD16)
6809       VData = handleD16VData(VData, DAG);
6810     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6811     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6812     unsigned IdxEn = 1;
6813     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6814       IdxEn = Idx->getZExtValue() != 0;
6815     SDValue Ops[] = {
6816       Chain,
6817       VData,
6818       Op.getOperand(3), // rsrc
6819       Op.getOperand(4), // vindex
6820       SDValue(), // voffset -- will be set by setBufferOffsets
6821       SDValue(), // soffset -- will be set by setBufferOffsets
6822       SDValue(), // offset -- will be set by setBufferOffsets
6823       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6824       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6825     };
6826     unsigned Offset = setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
6827     // We don't know the offset if vindex is non-zero, so clear it.
6828     if (IdxEn)
6829       Offset = 0;
6830     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
6831                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6832     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6833     MemSDNode *M = cast<MemSDNode>(Op);
6834     M->getMemOperand()->setOffset(Offset);
6835 
6836     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6837     EVT VDataType = VData.getValueType().getScalarType();
6838     if (VDataType == MVT::i8 || VDataType == MVT::i16)
6839       return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6840 
6841     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6842                                    M->getMemoryVT(), M->getMemOperand());
6843   }
6844 
6845   case Intrinsic::amdgcn_raw_buffer_store:
6846   case Intrinsic::amdgcn_raw_buffer_store_format: {
6847     const bool IsFormat =
6848         IntrinsicID == Intrinsic::amdgcn_raw_buffer_store_format;
6849 
6850     SDValue VData = Op.getOperand(2);
6851     EVT VDataVT = VData.getValueType();
6852     EVT EltType = VDataVT.getScalarType();
6853     bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
6854     if (IsD16)
6855       VData = handleD16VData(VData, DAG);
6856 
6857     if (!isTypeLegal(VDataVT)) {
6858       VData =
6859           DAG.getNode(ISD::BITCAST, DL,
6860                       getEquivalentMemType(*DAG.getContext(), VDataVT), VData);
6861     }
6862 
6863     auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6864     SDValue Ops[] = {
6865       Chain,
6866       VData,
6867       Op.getOperand(3), // rsrc
6868       DAG.getConstant(0, DL, MVT::i32), // vindex
6869       Offsets.first,    // voffset
6870       Op.getOperand(5), // soffset
6871       Offsets.second,   // offset
6872       Op.getOperand(6), // cachepolicy, swizzled buffer
6873       DAG.getTargetConstant(0, DL, MVT::i1), // idxen
6874     };
6875     unsigned Opc =
6876         IsFormat ? AMDGPUISD::BUFFER_STORE_FORMAT : AMDGPUISD::BUFFER_STORE;
6877     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6878     MemSDNode *M = cast<MemSDNode>(Op);
6879     M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6]));
6880 
6881     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6882     if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32)
6883       return handleByteShortBufferStores(DAG, VDataVT, DL, Ops, M);
6884 
6885     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6886                                    M->getMemoryVT(), M->getMemOperand());
6887   }
6888 
6889   case Intrinsic::amdgcn_struct_buffer_store:
6890   case Intrinsic::amdgcn_struct_buffer_store_format: {
6891     const bool IsFormat =
6892         IntrinsicID == Intrinsic::amdgcn_struct_buffer_store_format;
6893 
6894     SDValue VData = Op.getOperand(2);
6895     EVT VDataVT = VData.getValueType();
6896     EVT EltType = VDataVT.getScalarType();
6897     bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
6898 
6899     if (IsD16)
6900       VData = handleD16VData(VData, DAG);
6901 
6902     if (!isTypeLegal(VDataVT)) {
6903       VData =
6904           DAG.getNode(ISD::BITCAST, DL,
6905                       getEquivalentMemType(*DAG.getContext(), VDataVT), VData);
6906     }
6907 
6908     auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6909     SDValue Ops[] = {
6910       Chain,
6911       VData,
6912       Op.getOperand(3), // rsrc
6913       Op.getOperand(4), // vindex
6914       Offsets.first,    // voffset
6915       Op.getOperand(6), // soffset
6916       Offsets.second,   // offset
6917       Op.getOperand(7), // cachepolicy, swizzled buffer
6918       DAG.getTargetConstant(1, DL, MVT::i1), // idxen
6919     };
6920     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ?
6921                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6922     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6923     MemSDNode *M = cast<MemSDNode>(Op);
6924     M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6],
6925                                                         Ops[3]));
6926 
6927     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6928     EVT VDataType = VData.getValueType().getScalarType();
6929     if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32)
6930       return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6931 
6932     return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6933                                    M->getMemoryVT(), M->getMemOperand());
6934   }
6935 
6936   case Intrinsic::amdgcn_buffer_atomic_fadd: {
6937     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6938     unsigned IdxEn = 1;
6939     if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6940       IdxEn = Idx->getZExtValue() != 0;
6941     SDValue Ops[] = {
6942       Chain,
6943       Op.getOperand(2), // vdata
6944       Op.getOperand(3), // rsrc
6945       Op.getOperand(4), // vindex
6946       SDValue(),        // voffset -- will be set by setBufferOffsets
6947       SDValue(),        // soffset -- will be set by setBufferOffsets
6948       SDValue(),        // offset -- will be set by setBufferOffsets
6949       DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
6950       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
6951     };
6952     unsigned Offset = setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
6953     // We don't know the offset if vindex is non-zero, so clear it.
6954     if (IdxEn)
6955       Offset = 0;
6956     EVT VT = Op.getOperand(2).getValueType();
6957 
6958     auto *M = cast<MemSDNode>(Op);
6959     M->getMemOperand()->setOffset(Offset);
6960     unsigned Opcode = VT.isVector() ? AMDGPUISD::BUFFER_ATOMIC_PK_FADD
6961                                     : AMDGPUISD::BUFFER_ATOMIC_FADD;
6962 
6963     return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6964                                    M->getMemOperand());
6965   }
6966 
6967   case Intrinsic::amdgcn_global_atomic_fadd: {
6968     SDValue Ops[] = {
6969       Chain,
6970       Op.getOperand(2), // ptr
6971       Op.getOperand(3)  // vdata
6972     };
6973     EVT VT = Op.getOperand(3).getValueType();
6974 
6975     auto *M = cast<MemSDNode>(Op);
6976     if (VT.isVector()) {
6977       return DAG.getMemIntrinsicNode(
6978         AMDGPUISD::ATOMIC_PK_FADD, DL, Op->getVTList(), Ops, VT,
6979         M->getMemOperand());
6980     }
6981 
6982     return DAG.getAtomic(ISD::ATOMIC_LOAD_FADD, DL, VT,
6983                          DAG.getVTList(VT, MVT::Other), Ops,
6984                          M->getMemOperand()).getValue(1);
6985   }
6986   case Intrinsic::amdgcn_end_cf:
6987     return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other,
6988                                       Op->getOperand(2), Chain), 0);
6989 
6990   default: {
6991     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6992             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
6993       return lowerImage(Op, ImageDimIntr, DAG);
6994 
6995     return Op;
6996   }
6997   }
6998 }
6999 
7000 // The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args:
7001 // offset (the offset that is included in bounds checking and swizzling, to be
7002 // split between the instruction's voffset and immoffset fields) and soffset
7003 // (the offset that is excluded from bounds checking and swizzling, to go in
7004 // the instruction's soffset field).  This function takes the first kind of
7005 // offset and figures out how to split it between voffset and immoffset.
7006 std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets(
7007     SDValue Offset, SelectionDAG &DAG) const {
7008   SDLoc DL(Offset);
7009   const unsigned MaxImm = 4095;
7010   SDValue N0 = Offset;
7011   ConstantSDNode *C1 = nullptr;
7012 
7013   if ((C1 = dyn_cast<ConstantSDNode>(N0)))
7014     N0 = SDValue();
7015   else if (DAG.isBaseWithConstantOffset(N0)) {
7016     C1 = cast<ConstantSDNode>(N0.getOperand(1));
7017     N0 = N0.getOperand(0);
7018   }
7019 
7020   if (C1) {
7021     unsigned ImmOffset = C1->getZExtValue();
7022     // If the immediate value is too big for the immoffset field, put the value
7023     // and -4096 into the immoffset field so that the value that is copied/added
7024     // for the voffset field is a multiple of 4096, and it stands more chance
7025     // of being CSEd with the copy/add for another similar load/store.
7026     // However, do not do that rounding down to a multiple of 4096 if that is a
7027     // negative number, as it appears to be illegal to have a negative offset
7028     // in the vgpr, even if adding the immediate offset makes it positive.
7029     unsigned Overflow = ImmOffset & ~MaxImm;
7030     ImmOffset -= Overflow;
7031     if ((int32_t)Overflow < 0) {
7032       Overflow += ImmOffset;
7033       ImmOffset = 0;
7034     }
7035     C1 = cast<ConstantSDNode>(DAG.getTargetConstant(ImmOffset, DL, MVT::i32));
7036     if (Overflow) {
7037       auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32);
7038       if (!N0)
7039         N0 = OverflowVal;
7040       else {
7041         SDValue Ops[] = { N0, OverflowVal };
7042         N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops);
7043       }
7044     }
7045   }
7046   if (!N0)
7047     N0 = DAG.getConstant(0, DL, MVT::i32);
7048   if (!C1)
7049     C1 = cast<ConstantSDNode>(DAG.getTargetConstant(0, DL, MVT::i32));
7050   return {N0, SDValue(C1, 0)};
7051 }
7052 
7053 // Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the
7054 // three offsets (voffset, soffset and instoffset) into the SDValue[3] array
7055 // pointed to by Offsets.
7056 unsigned SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
7057                                         SelectionDAG &DAG, SDValue *Offsets,
7058                                         unsigned Align) const {
7059   SDLoc DL(CombinedOffset);
7060   if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) {
7061     uint32_t Imm = C->getZExtValue();
7062     uint32_t SOffset, ImmOffset;
7063     if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, Align)) {
7064       Offsets[0] = DAG.getConstant(0, DL, MVT::i32);
7065       Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
7066       Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32);
7067       return SOffset + ImmOffset;
7068     }
7069   }
7070   if (DAG.isBaseWithConstantOffset(CombinedOffset)) {
7071     SDValue N0 = CombinedOffset.getOperand(0);
7072     SDValue N1 = CombinedOffset.getOperand(1);
7073     uint32_t SOffset, ImmOffset;
7074     int Offset = cast<ConstantSDNode>(N1)->getSExtValue();
7075     if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset,
7076                                                 Subtarget, Align)) {
7077       Offsets[0] = N0;
7078       Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
7079       Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32);
7080       return 0;
7081     }
7082   }
7083   Offsets[0] = CombinedOffset;
7084   Offsets[1] = DAG.getConstant(0, DL, MVT::i32);
7085   Offsets[2] = DAG.getTargetConstant(0, DL, MVT::i32);
7086   return 0;
7087 }
7088 
7089 // Handle 8 bit and 16 bit buffer loads
7090 SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG,
7091                                                      EVT LoadVT, SDLoc DL,
7092                                                      ArrayRef<SDValue> Ops,
7093                                                      MemSDNode *M) const {
7094   EVT IntVT = LoadVT.changeTypeToInteger();
7095   unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ?
7096          AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT;
7097 
7098   SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other);
7099   SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList,
7100                                                Ops, IntVT,
7101                                                M->getMemOperand());
7102   SDValue LoadVal = DAG.getNode(ISD::TRUNCATE, DL, IntVT, BufferLoad);
7103   LoadVal = DAG.getNode(ISD::BITCAST, DL, LoadVT, LoadVal);
7104 
7105   return DAG.getMergeValues({LoadVal, BufferLoad.getValue(1)}, DL);
7106 }
7107 
7108 // Handle 8 bit and 16 bit buffer stores
7109 SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG,
7110                                                       EVT VDataType, SDLoc DL,
7111                                                       SDValue Ops[],
7112                                                       MemSDNode *M) const {
7113   if (VDataType == MVT::f16)
7114     Ops[1] = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Ops[1]);
7115 
7116   SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]);
7117   Ops[1] = BufferStoreExt;
7118   unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE :
7119                                  AMDGPUISD::BUFFER_STORE_SHORT;
7120   ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9);
7121   return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType,
7122                                      M->getMemOperand());
7123 }
7124 
7125 static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
7126                                  ISD::LoadExtType ExtType, SDValue Op,
7127                                  const SDLoc &SL, EVT VT) {
7128   if (VT.bitsLT(Op.getValueType()))
7129     return DAG.getNode(ISD::TRUNCATE, SL, VT, Op);
7130 
7131   switch (ExtType) {
7132   case ISD::SEXTLOAD:
7133     return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op);
7134   case ISD::ZEXTLOAD:
7135     return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op);
7136   case ISD::EXTLOAD:
7137     return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op);
7138   case ISD::NON_EXTLOAD:
7139     return Op;
7140   }
7141 
7142   llvm_unreachable("invalid ext type");
7143 }
7144 
7145 SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
7146   SelectionDAG &DAG = DCI.DAG;
7147   if (Ld->getAlignment() < 4 || Ld->isDivergent())
7148     return SDValue();
7149 
7150   // FIXME: Constant loads should all be marked invariant.
7151   unsigned AS = Ld->getAddressSpace();
7152   if (AS != AMDGPUAS::CONSTANT_ADDRESS &&
7153       AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
7154       (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant()))
7155     return SDValue();
7156 
7157   // Don't do this early, since it may interfere with adjacent load merging for
7158   // illegal types. We can avoid losing alignment information for exotic types
7159   // pre-legalize.
7160   EVT MemVT = Ld->getMemoryVT();
7161   if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) ||
7162       MemVT.getSizeInBits() >= 32)
7163     return SDValue();
7164 
7165   SDLoc SL(Ld);
7166 
7167   assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
7168          "unexpected vector extload");
7169 
7170   // TODO: Drop only high part of range.
7171   SDValue Ptr = Ld->getBasePtr();
7172   SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
7173                                 MVT::i32, SL, Ld->getChain(), Ptr,
7174                                 Ld->getOffset(),
7175                                 Ld->getPointerInfo(), MVT::i32,
7176                                 Ld->getAlignment(),
7177                                 Ld->getMemOperand()->getFlags(),
7178                                 Ld->getAAInfo(),
7179                                 nullptr); // Drop ranges
7180 
7181   EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
7182   if (MemVT.isFloatingPoint()) {
7183     assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
7184            "unexpected fp extload");
7185     TruncVT = MemVT.changeTypeToInteger();
7186   }
7187 
7188   SDValue Cvt = NewLoad;
7189   if (Ld->getExtensionType() == ISD::SEXTLOAD) {
7190     Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad,
7191                       DAG.getValueType(TruncVT));
7192   } else if (Ld->getExtensionType() == ISD::ZEXTLOAD ||
7193              Ld->getExtensionType() == ISD::NON_EXTLOAD) {
7194     Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT);
7195   } else {
7196     assert(Ld->getExtensionType() == ISD::EXTLOAD);
7197   }
7198 
7199   EVT VT = Ld->getValueType(0);
7200   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
7201 
7202   DCI.AddToWorklist(Cvt.getNode());
7203 
7204   // We may need to handle exotic cases, such as i16->i64 extloads, so insert
7205   // the appropriate extension from the 32-bit load.
7206   Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT);
7207   DCI.AddToWorklist(Cvt.getNode());
7208 
7209   // Handle conversion back to floating point if necessary.
7210   Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt);
7211 
7212   return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL);
7213 }
7214 
7215 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7216   SDLoc DL(Op);
7217   LoadSDNode *Load = cast<LoadSDNode>(Op);
7218   ISD::LoadExtType ExtType = Load->getExtensionType();
7219   EVT MemVT = Load->getMemoryVT();
7220 
7221   if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
7222     if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
7223       return SDValue();
7224 
7225     // FIXME: Copied from PPC
7226     // First, load into 32 bits, then truncate to 1 bit.
7227 
7228     SDValue Chain = Load->getChain();
7229     SDValue BasePtr = Load->getBasePtr();
7230     MachineMemOperand *MMO = Load->getMemOperand();
7231 
7232     EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
7233 
7234     SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
7235                                    BasePtr, RealMemVT, MMO);
7236 
7237     if (!MemVT.isVector()) {
7238       SDValue Ops[] = {
7239         DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
7240         NewLD.getValue(1)
7241       };
7242 
7243       return DAG.getMergeValues(Ops, DL);
7244     }
7245 
7246     SmallVector<SDValue, 3> Elts;
7247     for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) {
7248       SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD,
7249                                 DAG.getConstant(I, DL, MVT::i32));
7250 
7251       Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt));
7252     }
7253 
7254     SDValue Ops[] = {
7255       DAG.getBuildVector(MemVT, DL, Elts),
7256       NewLD.getValue(1)
7257     };
7258 
7259     return DAG.getMergeValues(Ops, DL);
7260   }
7261 
7262   if (!MemVT.isVector())
7263     return SDValue();
7264 
7265   assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
7266          "Custom lowering for non-i32 vectors hasn't been implemented.");
7267 
7268   if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
7269                                       MemVT, *Load->getMemOperand())) {
7270     SDValue Ops[2];
7271     std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
7272     return DAG.getMergeValues(Ops, DL);
7273   }
7274 
7275   unsigned Alignment = Load->getAlignment();
7276   unsigned AS = Load->getAddressSpace();
7277   if (Subtarget->hasLDSMisalignedBug() &&
7278       AS == AMDGPUAS::FLAT_ADDRESS &&
7279       Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) {
7280     return SplitVectorLoad(Op, DAG);
7281   }
7282 
7283   MachineFunction &MF = DAG.getMachineFunction();
7284   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
7285   // If there is a possibilty that flat instruction access scratch memory
7286   // then we need to use the same legalization rules we use for private.
7287   if (AS == AMDGPUAS::FLAT_ADDRESS &&
7288       !Subtarget->hasMultiDwordFlatScratchAddressing())
7289     AS = MFI->hasFlatScratchInit() ?
7290          AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
7291 
7292   unsigned NumElements = MemVT.getVectorNumElements();
7293 
7294   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
7295       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
7296     if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) {
7297       if (MemVT.isPow2VectorType())
7298         return SDValue();
7299       if (NumElements == 3)
7300         return WidenVectorLoad(Op, DAG);
7301       return SplitVectorLoad(Op, DAG);
7302     }
7303     // Non-uniform loads will be selected to MUBUF instructions, so they
7304     // have the same legalization requirements as global and private
7305     // loads.
7306     //
7307   }
7308 
7309   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
7310       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
7311       AS == AMDGPUAS::GLOBAL_ADDRESS) {
7312     if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
7313         !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) &&
7314         Alignment >= 4 && NumElements < 32) {
7315       if (MemVT.isPow2VectorType())
7316         return SDValue();
7317       if (NumElements == 3)
7318         return WidenVectorLoad(Op, DAG);
7319       return SplitVectorLoad(Op, DAG);
7320     }
7321     // Non-uniform loads will be selected to MUBUF instructions, so they
7322     // have the same legalization requirements as global and private
7323     // loads.
7324     //
7325   }
7326   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
7327       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
7328       AS == AMDGPUAS::GLOBAL_ADDRESS ||
7329       AS == AMDGPUAS::FLAT_ADDRESS) {
7330     if (NumElements > 4)
7331       return SplitVectorLoad(Op, DAG);
7332     // v3 loads not supported on SI.
7333     if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7334       return WidenVectorLoad(Op, DAG);
7335     // v3 and v4 loads are supported for private and global memory.
7336     return SDValue();
7337   }
7338   if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
7339     // Depending on the setting of the private_element_size field in the
7340     // resource descriptor, we can only make private accesses up to a certain
7341     // size.
7342     switch (Subtarget->getMaxPrivateElementSize()) {
7343     case 4: {
7344       SDValue Ops[2];
7345       std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
7346       return DAG.getMergeValues(Ops, DL);
7347     }
7348     case 8:
7349       if (NumElements > 2)
7350         return SplitVectorLoad(Op, DAG);
7351       return SDValue();
7352     case 16:
7353       // Same as global/flat
7354       if (NumElements > 4)
7355         return SplitVectorLoad(Op, DAG);
7356       // v3 loads not supported on SI.
7357       if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7358         return WidenVectorLoad(Op, DAG);
7359       return SDValue();
7360     default:
7361       llvm_unreachable("unsupported private_element_size");
7362     }
7363   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
7364     // Use ds_read_b128 if possible.
7365     if (Subtarget->useDS128() && Load->getAlignment() >= 16 &&
7366         MemVT.getStoreSize() == 16)
7367       return SDValue();
7368 
7369     if (NumElements > 2)
7370       return SplitVectorLoad(Op, DAG);
7371 
7372     // SI has a hardware bug in the LDS / GDS boounds checking: if the base
7373     // address is negative, then the instruction is incorrectly treated as
7374     // out-of-bounds even if base + offsets is in bounds. Split vectorized
7375     // loads here to avoid emitting ds_read2_b32. We may re-combine the
7376     // load later in the SILoadStoreOptimizer.
7377     if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
7378         NumElements == 2 && MemVT.getStoreSize() == 8 &&
7379         Load->getAlignment() < 8) {
7380       return SplitVectorLoad(Op, DAG);
7381     }
7382   }
7383   return SDValue();
7384 }
7385 
7386 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
7387   EVT VT = Op.getValueType();
7388   assert(VT.getSizeInBits() == 64);
7389 
7390   SDLoc DL(Op);
7391   SDValue Cond = Op.getOperand(0);
7392 
7393   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
7394   SDValue One = DAG.getConstant(1, DL, MVT::i32);
7395 
7396   SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
7397   SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
7398 
7399   SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
7400   SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
7401 
7402   SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
7403 
7404   SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
7405   SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
7406 
7407   SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
7408 
7409   SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
7410   return DAG.getNode(ISD::BITCAST, DL, VT, Res);
7411 }
7412 
7413 // Catch division cases where we can use shortcuts with rcp and rsq
7414 // instructions.
7415 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
7416                                               SelectionDAG &DAG) const {
7417   SDLoc SL(Op);
7418   SDValue LHS = Op.getOperand(0);
7419   SDValue RHS = Op.getOperand(1);
7420   EVT VT = Op.getValueType();
7421   const SDNodeFlags Flags = Op->getFlags();
7422 
7423   bool AllowInaccurateRcp = DAG.getTarget().Options.UnsafeFPMath ||
7424                             Flags.hasApproximateFuncs();
7425 
7426   // Without !fpmath accuracy information, we can't do more because we don't
7427   // know exactly whether rcp is accurate enough to meet !fpmath requirement.
7428   if (!AllowInaccurateRcp)
7429     return SDValue();
7430 
7431   if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
7432     if (CLHS->isExactlyValue(1.0)) {
7433       // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
7434       // the CI documentation has a worst case error of 1 ulp.
7435       // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
7436       // use it as long as we aren't trying to use denormals.
7437       //
7438       // v_rcp_f16 and v_rsq_f16 DO support denormals.
7439 
7440       // 1.0 / sqrt(x) -> rsq(x)
7441 
7442       // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
7443       // error seems really high at 2^29 ULP.
7444       if (RHS.getOpcode() == ISD::FSQRT)
7445         return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
7446 
7447       // 1.0 / x -> rcp(x)
7448       return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
7449     }
7450 
7451     // Same as for 1.0, but expand the sign out of the constant.
7452     if (CLHS->isExactlyValue(-1.0)) {
7453       // -1.0 / x -> rcp (fneg x)
7454       SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
7455       return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
7456     }
7457   }
7458 
7459   // Turn into multiply by the reciprocal.
7460   // x / y -> x * (1.0 / y)
7461   SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
7462   return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
7463 }
7464 
7465 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
7466                           EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
7467   if (GlueChain->getNumValues() <= 1) {
7468     return DAG.getNode(Opcode, SL, VT, A, B);
7469   }
7470 
7471   assert(GlueChain->getNumValues() == 3);
7472 
7473   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
7474   switch (Opcode) {
7475   default: llvm_unreachable("no chain equivalent for opcode");
7476   case ISD::FMUL:
7477     Opcode = AMDGPUISD::FMUL_W_CHAIN;
7478     break;
7479   }
7480 
7481   return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
7482                      GlueChain.getValue(2));
7483 }
7484 
7485 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
7486                            EVT VT, SDValue A, SDValue B, SDValue C,
7487                            SDValue GlueChain) {
7488   if (GlueChain->getNumValues() <= 1) {
7489     return DAG.getNode(Opcode, SL, VT, A, B, C);
7490   }
7491 
7492   assert(GlueChain->getNumValues() == 3);
7493 
7494   SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
7495   switch (Opcode) {
7496   default: llvm_unreachable("no chain equivalent for opcode");
7497   case ISD::FMA:
7498     Opcode = AMDGPUISD::FMA_W_CHAIN;
7499     break;
7500   }
7501 
7502   return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
7503                      GlueChain.getValue(2));
7504 }
7505 
7506 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
7507   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
7508     return FastLowered;
7509 
7510   SDLoc SL(Op);
7511   SDValue Src0 = Op.getOperand(0);
7512   SDValue Src1 = Op.getOperand(1);
7513 
7514   SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
7515   SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
7516 
7517   SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
7518   SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
7519 
7520   SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
7521   SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
7522 
7523   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
7524 }
7525 
7526 // Faster 2.5 ULP division that does not support denormals.
7527 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
7528   SDLoc SL(Op);
7529   SDValue LHS = Op.getOperand(1);
7530   SDValue RHS = Op.getOperand(2);
7531 
7532   SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
7533 
7534   const APFloat K0Val(BitsToFloat(0x6f800000));
7535   const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
7536 
7537   const APFloat K1Val(BitsToFloat(0x2f800000));
7538   const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
7539 
7540   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
7541 
7542   EVT SetCCVT =
7543     getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
7544 
7545   SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
7546 
7547   SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
7548 
7549   // TODO: Should this propagate fast-math-flags?
7550   r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
7551 
7552   // rcp does not support denormals.
7553   SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
7554 
7555   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
7556 
7557   return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
7558 }
7559 
7560 // Returns immediate value for setting the F32 denorm mode when using the
7561 // S_DENORM_MODE instruction.
7562 static const SDValue getSPDenormModeValue(int SPDenormMode, SelectionDAG &DAG,
7563                                           const SDLoc &SL, const GCNSubtarget *ST) {
7564   assert(ST->hasDenormModeInst() && "Requires S_DENORM_MODE");
7565   int DPDenormModeDefault = hasFP64FP16Denormals(DAG.getMachineFunction())
7566                                 ? FP_DENORM_FLUSH_NONE
7567                                 : FP_DENORM_FLUSH_IN_FLUSH_OUT;
7568 
7569   int Mode = SPDenormMode | (DPDenormModeDefault << 2);
7570   return DAG.getTargetConstant(Mode, SL, MVT::i32);
7571 }
7572 
7573 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
7574   if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
7575     return FastLowered;
7576 
7577   SDLoc SL(Op);
7578   SDValue LHS = Op.getOperand(0);
7579   SDValue RHS = Op.getOperand(1);
7580 
7581   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
7582 
7583   SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
7584 
7585   SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
7586                                           RHS, RHS, LHS);
7587   SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
7588                                         LHS, RHS, LHS);
7589 
7590   // Denominator is scaled to not be denormal, so using rcp is ok.
7591   SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
7592                                   DenominatorScaled);
7593   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
7594                                      DenominatorScaled);
7595 
7596   const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
7597                                (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
7598                                (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
7599   const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);
7600 
7601   const bool HasFP32Denormals = hasFP32Denormals(DAG.getMachineFunction());
7602 
7603   if (!HasFP32Denormals) {
7604     SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
7605 
7606     SDValue EnableDenorm;
7607     if (Subtarget->hasDenormModeInst()) {
7608       const SDValue EnableDenormValue =
7609           getSPDenormModeValue(FP_DENORM_FLUSH_NONE, DAG, SL, Subtarget);
7610 
7611       EnableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, BindParamVTs,
7612                                  DAG.getEntryNode(), EnableDenormValue);
7613     } else {
7614       const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
7615                                                         SL, MVT::i32);
7616       EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
7617                                  DAG.getEntryNode(), EnableDenormValue,
7618                                  BitField);
7619     }
7620 
7621     SDValue Ops[3] = {
7622       NegDivScale0,
7623       EnableDenorm.getValue(0),
7624       EnableDenorm.getValue(1)
7625     };
7626 
7627     NegDivScale0 = DAG.getMergeValues(Ops, SL);
7628   }
7629 
7630   SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
7631                              ApproxRcp, One, NegDivScale0);
7632 
7633   SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
7634                              ApproxRcp, Fma0);
7635 
7636   SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
7637                            Fma1, Fma1);
7638 
7639   SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
7640                              NumeratorScaled, Mul);
7641 
7642   SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul, Fma2);
7643 
7644   SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
7645                              NumeratorScaled, Fma3);
7646 
7647   if (!HasFP32Denormals) {
7648     SDValue DisableDenorm;
7649     if (Subtarget->hasDenormModeInst()) {
7650       const SDValue DisableDenormValue =
7651           getSPDenormModeValue(FP_DENORM_FLUSH_IN_FLUSH_OUT, DAG, SL, Subtarget);
7652 
7653       DisableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, MVT::Other,
7654                                   Fma4.getValue(1), DisableDenormValue,
7655                                   Fma4.getValue(2));
7656     } else {
7657       const SDValue DisableDenormValue =
7658           DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
7659 
7660       DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
7661                                   Fma4.getValue(1), DisableDenormValue,
7662                                   BitField, Fma4.getValue(2));
7663     }
7664 
7665     SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
7666                                       DisableDenorm, DAG.getRoot());
7667     DAG.setRoot(OutputChain);
7668   }
7669 
7670   SDValue Scale = NumeratorScaled.getValue(1);
7671   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
7672                              Fma4, Fma1, Fma3, Scale);
7673 
7674   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
7675 }
7676 
7677 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
7678   if (DAG.getTarget().Options.UnsafeFPMath)
7679     return lowerFastUnsafeFDIV(Op, DAG);
7680 
7681   SDLoc SL(Op);
7682   SDValue X = Op.getOperand(0);
7683   SDValue Y = Op.getOperand(1);
7684 
7685   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
7686 
7687   SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
7688 
7689   SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
7690 
7691   SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
7692 
7693   SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
7694 
7695   SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
7696 
7697   SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
7698 
7699   SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
7700 
7701   SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
7702 
7703   SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
7704   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
7705 
7706   SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
7707                              NegDivScale0, Mul, DivScale1);
7708 
7709   SDValue Scale;
7710 
7711   if (!Subtarget->hasUsableDivScaleConditionOutput()) {
7712     // Workaround a hardware bug on SI where the condition output from div_scale
7713     // is not usable.
7714 
7715     const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
7716 
7717     // Figure out if the scale to use for div_fmas.
7718     SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
7719     SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
7720     SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
7721     SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
7722 
7723     SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
7724     SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
7725 
7726     SDValue Scale0Hi
7727       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
7728     SDValue Scale1Hi
7729       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
7730 
7731     SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
7732     SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
7733     Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
7734   } else {
7735     Scale = DivScale1.getValue(1);
7736   }
7737 
7738   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
7739                              Fma4, Fma3, Mul, Scale);
7740 
7741   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
7742 }
7743 
7744 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
7745   EVT VT = Op.getValueType();
7746 
7747   if (VT == MVT::f32)
7748     return LowerFDIV32(Op, DAG);
7749 
7750   if (VT == MVT::f64)
7751     return LowerFDIV64(Op, DAG);
7752 
7753   if (VT == MVT::f16)
7754     return LowerFDIV16(Op, DAG);
7755 
7756   llvm_unreachable("Unexpected type for fdiv");
7757 }
7758 
7759 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7760   SDLoc DL(Op);
7761   StoreSDNode *Store = cast<StoreSDNode>(Op);
7762   EVT VT = Store->getMemoryVT();
7763 
7764   if (VT == MVT::i1) {
7765     return DAG.getTruncStore(Store->getChain(), DL,
7766        DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
7767        Store->getBasePtr(), MVT::i1, Store->getMemOperand());
7768   }
7769 
7770   assert(VT.isVector() &&
7771          Store->getValue().getValueType().getScalarType() == MVT::i32);
7772 
7773   if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
7774                                       VT, *Store->getMemOperand())) {
7775     return expandUnalignedStore(Store, DAG);
7776   }
7777 
7778   unsigned AS = Store->getAddressSpace();
7779   if (Subtarget->hasLDSMisalignedBug() &&
7780       AS == AMDGPUAS::FLAT_ADDRESS &&
7781       Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) {
7782     return SplitVectorStore(Op, DAG);
7783   }
7784 
7785   MachineFunction &MF = DAG.getMachineFunction();
7786   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
7787   // If there is a possibilty that flat instruction access scratch memory
7788   // then we need to use the same legalization rules we use for private.
7789   if (AS == AMDGPUAS::FLAT_ADDRESS &&
7790       !Subtarget->hasMultiDwordFlatScratchAddressing())
7791     AS = MFI->hasFlatScratchInit() ?
7792          AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
7793 
7794   unsigned NumElements = VT.getVectorNumElements();
7795   if (AS == AMDGPUAS::GLOBAL_ADDRESS ||
7796       AS == AMDGPUAS::FLAT_ADDRESS) {
7797     if (NumElements > 4)
7798       return SplitVectorStore(Op, DAG);
7799     // v3 stores not supported on SI.
7800     if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7801       return SplitVectorStore(Op, DAG);
7802     return SDValue();
7803   } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
7804     switch (Subtarget->getMaxPrivateElementSize()) {
7805     case 4:
7806       return scalarizeVectorStore(Store, DAG);
7807     case 8:
7808       if (NumElements > 2)
7809         return SplitVectorStore(Op, DAG);
7810       return SDValue();
7811     case 16:
7812       if (NumElements > 4 || NumElements == 3)
7813         return SplitVectorStore(Op, DAG);
7814       return SDValue();
7815     default:
7816       llvm_unreachable("unsupported private_element_size");
7817     }
7818   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
7819     // Use ds_write_b128 if possible.
7820     if (Subtarget->useDS128() && Store->getAlignment() >= 16 &&
7821         VT.getStoreSize() == 16 && NumElements != 3)
7822       return SDValue();
7823 
7824     if (NumElements > 2)
7825       return SplitVectorStore(Op, DAG);
7826 
7827     // SI has a hardware bug in the LDS / GDS boounds checking: if the base
7828     // address is negative, then the instruction is incorrectly treated as
7829     // out-of-bounds even if base + offsets is in bounds. Split vectorized
7830     // stores here to avoid emitting ds_write2_b32. We may re-combine the
7831     // store later in the SILoadStoreOptimizer.
7832     if (!Subtarget->hasUsableDSOffset() &&
7833         NumElements == 2 && VT.getStoreSize() == 8 &&
7834         Store->getAlignment() < 8) {
7835       return SplitVectorStore(Op, DAG);
7836     }
7837 
7838     return SDValue();
7839   } else {
7840     llvm_unreachable("unhandled address space");
7841   }
7842 }
7843 
7844 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
7845   SDLoc DL(Op);
7846   EVT VT = Op.getValueType();
7847   SDValue Arg = Op.getOperand(0);
7848   SDValue TrigVal;
7849 
7850   // TODO: Should this propagate fast-math-flags?
7851 
7852   SDValue OneOver2Pi = DAG.getConstantFP(0.5 / M_PI, DL, VT);
7853 
7854   if (Subtarget->hasTrigReducedRange()) {
7855     SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
7856     TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal);
7857   } else {
7858     TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
7859   }
7860 
7861   switch (Op.getOpcode()) {
7862   case ISD::FCOS:
7863     return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal);
7864   case ISD::FSIN:
7865     return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal);
7866   default:
7867     llvm_unreachable("Wrong trig opcode");
7868   }
7869 }
7870 
7871 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
7872   AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
7873   assert(AtomicNode->isCompareAndSwap());
7874   unsigned AS = AtomicNode->getAddressSpace();
7875 
7876   // No custom lowering required for local address space
7877   if (!isFlatGlobalAddrSpace(AS))
7878     return Op;
7879 
7880   // Non-local address space requires custom lowering for atomic compare
7881   // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2
7882   SDLoc DL(Op);
7883   SDValue ChainIn = Op.getOperand(0);
7884   SDValue Addr = Op.getOperand(1);
7885   SDValue Old = Op.getOperand(2);
7886   SDValue New = Op.getOperand(3);
7887   EVT VT = Op.getValueType();
7888   MVT SimpleVT = VT.getSimpleVT();
7889   MVT VecType = MVT::getVectorVT(SimpleVT, 2);
7890 
7891   SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
7892   SDValue Ops[] = { ChainIn, Addr, NewOld };
7893 
7894   return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
7895                                  Ops, VT, AtomicNode->getMemOperand());
7896 }
7897 
7898 //===----------------------------------------------------------------------===//
7899 // Custom DAG optimizations
7900 //===----------------------------------------------------------------------===//
7901 
7902 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
7903                                                      DAGCombinerInfo &DCI) const {
7904   EVT VT = N->getValueType(0);
7905   EVT ScalarVT = VT.getScalarType();
7906   if (ScalarVT != MVT::f32)
7907     return SDValue();
7908 
7909   SelectionDAG &DAG = DCI.DAG;
7910   SDLoc DL(N);
7911 
7912   SDValue Src = N->getOperand(0);
7913   EVT SrcVT = Src.getValueType();
7914 
7915   // TODO: We could try to match extracting the higher bytes, which would be
7916   // easier if i8 vectors weren't promoted to i32 vectors, particularly after
7917   // types are legalized. v4i8 -> v4f32 is probably the only case to worry
7918   // about in practice.
7919   if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
7920     if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
7921       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
7922       DCI.AddToWorklist(Cvt.getNode());
7923       return Cvt;
7924     }
7925   }
7926 
7927   return SDValue();
7928 }
7929 
7930 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
7931 
7932 // This is a variant of
7933 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
7934 //
7935 // The normal DAG combiner will do this, but only if the add has one use since
7936 // that would increase the number of instructions.
7937 //
7938 // This prevents us from seeing a constant offset that can be folded into a
7939 // memory instruction's addressing mode. If we know the resulting add offset of
7940 // a pointer can be folded into an addressing offset, we can replace the pointer
7941 // operand with the add of new constant offset. This eliminates one of the uses,
7942 // and may allow the remaining use to also be simplified.
7943 //
7944 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
7945                                                unsigned AddrSpace,
7946                                                EVT MemVT,
7947                                                DAGCombinerInfo &DCI) const {
7948   SDValue N0 = N->getOperand(0);
7949   SDValue N1 = N->getOperand(1);
7950 
7951   // We only do this to handle cases where it's profitable when there are
7952   // multiple uses of the add, so defer to the standard combine.
7953   if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
7954       N0->hasOneUse())
7955     return SDValue();
7956 
7957   const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
7958   if (!CN1)
7959     return SDValue();
7960 
7961   const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
7962   if (!CAdd)
7963     return SDValue();
7964 
7965   // If the resulting offset is too large, we can't fold it into the addressing
7966   // mode offset.
7967   APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
7968   Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());
7969 
7970   AddrMode AM;
7971   AM.HasBaseReg = true;
7972   AM.BaseOffs = Offset.getSExtValue();
7973   if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
7974     return SDValue();
7975 
7976   SelectionDAG &DAG = DCI.DAG;
7977   SDLoc SL(N);
7978   EVT VT = N->getValueType(0);
7979 
7980   SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
7981   SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
7982 
7983   SDNodeFlags Flags;
7984   Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
7985                           (N0.getOpcode() == ISD::OR ||
7986                            N0->getFlags().hasNoUnsignedWrap()));
7987 
7988   return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
7989 }
7990 
7991 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
7992                                                   DAGCombinerInfo &DCI) const {
7993   SDValue Ptr = N->getBasePtr();
7994   SelectionDAG &DAG = DCI.DAG;
7995   SDLoc SL(N);
7996 
7997   // TODO: We could also do this for multiplies.
7998   if (Ptr.getOpcode() == ISD::SHL) {
7999     SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(),  N->getAddressSpace(),
8000                                           N->getMemoryVT(), DCI);
8001     if (NewPtr) {
8002       SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
8003 
8004       NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
8005       return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
8006     }
8007   }
8008 
8009   return SDValue();
8010 }
8011 
8012 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
8013   return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
8014          (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
8015          (Opc == ISD::XOR && Val == 0);
8016 }
8017 
8018 // Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This
8019 // will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit
8020 // integer combine opportunities since most 64-bit operations are decomposed
8021 // this way.  TODO: We won't want this for SALU especially if it is an inline
8022 // immediate.
8023 SDValue SITargetLowering::splitBinaryBitConstantOp(
8024   DAGCombinerInfo &DCI,
8025   const SDLoc &SL,
8026   unsigned Opc, SDValue LHS,
8027   const ConstantSDNode *CRHS) const {
8028   uint64_t Val = CRHS->getZExtValue();
8029   uint32_t ValLo = Lo_32(Val);
8030   uint32_t ValHi = Hi_32(Val);
8031   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8032 
8033     if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
8034          bitOpWithConstantIsReducible(Opc, ValHi)) ||
8035         (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
8036     // If we need to materialize a 64-bit immediate, it will be split up later
8037     // anyway. Avoid creating the harder to understand 64-bit immediate
8038     // materialization.
8039     return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
8040   }
8041 
8042   return SDValue();
8043 }
8044 
8045 // Returns true if argument is a boolean value which is not serialized into
8046 // memory or argument and does not require v_cmdmask_b32 to be deserialized.
8047 static bool isBoolSGPR(SDValue V) {
8048   if (V.getValueType() != MVT::i1)
8049     return false;
8050   switch (V.getOpcode()) {
8051   default: break;
8052   case ISD::SETCC:
8053   case ISD::AND:
8054   case ISD::OR:
8055   case ISD::XOR:
8056   case AMDGPUISD::FP_CLASS:
8057     return true;
8058   }
8059   return false;
8060 }
8061 
8062 // If a constant has all zeroes or all ones within each byte return it.
8063 // Otherwise return 0.
8064 static uint32_t getConstantPermuteMask(uint32_t C) {
8065   // 0xff for any zero byte in the mask
8066   uint32_t ZeroByteMask = 0;
8067   if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
8068   if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
8069   if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
8070   if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
8071   uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte
8072   if ((NonZeroByteMask & C) != NonZeroByteMask)
8073     return 0; // Partial bytes selected.
8074   return C;
8075 }
8076 
8077 // Check if a node selects whole bytes from its operand 0 starting at a byte
8078 // boundary while masking the rest. Returns select mask as in the v_perm_b32
8079 // or -1 if not succeeded.
8080 // Note byte select encoding:
8081 // value 0-3 selects corresponding source byte;
8082 // value 0xc selects zero;
8083 // value 0xff selects 0xff.
8084 static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
8085   assert(V.getValueSizeInBits() == 32);
8086 
8087   if (V.getNumOperands() != 2)
8088     return ~0;
8089 
8090   ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
8091   if (!N1)
8092     return ~0;
8093 
8094   uint32_t C = N1->getZExtValue();
8095 
8096   switch (V.getOpcode()) {
8097   default:
8098     break;
8099   case ISD::AND:
8100     if (uint32_t ConstMask = getConstantPermuteMask(C)) {
8101       return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
8102     }
8103     break;
8104 
8105   case ISD::OR:
8106     if (uint32_t ConstMask = getConstantPermuteMask(C)) {
8107       return (0x03020100 & ~ConstMask) | ConstMask;
8108     }
8109     break;
8110 
8111   case ISD::SHL:
8112     if (C % 8)
8113       return ~0;
8114 
8115     return uint32_t((0x030201000c0c0c0cull << C) >> 32);
8116 
8117   case ISD::SRL:
8118     if (C % 8)
8119       return ~0;
8120 
8121     return uint32_t(0x0c0c0c0c03020100ull >> C);
8122   }
8123 
8124   return ~0;
8125 }
8126 
8127 SDValue SITargetLowering::performAndCombine(SDNode *N,
8128                                             DAGCombinerInfo &DCI) const {
8129   if (DCI.isBeforeLegalize())
8130     return SDValue();
8131 
8132   SelectionDAG &DAG = DCI.DAG;
8133   EVT VT = N->getValueType(0);
8134   SDValue LHS = N->getOperand(0);
8135   SDValue RHS = N->getOperand(1);
8136 
8137 
8138   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
8139   if (VT == MVT::i64 && CRHS) {
8140     if (SDValue Split
8141         = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
8142       return Split;
8143   }
8144 
8145   if (CRHS && VT == MVT::i32) {
8146     // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
8147     // nb = number of trailing zeroes in mask
8148     // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
8149     // given that we are selecting 8 or 16 bit fields starting at byte boundary.
8150     uint64_t Mask = CRHS->getZExtValue();
8151     unsigned Bits = countPopulation(Mask);
8152     if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
8153         (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
8154       if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
8155         unsigned Shift = CShift->getZExtValue();
8156         unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
8157         unsigned Offset = NB + Shift;
8158         if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
8159           SDLoc SL(N);
8160           SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
8161                                     LHS->getOperand(0),
8162                                     DAG.getConstant(Offset, SL, MVT::i32),
8163                                     DAG.getConstant(Bits, SL, MVT::i32));
8164           EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
8165           SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
8166                                     DAG.getValueType(NarrowVT));
8167           SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
8168                                     DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
8169           return Shl;
8170         }
8171       }
8172     }
8173 
8174     // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
8175     if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM &&
8176         isa<ConstantSDNode>(LHS.getOperand(2))) {
8177       uint32_t Sel = getConstantPermuteMask(Mask);
8178       if (!Sel)
8179         return SDValue();
8180 
8181       // Select 0xc for all zero bytes
8182       Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c);
8183       SDLoc DL(N);
8184       return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
8185                          LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
8186     }
8187   }
8188 
8189   // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
8190   // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
8191   if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
8192     ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8193     ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
8194 
8195     SDValue X = LHS.getOperand(0);
8196     SDValue Y = RHS.getOperand(0);
8197     if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
8198       return SDValue();
8199 
8200     if (LCC == ISD::SETO) {
8201       if (X != LHS.getOperand(1))
8202         return SDValue();
8203 
8204       if (RCC == ISD::SETUNE) {
8205         const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
8206         if (!C1 || !C1->isInfinity() || C1->isNegative())
8207           return SDValue();
8208 
8209         const uint32_t Mask = SIInstrFlags::N_NORMAL |
8210                               SIInstrFlags::N_SUBNORMAL |
8211                               SIInstrFlags::N_ZERO |
8212                               SIInstrFlags::P_ZERO |
8213                               SIInstrFlags::P_SUBNORMAL |
8214                               SIInstrFlags::P_NORMAL;
8215 
8216         static_assert(((~(SIInstrFlags::S_NAN |
8217                           SIInstrFlags::Q_NAN |
8218                           SIInstrFlags::N_INFINITY |
8219                           SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
8220                       "mask not equal");
8221 
8222         SDLoc DL(N);
8223         return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
8224                            X, DAG.getConstant(Mask, DL, MVT::i32));
8225       }
8226     }
8227   }
8228 
8229   if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS)
8230     std::swap(LHS, RHS);
8231 
8232   if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS &&
8233       RHS.hasOneUse()) {
8234     ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8235     // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan)
8236     // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan)
8237     const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
8238     if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask &&
8239         (RHS.getOperand(0) == LHS.getOperand(0) &&
8240          LHS.getOperand(0) == LHS.getOperand(1))) {
8241       const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN;
8242       unsigned NewMask = LCC == ISD::SETO ?
8243         Mask->getZExtValue() & ~OrdMask :
8244         Mask->getZExtValue() & OrdMask;
8245 
8246       SDLoc DL(N);
8247       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0),
8248                          DAG.getConstant(NewMask, DL, MVT::i32));
8249     }
8250   }
8251 
8252   if (VT == MVT::i32 &&
8253       (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
8254     // and x, (sext cc from i1) => select cc, x, 0
8255     if (RHS.getOpcode() != ISD::SIGN_EXTEND)
8256       std::swap(LHS, RHS);
8257     if (isBoolSGPR(RHS.getOperand(0)))
8258       return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
8259                            LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
8260   }
8261 
8262   // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
8263   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8264   if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
8265       N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
8266     uint32_t LHSMask = getPermuteMask(DAG, LHS);
8267     uint32_t RHSMask = getPermuteMask(DAG, RHS);
8268     if (LHSMask != ~0u && RHSMask != ~0u) {
8269       // Canonicalize the expression in an attempt to have fewer unique masks
8270       // and therefore fewer registers used to hold the masks.
8271       if (LHSMask > RHSMask) {
8272         std::swap(LHSMask, RHSMask);
8273         std::swap(LHS, RHS);
8274       }
8275 
8276       // Select 0xc for each lane used from source operand. Zero has 0xc mask
8277       // set, 0xff have 0xff in the mask, actual lanes are in the 0-3 range.
8278       uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8279       uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8280 
8281       // Check of we need to combine values from two sources within a byte.
8282       if (!(LHSUsedLanes & RHSUsedLanes) &&
8283           // If we select high and lower word keep it for SDWA.
8284           // TODO: teach SDWA to work with v_perm_b32 and remove the check.
8285           !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
8286         // Each byte in each mask is either selector mask 0-3, or has higher
8287         // bits set in either of masks, which can be 0xff for 0xff or 0x0c for
8288         // zero. If 0x0c is in either mask it shall always be 0x0c. Otherwise
8289         // mask which is not 0xff wins. By anding both masks we have a correct
8290         // result except that 0x0c shall be corrected to give 0x0c only.
8291         uint32_t Mask = LHSMask & RHSMask;
8292         for (unsigned I = 0; I < 32; I += 8) {
8293           uint32_t ByteSel = 0xff << I;
8294           if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c)
8295             Mask &= (0x0c << I) & 0xffffffff;
8296         }
8297 
8298         // Add 4 to each active LHS lane. It will not affect any existing 0xff
8299         // or 0x0c.
8300         uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404);
8301         SDLoc DL(N);
8302 
8303         return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
8304                            LHS.getOperand(0), RHS.getOperand(0),
8305                            DAG.getConstant(Sel, DL, MVT::i32));
8306       }
8307     }
8308   }
8309 
8310   return SDValue();
8311 }
8312 
8313 SDValue SITargetLowering::performOrCombine(SDNode *N,
8314                                            DAGCombinerInfo &DCI) const {
8315   SelectionDAG &DAG = DCI.DAG;
8316   SDValue LHS = N->getOperand(0);
8317   SDValue RHS = N->getOperand(1);
8318 
8319   EVT VT = N->getValueType(0);
8320   if (VT == MVT::i1) {
8321     // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
8322     if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
8323         RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
8324       SDValue Src = LHS.getOperand(0);
8325       if (Src != RHS.getOperand(0))
8326         return SDValue();
8327 
8328       const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
8329       const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
8330       if (!CLHS || !CRHS)
8331         return SDValue();
8332 
8333       // Only 10 bits are used.
8334       static const uint32_t MaxMask = 0x3ff;
8335 
8336       uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
8337       SDLoc DL(N);
8338       return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
8339                          Src, DAG.getConstant(NewMask, DL, MVT::i32));
8340     }
8341 
8342     return SDValue();
8343   }
8344 
8345   // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
8346   if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() &&
8347       LHS.getOpcode() == AMDGPUISD::PERM &&
8348       isa<ConstantSDNode>(LHS.getOperand(2))) {
8349     uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1));
8350     if (!Sel)
8351       return SDValue();
8352 
8353     Sel |= LHS.getConstantOperandVal(2);
8354     SDLoc DL(N);
8355     return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
8356                        LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
8357   }
8358 
8359   // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
8360   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8361   if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
8362       N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
8363     uint32_t LHSMask = getPermuteMask(DAG, LHS);
8364     uint32_t RHSMask = getPermuteMask(DAG, RHS);
8365     if (LHSMask != ~0u && RHSMask != ~0u) {
8366       // Canonicalize the expression in an attempt to have fewer unique masks
8367       // and therefore fewer registers used to hold the masks.
8368       if (LHSMask > RHSMask) {
8369         std::swap(LHSMask, RHSMask);
8370         std::swap(LHS, RHS);
8371       }
8372 
8373       // Select 0xc for each lane used from source operand. Zero has 0xc mask
8374       // set, 0xff have 0xff in the mask, actual lanes are in the 0-3 range.
8375       uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8376       uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8377 
8378       // Check of we need to combine values from two sources within a byte.
8379       if (!(LHSUsedLanes & RHSUsedLanes) &&
8380           // If we select high and lower word keep it for SDWA.
8381           // TODO: teach SDWA to work with v_perm_b32 and remove the check.
8382           !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
8383         // Kill zero bytes selected by other mask. Zero value is 0xc.
8384         LHSMask &= ~RHSUsedLanes;
8385         RHSMask &= ~LHSUsedLanes;
8386         // Add 4 to each active LHS lane
8387         LHSMask |= LHSUsedLanes & 0x04040404;
8388         // Combine masks
8389         uint32_t Sel = LHSMask | RHSMask;
8390         SDLoc DL(N);
8391 
8392         return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
8393                            LHS.getOperand(0), RHS.getOperand(0),
8394                            DAG.getConstant(Sel, DL, MVT::i32));
8395       }
8396     }
8397   }
8398 
8399   if (VT != MVT::i64)
8400     return SDValue();
8401 
8402   // TODO: This could be a generic combine with a predicate for extracting the
8403   // high half of an integer being free.
8404 
8405   // (or i64:x, (zero_extend i32:y)) ->
8406   //   i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
8407   if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
8408       RHS.getOpcode() != ISD::ZERO_EXTEND)
8409     std::swap(LHS, RHS);
8410 
8411   if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
8412     SDValue ExtSrc = RHS.getOperand(0);
8413     EVT SrcVT = ExtSrc.getValueType();
8414     if (SrcVT == MVT::i32) {
8415       SDLoc SL(N);
8416       SDValue LowLHS, HiBits;
8417       std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
8418       SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
8419 
8420       DCI.AddToWorklist(LowOr.getNode());
8421       DCI.AddToWorklist(HiBits.getNode());
8422 
8423       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
8424                                 LowOr, HiBits);
8425       return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
8426     }
8427   }
8428 
8429   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
8430   if (CRHS) {
8431     if (SDValue Split
8432           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
8433       return Split;
8434   }
8435 
8436   return SDValue();
8437 }
8438 
8439 SDValue SITargetLowering::performXorCombine(SDNode *N,
8440                                             DAGCombinerInfo &DCI) const {
8441   EVT VT = N->getValueType(0);
8442   if (VT != MVT::i64)
8443     return SDValue();
8444 
8445   SDValue LHS = N->getOperand(0);
8446   SDValue RHS = N->getOperand(1);
8447 
8448   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
8449   if (CRHS) {
8450     if (SDValue Split
8451           = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
8452       return Split;
8453   }
8454 
8455   return SDValue();
8456 }
8457 
8458 // Instructions that will be lowered with a final instruction that zeros the
8459 // high result bits.
8460 // XXX - probably only need to list legal operations.
8461 static bool fp16SrcZerosHighBits(unsigned Opc) {
8462   switch (Opc) {
8463   case ISD::FADD:
8464   case ISD::FSUB:
8465   case ISD::FMUL:
8466   case ISD::FDIV:
8467   case ISD::FREM:
8468   case ISD::FMA:
8469   case ISD::FMAD:
8470   case ISD::FCANONICALIZE:
8471   case ISD::FP_ROUND:
8472   case ISD::UINT_TO_FP:
8473   case ISD::SINT_TO_FP:
8474   case ISD::FABS:
8475     // Fabs is lowered to a bit operation, but it's an and which will clear the
8476     // high bits anyway.
8477   case ISD::FSQRT:
8478   case ISD::FSIN:
8479   case ISD::FCOS:
8480   case ISD::FPOWI:
8481   case ISD::FPOW:
8482   case ISD::FLOG:
8483   case ISD::FLOG2:
8484   case ISD::FLOG10:
8485   case ISD::FEXP:
8486   case ISD::FEXP2:
8487   case ISD::FCEIL:
8488   case ISD::FTRUNC:
8489   case ISD::FRINT:
8490   case ISD::FNEARBYINT:
8491   case ISD::FROUND:
8492   case ISD::FFLOOR:
8493   case ISD::FMINNUM:
8494   case ISD::FMAXNUM:
8495   case AMDGPUISD::FRACT:
8496   case AMDGPUISD::CLAMP:
8497   case AMDGPUISD::COS_HW:
8498   case AMDGPUISD::SIN_HW:
8499   case AMDGPUISD::FMIN3:
8500   case AMDGPUISD::FMAX3:
8501   case AMDGPUISD::FMED3:
8502   case AMDGPUISD::FMAD_FTZ:
8503   case AMDGPUISD::RCP:
8504   case AMDGPUISD::RSQ:
8505   case AMDGPUISD::RCP_IFLAG:
8506   case AMDGPUISD::LDEXP:
8507     return true;
8508   default:
8509     // fcopysign, select and others may be lowered to 32-bit bit operations
8510     // which don't zero the high bits.
8511     return false;
8512   }
8513 }
8514 
8515 SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
8516                                                    DAGCombinerInfo &DCI) const {
8517   if (!Subtarget->has16BitInsts() ||
8518       DCI.getDAGCombineLevel() < AfterLegalizeDAG)
8519     return SDValue();
8520 
8521   EVT VT = N->getValueType(0);
8522   if (VT != MVT::i32)
8523     return SDValue();
8524 
8525   SDValue Src = N->getOperand(0);
8526   if (Src.getValueType() != MVT::i16)
8527     return SDValue();
8528 
8529   // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
8530   // FIXME: It is not universally true that the high bits are zeroed on gfx9.
8531   if (Src.getOpcode() == ISD::BITCAST) {
8532     SDValue BCSrc = Src.getOperand(0);
8533     if (BCSrc.getValueType() == MVT::f16 &&
8534         fp16SrcZerosHighBits(BCSrc.getOpcode()))
8535       return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
8536   }
8537 
8538   return SDValue();
8539 }
8540 
8541 SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N,
8542                                                         DAGCombinerInfo &DCI)
8543                                                         const {
8544   SDValue Src = N->getOperand(0);
8545   auto *VTSign = cast<VTSDNode>(N->getOperand(1));
8546 
8547   if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE &&
8548       VTSign->getVT() == MVT::i8) ||
8549       (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT &&
8550       VTSign->getVT() == MVT::i16)) &&
8551       Src.hasOneUse()) {
8552     auto *M = cast<MemSDNode>(Src);
8553     SDValue Ops[] = {
8554       Src.getOperand(0), // Chain
8555       Src.getOperand(1), // rsrc
8556       Src.getOperand(2), // vindex
8557       Src.getOperand(3), // voffset
8558       Src.getOperand(4), // soffset
8559       Src.getOperand(5), // offset
8560       Src.getOperand(6),
8561       Src.getOperand(7)
8562     };
8563     // replace with BUFFER_LOAD_BYTE/SHORT
8564     SDVTList ResList = DCI.DAG.getVTList(MVT::i32,
8565                                          Src.getOperand(0).getValueType());
8566     unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ?
8567                    AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT;
8568     SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N),
8569                                                           ResList,
8570                                                           Ops, M->getMemoryVT(),
8571                                                           M->getMemOperand());
8572     return DCI.DAG.getMergeValues({BufferLoadSignExt,
8573                                   BufferLoadSignExt.getValue(1)}, SDLoc(N));
8574   }
8575   return SDValue();
8576 }
8577 
8578 SDValue SITargetLowering::performClassCombine(SDNode *N,
8579                                               DAGCombinerInfo &DCI) const {
8580   SelectionDAG &DAG = DCI.DAG;
8581   SDValue Mask = N->getOperand(1);
8582 
8583   // fp_class x, 0 -> false
8584   if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
8585     if (CMask->isNullValue())
8586       return DAG.getConstant(0, SDLoc(N), MVT::i1);
8587   }
8588 
8589   if (N->getOperand(0).isUndef())
8590     return DAG.getUNDEF(MVT::i1);
8591 
8592   return SDValue();
8593 }
8594 
8595 SDValue SITargetLowering::performRcpCombine(SDNode *N,
8596                                             DAGCombinerInfo &DCI) const {
8597   EVT VT = N->getValueType(0);
8598   SDValue N0 = N->getOperand(0);
8599 
8600   if (N0.isUndef())
8601     return N0;
8602 
8603   if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP ||
8604                          N0.getOpcode() == ISD::SINT_TO_FP)) {
8605     return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0,
8606                            N->getFlags());
8607   }
8608 
8609   if ((VT == MVT::f32 || VT == MVT::f16) && N0.getOpcode() == ISD::FSQRT) {
8610     return DCI.DAG.getNode(AMDGPUISD::RSQ, SDLoc(N), VT,
8611                            N0.getOperand(0), N->getFlags());
8612   }
8613 
8614   return AMDGPUTargetLowering::performRcpCombine(N, DCI);
8615 }
8616 
8617 bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
8618                                        unsigned MaxDepth) const {
8619   unsigned Opcode = Op.getOpcode();
8620   if (Opcode == ISD::FCANONICALIZE)
8621     return true;
8622 
8623   if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
8624     auto F = CFP->getValueAPF();
8625     if (F.isNaN() && F.isSignaling())
8626       return false;
8627     return !F.isDenormal() || denormalsEnabledForType(DAG, Op.getValueType());
8628   }
8629 
8630   // If source is a result of another standard FP operation it is already in
8631   // canonical form.
8632   if (MaxDepth == 0)
8633     return false;
8634 
8635   switch (Opcode) {
8636   // These will flush denorms if required.
8637   case ISD::FADD:
8638   case ISD::FSUB:
8639   case ISD::FMUL:
8640   case ISD::FCEIL:
8641   case ISD::FFLOOR:
8642   case ISD::FMA:
8643   case ISD::FMAD:
8644   case ISD::FSQRT:
8645   case ISD::FDIV:
8646   case ISD::FREM:
8647   case ISD::FP_ROUND:
8648   case ISD::FP_EXTEND:
8649   case AMDGPUISD::FMUL_LEGACY:
8650   case AMDGPUISD::FMAD_FTZ:
8651   case AMDGPUISD::RCP:
8652   case AMDGPUISD::RSQ:
8653   case AMDGPUISD::RSQ_CLAMP:
8654   case AMDGPUISD::RCP_LEGACY:
8655   case AMDGPUISD::RSQ_LEGACY:
8656   case AMDGPUISD::RCP_IFLAG:
8657   case AMDGPUISD::TRIG_PREOP:
8658   case AMDGPUISD::DIV_SCALE:
8659   case AMDGPUISD::DIV_FMAS:
8660   case AMDGPUISD::DIV_FIXUP:
8661   case AMDGPUISD::FRACT:
8662   case AMDGPUISD::LDEXP:
8663   case AMDGPUISD::CVT_PKRTZ_F16_F32:
8664   case AMDGPUISD::CVT_F32_UBYTE0:
8665   case AMDGPUISD::CVT_F32_UBYTE1:
8666   case AMDGPUISD::CVT_F32_UBYTE2:
8667   case AMDGPUISD::CVT_F32_UBYTE3:
8668     return true;
8669 
8670   // It can/will be lowered or combined as a bit operation.
8671   // Need to check their input recursively to handle.
8672   case ISD::FNEG:
8673   case ISD::FABS:
8674   case ISD::FCOPYSIGN:
8675     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
8676 
8677   case ISD::FSIN:
8678   case ISD::FCOS:
8679   case ISD::FSINCOS:
8680     return Op.getValueType().getScalarType() != MVT::f16;
8681 
8682   case ISD::FMINNUM:
8683   case ISD::FMAXNUM:
8684   case ISD::FMINNUM_IEEE:
8685   case ISD::FMAXNUM_IEEE:
8686   case AMDGPUISD::CLAMP:
8687   case AMDGPUISD::FMED3:
8688   case AMDGPUISD::FMAX3:
8689   case AMDGPUISD::FMIN3: {
8690     // FIXME: Shouldn't treat the generic operations different based these.
8691     // However, we aren't really required to flush the result from
8692     // minnum/maxnum..
8693 
8694     // snans will be quieted, so we only need to worry about denormals.
8695     if (Subtarget->supportsMinMaxDenormModes() ||
8696         denormalsEnabledForType(DAG, Op.getValueType()))
8697       return true;
8698 
8699     // Flushing may be required.
8700     // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms. For such
8701     // targets need to check their input recursively.
8702 
8703     // FIXME: Does this apply with clamp? It's implemented with max.
8704     for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) {
8705       if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1))
8706         return false;
8707     }
8708 
8709     return true;
8710   }
8711   case ISD::SELECT: {
8712     return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) &&
8713            isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1);
8714   }
8715   case ISD::BUILD_VECTOR: {
8716     for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
8717       SDValue SrcOp = Op.getOperand(i);
8718       if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1))
8719         return false;
8720     }
8721 
8722     return true;
8723   }
8724   case ISD::EXTRACT_VECTOR_ELT:
8725   case ISD::EXTRACT_SUBVECTOR: {
8726     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
8727   }
8728   case ISD::INSERT_VECTOR_ELT: {
8729     return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) &&
8730            isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1);
8731   }
8732   case ISD::UNDEF:
8733     // Could be anything.
8734     return false;
8735 
8736   case ISD::BITCAST: {
8737     // Hack round the mess we make when legalizing extract_vector_elt
8738     SDValue Src = Op.getOperand(0);
8739     if (Src.getValueType() == MVT::i16 &&
8740         Src.getOpcode() == ISD::TRUNCATE) {
8741       SDValue TruncSrc = Src.getOperand(0);
8742       if (TruncSrc.getValueType() == MVT::i32 &&
8743           TruncSrc.getOpcode() == ISD::BITCAST &&
8744           TruncSrc.getOperand(0).getValueType() == MVT::v2f16) {
8745         return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1);
8746       }
8747     }
8748 
8749     return false;
8750   }
8751   case ISD::INTRINSIC_WO_CHAIN: {
8752     unsigned IntrinsicID
8753       = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8754     // TODO: Handle more intrinsics
8755     switch (IntrinsicID) {
8756     case Intrinsic::amdgcn_cvt_pkrtz:
8757     case Intrinsic::amdgcn_cubeid:
8758     case Intrinsic::amdgcn_frexp_mant:
8759     case Intrinsic::amdgcn_fdot2:
8760       return true;
8761     default:
8762       break;
8763     }
8764 
8765     LLVM_FALLTHROUGH;
8766   }
8767   default:
8768     return denormalsEnabledForType(DAG, Op.getValueType()) &&
8769            DAG.isKnownNeverSNaN(Op);
8770   }
8771 
8772   llvm_unreachable("invalid operation");
8773 }
8774 
8775 // Constant fold canonicalize.
8776 SDValue SITargetLowering::getCanonicalConstantFP(
8777   SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const {
8778   // Flush denormals to 0 if not enabled.
8779   if (C.isDenormal() && !denormalsEnabledForType(DAG, VT))
8780     return DAG.getConstantFP(0.0, SL, VT);
8781 
8782   if (C.isNaN()) {
8783     APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
8784     if (C.isSignaling()) {
8785       // Quiet a signaling NaN.
8786       // FIXME: Is this supposed to preserve payload bits?
8787       return DAG.getConstantFP(CanonicalQNaN, SL, VT);
8788     }
8789 
8790     // Make sure it is the canonical NaN bitpattern.
8791     //
8792     // TODO: Can we use -1 as the canonical NaN value since it's an inline
8793     // immediate?
8794     if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
8795       return DAG.getConstantFP(CanonicalQNaN, SL, VT);
8796   }
8797 
8798   // Already canonical.
8799   return DAG.getConstantFP(C, SL, VT);
8800 }
8801 
8802 static bool vectorEltWillFoldAway(SDValue Op) {
8803   return Op.isUndef() || isa<ConstantFPSDNode>(Op);
8804 }
8805 
8806 SDValue SITargetLowering::performFCanonicalizeCombine(
8807   SDNode *N,
8808   DAGCombinerInfo &DCI) const {
8809   SelectionDAG &DAG = DCI.DAG;
8810   SDValue N0 = N->getOperand(0);
8811   EVT VT = N->getValueType(0);
8812 
8813   // fcanonicalize undef -> qnan
8814   if (N0.isUndef()) {
8815     APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT));
8816     return DAG.getConstantFP(QNaN, SDLoc(N), VT);
8817   }
8818 
8819   if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) {
8820     EVT VT = N->getValueType(0);
8821     return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF());
8822   }
8823 
8824   // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x),
8825   //                                                   (fcanonicalize k)
8826   //
8827   // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0
8828 
8829   // TODO: This could be better with wider vectors that will be split to v2f16,
8830   // and to consider uses since there aren't that many packed operations.
8831   if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 &&
8832       isTypeLegal(MVT::v2f16)) {
8833     SDLoc SL(N);
8834     SDValue NewElts[2];
8835     SDValue Lo = N0.getOperand(0);
8836     SDValue Hi = N0.getOperand(1);
8837     EVT EltVT = Lo.getValueType();
8838 
8839     if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) {
8840       for (unsigned I = 0; I != 2; ++I) {
8841         SDValue Op = N0.getOperand(I);
8842         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
8843           NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT,
8844                                               CFP->getValueAPF());
8845         } else if (Op.isUndef()) {
8846           // Handled below based on what the other operand is.
8847           NewElts[I] = Op;
8848         } else {
8849           NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op);
8850         }
8851       }
8852 
8853       // If one half is undef, and one is constant, perfer a splat vector rather
8854       // than the normal qNaN. If it's a register, prefer 0.0 since that's
8855       // cheaper to use and may be free with a packed operation.
8856       if (NewElts[0].isUndef()) {
8857         if (isa<ConstantFPSDNode>(NewElts[1]))
8858           NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ?
8859             NewElts[1]: DAG.getConstantFP(0.0f, SL, EltVT);
8860       }
8861 
8862       if (NewElts[1].isUndef()) {
8863         NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ?
8864           NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT);
8865       }
8866 
8867       return DAG.getBuildVector(VT, SL, NewElts);
8868     }
8869   }
8870 
8871   unsigned SrcOpc = N0.getOpcode();
8872 
8873   // If it's free to do so, push canonicalizes further up the source, which may
8874   // find a canonical source.
8875   //
8876   // TODO: More opcodes. Note this is unsafe for the the _ieee minnum/maxnum for
8877   // sNaNs.
8878   if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) {
8879     auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
8880     if (CRHS && N0.hasOneUse()) {
8881       SDLoc SL(N);
8882       SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT,
8883                                    N0.getOperand(0));
8884       SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF());
8885       DCI.AddToWorklist(Canon0.getNode());
8886 
8887       return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1);
8888     }
8889   }
8890 
8891   return isCanonicalized(DAG, N0) ? N0 : SDValue();
8892 }
8893 
8894 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
8895   switch (Opc) {
8896   case ISD::FMAXNUM:
8897   case ISD::FMAXNUM_IEEE:
8898     return AMDGPUISD::FMAX3;
8899   case ISD::SMAX:
8900     return AMDGPUISD::SMAX3;
8901   case ISD::UMAX:
8902     return AMDGPUISD::UMAX3;
8903   case ISD::FMINNUM:
8904   case ISD::FMINNUM_IEEE:
8905     return AMDGPUISD::FMIN3;
8906   case ISD::SMIN:
8907     return AMDGPUISD::SMIN3;
8908   case ISD::UMIN:
8909     return AMDGPUISD::UMIN3;
8910   default:
8911     llvm_unreachable("Not a min/max opcode");
8912   }
8913 }
8914 
8915 SDValue SITargetLowering::performIntMed3ImmCombine(
8916   SelectionDAG &DAG, const SDLoc &SL,
8917   SDValue Op0, SDValue Op1, bool Signed) const {
8918   ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
8919   if (!K1)
8920     return SDValue();
8921 
8922   ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
8923   if (!K0)
8924     return SDValue();
8925 
8926   if (Signed) {
8927     if (K0->getAPIntValue().sge(K1->getAPIntValue()))
8928       return SDValue();
8929   } else {
8930     if (K0->getAPIntValue().uge(K1->getAPIntValue()))
8931       return SDValue();
8932   }
8933 
8934   EVT VT = K0->getValueType(0);
8935   unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
8936   if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
8937     return DAG.getNode(Med3Opc, SL, VT,
8938                        Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
8939   }
8940 
8941   // If there isn't a 16-bit med3 operation, convert to 32-bit.
8942   MVT NVT = MVT::i32;
8943   unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
8944 
8945   SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
8946   SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
8947   SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
8948 
8949   SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
8950   return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
8951 }
8952 
8953 static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
8954   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
8955     return C;
8956 
8957   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
8958     if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
8959       return C;
8960   }
8961 
8962   return nullptr;
8963 }
8964 
8965 SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
8966                                                   const SDLoc &SL,
8967                                                   SDValue Op0,
8968                                                   SDValue Op1) const {
8969   ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
8970   if (!K1)
8971     return SDValue();
8972 
8973   ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
8974   if (!K0)
8975     return SDValue();
8976 
8977   // Ordered >= (although NaN inputs should have folded away by now).
8978   APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
8979   if (Cmp == APFloat::cmpGreaterThan)
8980     return SDValue();
8981 
8982   const MachineFunction &MF = DAG.getMachineFunction();
8983   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
8984 
8985   // TODO: Check IEEE bit enabled?
8986   EVT VT = Op0.getValueType();
8987   if (Info->getMode().DX10Clamp) {
8988     // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
8989     // hardware fmed3 behavior converting to a min.
8990     // FIXME: Should this be allowing -0.0?
8991     if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
8992       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
8993   }
8994 
8995   // med3 for f16 is only available on gfx9+, and not available for v2f16.
8996   if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
8997     // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
8998     // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
8999     // then give the other result, which is different from med3 with a NaN
9000     // input.
9001     SDValue Var = Op0.getOperand(0);
9002     if (!DAG.isKnownNeverSNaN(Var))
9003       return SDValue();
9004 
9005     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
9006 
9007     if ((!K0->hasOneUse() ||
9008          TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) &&
9009         (!K1->hasOneUse() ||
9010          TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) {
9011       return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
9012                          Var, SDValue(K0, 0), SDValue(K1, 0));
9013     }
9014   }
9015 
9016   return SDValue();
9017 }
9018 
9019 SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
9020                                                DAGCombinerInfo &DCI) const {
9021   SelectionDAG &DAG = DCI.DAG;
9022 
9023   EVT VT = N->getValueType(0);
9024   unsigned Opc = N->getOpcode();
9025   SDValue Op0 = N->getOperand(0);
9026   SDValue Op1 = N->getOperand(1);
9027 
9028   // Only do this if the inner op has one use since this will just increases
9029   // register pressure for no benefit.
9030 
9031   if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
9032       !VT.isVector() &&
9033       (VT == MVT::i32 || VT == MVT::f32 ||
9034        ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) {
9035     // max(max(a, b), c) -> max3(a, b, c)
9036     // min(min(a, b), c) -> min3(a, b, c)
9037     if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
9038       SDLoc DL(N);
9039       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
9040                          DL,
9041                          N->getValueType(0),
9042                          Op0.getOperand(0),
9043                          Op0.getOperand(1),
9044                          Op1);
9045     }
9046 
9047     // Try commuted.
9048     // max(a, max(b, c)) -> max3(a, b, c)
9049     // min(a, min(b, c)) -> min3(a, b, c)
9050     if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
9051       SDLoc DL(N);
9052       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
9053                          DL,
9054                          N->getValueType(0),
9055                          Op0,
9056                          Op1.getOperand(0),
9057                          Op1.getOperand(1));
9058     }
9059   }
9060 
9061   // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
9062   if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
9063     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
9064       return Med3;
9065   }
9066 
9067   if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
9068     if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
9069       return Med3;
9070   }
9071 
9072   // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
9073   if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
9074        (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) ||
9075        (Opc == AMDGPUISD::FMIN_LEGACY &&
9076         Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
9077       (VT == MVT::f32 || VT == MVT::f64 ||
9078        (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
9079        (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
9080       Op0.hasOneUse()) {
9081     if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
9082       return Res;
9083   }
9084 
9085   return SDValue();
9086 }
9087 
9088 static bool isClampZeroToOne(SDValue A, SDValue B) {
9089   if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
9090     if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
9091       // FIXME: Should this be allowing -0.0?
9092       return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
9093              (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
9094     }
9095   }
9096 
9097   return false;
9098 }
9099 
9100 // FIXME: Should only worry about snans for version with chain.
9101 SDValue SITargetLowering::performFMed3Combine(SDNode *N,
9102                                               DAGCombinerInfo &DCI) const {
9103   EVT VT = N->getValueType(0);
9104   // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
9105   // NaNs. With a NaN input, the order of the operands may change the result.
9106 
9107   SelectionDAG &DAG = DCI.DAG;
9108   SDLoc SL(N);
9109 
9110   SDValue Src0 = N->getOperand(0);
9111   SDValue Src1 = N->getOperand(1);
9112   SDValue Src2 = N->getOperand(2);
9113 
9114   if (isClampZeroToOne(Src0, Src1)) {
9115     // const_a, const_b, x -> clamp is safe in all cases including signaling
9116     // nans.
9117     // FIXME: Should this be allowing -0.0?
9118     return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
9119   }
9120 
9121   const MachineFunction &MF = DAG.getMachineFunction();
9122   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
9123 
9124   // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
9125   // handling no dx10-clamp?
9126   if (Info->getMode().DX10Clamp) {
9127     // If NaNs is clamped to 0, we are free to reorder the inputs.
9128 
9129     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
9130       std::swap(Src0, Src1);
9131 
9132     if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
9133       std::swap(Src1, Src2);
9134 
9135     if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
9136       std::swap(Src0, Src1);
9137 
9138     if (isClampZeroToOne(Src1, Src2))
9139       return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
9140   }
9141 
9142   return SDValue();
9143 }
9144 
9145 SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
9146                                                  DAGCombinerInfo &DCI) const {
9147   SDValue Src0 = N->getOperand(0);
9148   SDValue Src1 = N->getOperand(1);
9149   if (Src0.isUndef() && Src1.isUndef())
9150     return DCI.DAG.getUNDEF(N->getValueType(0));
9151   return SDValue();
9152 }
9153 
9154 SDValue SITargetLowering::performExtractVectorEltCombine(
9155   SDNode *N, DAGCombinerInfo &DCI) const {
9156   SDValue Vec = N->getOperand(0);
9157   SelectionDAG &DAG = DCI.DAG;
9158 
9159   EVT VecVT = Vec.getValueType();
9160   EVT EltVT = VecVT.getVectorElementType();
9161 
9162   if ((Vec.getOpcode() == ISD::FNEG ||
9163        Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
9164     SDLoc SL(N);
9165     EVT EltVT = N->getValueType(0);
9166     SDValue Idx = N->getOperand(1);
9167     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
9168                               Vec.getOperand(0), Idx);
9169     return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
9170   }
9171 
9172   // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
9173   //    =>
9174   // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
9175   // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
9176   // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
9177   if (Vec.hasOneUse() && DCI.isBeforeLegalize()) {
9178     SDLoc SL(N);
9179     EVT EltVT = N->getValueType(0);
9180     SDValue Idx = N->getOperand(1);
9181     unsigned Opc = Vec.getOpcode();
9182 
9183     switch(Opc) {
9184     default:
9185       break;
9186       // TODO: Support other binary operations.
9187     case ISD::FADD:
9188     case ISD::FSUB:
9189     case ISD::FMUL:
9190     case ISD::ADD:
9191     case ISD::UMIN:
9192     case ISD::UMAX:
9193     case ISD::SMIN:
9194     case ISD::SMAX:
9195     case ISD::FMAXNUM:
9196     case ISD::FMINNUM:
9197     case ISD::FMAXNUM_IEEE:
9198     case ISD::FMINNUM_IEEE: {
9199       SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
9200                                  Vec.getOperand(0), Idx);
9201       SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
9202                                  Vec.getOperand(1), Idx);
9203 
9204       DCI.AddToWorklist(Elt0.getNode());
9205       DCI.AddToWorklist(Elt1.getNode());
9206       return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags());
9207     }
9208     }
9209   }
9210 
9211   unsigned VecSize = VecVT.getSizeInBits();
9212   unsigned EltSize = EltVT.getSizeInBits();
9213 
9214   // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx)
9215   // This elminates non-constant index and subsequent movrel or scratch access.
9216   // Sub-dword vectors of size 2 dword or less have better implementation.
9217   // Vectors of size bigger than 8 dwords would yield too many v_cndmask_b32
9218   // instructions.
9219   if (VecSize <= 256 && (VecSize > 64 || EltSize >= 32) &&
9220       !isa<ConstantSDNode>(N->getOperand(1))) {
9221     SDLoc SL(N);
9222     SDValue Idx = N->getOperand(1);
9223     SDValue V;
9224     for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
9225       SDValue IC = DAG.getVectorIdxConstant(I, SL);
9226       SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
9227       if (I == 0)
9228         V = Elt;
9229       else
9230         V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ);
9231     }
9232     return V;
9233   }
9234 
9235   if (!DCI.isBeforeLegalize())
9236     return SDValue();
9237 
9238   // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit
9239   // elements. This exposes more load reduction opportunities by replacing
9240   // multiple small extract_vector_elements with a single 32-bit extract.
9241   auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
9242   if (isa<MemSDNode>(Vec) &&
9243       EltSize <= 16 &&
9244       EltVT.isByteSized() &&
9245       VecSize > 32 &&
9246       VecSize % 32 == 0 &&
9247       Idx) {
9248     EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
9249 
9250     unsigned BitIndex = Idx->getZExtValue() * EltSize;
9251     unsigned EltIdx = BitIndex / 32;
9252     unsigned LeftoverBitIdx = BitIndex % 32;
9253     SDLoc SL(N);
9254 
9255     SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
9256     DCI.AddToWorklist(Cast.getNode());
9257 
9258     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
9259                               DAG.getConstant(EltIdx, SL, MVT::i32));
9260     DCI.AddToWorklist(Elt.getNode());
9261     SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
9262                               DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
9263     DCI.AddToWorklist(Srl.getNode());
9264 
9265     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl);
9266     DCI.AddToWorklist(Trunc.getNode());
9267     return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc);
9268   }
9269 
9270   return SDValue();
9271 }
9272 
9273 SDValue
9274 SITargetLowering::performInsertVectorEltCombine(SDNode *N,
9275                                                 DAGCombinerInfo &DCI) const {
9276   SDValue Vec = N->getOperand(0);
9277   SDValue Idx = N->getOperand(2);
9278   EVT VecVT = Vec.getValueType();
9279   EVT EltVT = VecVT.getVectorElementType();
9280   unsigned VecSize = VecVT.getSizeInBits();
9281   unsigned EltSize = EltVT.getSizeInBits();
9282 
9283   // INSERT_VECTOR_ELT (<n x e>, var-idx)
9284   // => BUILD_VECTOR n x select (e, const-idx)
9285   // This elminates non-constant index and subsequent movrel or scratch access.
9286   // Sub-dword vectors of size 2 dword or less have better implementation.
9287   // Vectors of size bigger than 8 dwords would yield too many v_cndmask_b32
9288   // instructions.
9289   if (isa<ConstantSDNode>(Idx) ||
9290       VecSize > 256 || (VecSize <= 64 && EltSize < 32))
9291     return SDValue();
9292 
9293   SelectionDAG &DAG = DCI.DAG;
9294   SDLoc SL(N);
9295   SDValue Ins = N->getOperand(1);
9296   EVT IdxVT = Idx.getValueType();
9297 
9298   SmallVector<SDValue, 16> Ops;
9299   for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
9300     SDValue IC = DAG.getConstant(I, SL, IdxVT);
9301     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
9302     SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ);
9303     Ops.push_back(V);
9304   }
9305 
9306   return DAG.getBuildVector(VecVT, SL, Ops);
9307 }
9308 
9309 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
9310                                           const SDNode *N0,
9311                                           const SDNode *N1) const {
9312   EVT VT = N0->getValueType(0);
9313 
9314   // Only do this if we are not trying to support denormals. v_mad_f32 does not
9315   // support denormals ever.
9316   if (((VT == MVT::f32 && !hasFP32Denormals(DAG.getMachineFunction())) ||
9317        (VT == MVT::f16 && !hasFP64FP16Denormals(DAG.getMachineFunction()) &&
9318         getSubtarget()->hasMadF16())) &&
9319        isOperationLegal(ISD::FMAD, VT))
9320     return ISD::FMAD;
9321 
9322   const TargetOptions &Options = DAG.getTarget().Options;
9323   if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
9324        (N0->getFlags().hasAllowContract() &&
9325         N1->getFlags().hasAllowContract())) &&
9326       isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
9327     return ISD::FMA;
9328   }
9329 
9330   return 0;
9331 }
9332 
9333 // For a reassociatable opcode perform:
9334 // op x, (op y, z) -> op (op x, z), y, if x and z are uniform
9335 SDValue SITargetLowering::reassociateScalarOps(SDNode *N,
9336                                                SelectionDAG &DAG) const {
9337   EVT VT = N->getValueType(0);
9338   if (VT != MVT::i32 && VT != MVT::i64)
9339     return SDValue();
9340 
9341   unsigned Opc = N->getOpcode();
9342   SDValue Op0 = N->getOperand(0);
9343   SDValue Op1 = N->getOperand(1);
9344 
9345   if (!(Op0->isDivergent() ^ Op1->isDivergent()))
9346     return SDValue();
9347 
9348   if (Op0->isDivergent())
9349     std::swap(Op0, Op1);
9350 
9351   if (Op1.getOpcode() != Opc || !Op1.hasOneUse())
9352     return SDValue();
9353 
9354   SDValue Op2 = Op1.getOperand(1);
9355   Op1 = Op1.getOperand(0);
9356   if (!(Op1->isDivergent() ^ Op2->isDivergent()))
9357     return SDValue();
9358 
9359   if (Op1->isDivergent())
9360     std::swap(Op1, Op2);
9361 
9362   // If either operand is constant this will conflict with
9363   // DAGCombiner::ReassociateOps().
9364   if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) ||
9365       DAG.isConstantIntBuildVectorOrConstantInt(Op1))
9366     return SDValue();
9367 
9368   SDLoc SL(N);
9369   SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1);
9370   return DAG.getNode(Opc, SL, VT, Add1, Op2);
9371 }
9372 
9373 static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
9374                            EVT VT,
9375                            SDValue N0, SDValue N1, SDValue N2,
9376                            bool Signed) {
9377   unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
9378   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
9379   SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
9380   return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
9381 }
9382 
9383 SDValue SITargetLowering::performAddCombine(SDNode *N,
9384                                             DAGCombinerInfo &DCI) const {
9385   SelectionDAG &DAG = DCI.DAG;
9386   EVT VT = N->getValueType(0);
9387   SDLoc SL(N);
9388   SDValue LHS = N->getOperand(0);
9389   SDValue RHS = N->getOperand(1);
9390 
9391   if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
9392       && Subtarget->hasMad64_32() &&
9393       !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
9394       VT.getScalarSizeInBits() <= 64) {
9395     if (LHS.getOpcode() != ISD::MUL)
9396       std::swap(LHS, RHS);
9397 
9398     SDValue MulLHS = LHS.getOperand(0);
9399     SDValue MulRHS = LHS.getOperand(1);
9400     SDValue AddRHS = RHS;
9401 
9402     // TODO: Maybe restrict if SGPR inputs.
9403     if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
9404         numBitsUnsigned(MulRHS, DAG) <= 32) {
9405       MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
9406       MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
9407       AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
9408       return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
9409     }
9410 
9411     if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
9412       MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
9413       MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
9414       AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
9415       return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
9416     }
9417 
9418     return SDValue();
9419   }
9420 
9421   if (SDValue V = reassociateScalarOps(N, DAG)) {
9422     return V;
9423   }
9424 
9425   if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
9426     return SDValue();
9427 
9428   // add x, zext (setcc) => addcarry x, 0, setcc
9429   // add x, sext (setcc) => subcarry x, 0, setcc
9430   unsigned Opc = LHS.getOpcode();
9431   if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
9432       Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
9433     std::swap(RHS, LHS);
9434 
9435   Opc = RHS.getOpcode();
9436   switch (Opc) {
9437   default: break;
9438   case ISD::ZERO_EXTEND:
9439   case ISD::SIGN_EXTEND:
9440   case ISD::ANY_EXTEND: {
9441     auto Cond = RHS.getOperand(0);
9442     // If this won't be a real VOPC output, we would still need to insert an
9443     // extra instruction anyway.
9444     if (!isBoolSGPR(Cond))
9445       break;
9446     SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
9447     SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
9448     Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
9449     return DAG.getNode(Opc, SL, VTList, Args);
9450   }
9451   case ISD::ADDCARRY: {
9452     // add x, (addcarry y, 0, cc) => addcarry x, y, cc
9453     auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
9454     if (!C || C->getZExtValue() != 0) break;
9455     SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
9456     return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
9457   }
9458   }
9459   return SDValue();
9460 }
9461 
9462 SDValue SITargetLowering::performSubCombine(SDNode *N,
9463                                             DAGCombinerInfo &DCI) const {
9464   SelectionDAG &DAG = DCI.DAG;
9465   EVT VT = N->getValueType(0);
9466 
9467   if (VT != MVT::i32)
9468     return SDValue();
9469 
9470   SDLoc SL(N);
9471   SDValue LHS = N->getOperand(0);
9472   SDValue RHS = N->getOperand(1);
9473 
9474   // sub x, zext (setcc) => subcarry x, 0, setcc
9475   // sub x, sext (setcc) => addcarry x, 0, setcc
9476   unsigned Opc = RHS.getOpcode();
9477   switch (Opc) {
9478   default: break;
9479   case ISD::ZERO_EXTEND:
9480   case ISD::SIGN_EXTEND:
9481   case ISD::ANY_EXTEND: {
9482     auto Cond = RHS.getOperand(0);
9483     // If this won't be a real VOPC output, we would still need to insert an
9484     // extra instruction anyway.
9485     if (!isBoolSGPR(Cond))
9486       break;
9487     SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
9488     SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
9489     Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::ADDCARRY : ISD::SUBCARRY;
9490     return DAG.getNode(Opc, SL, VTList, Args);
9491   }
9492   }
9493 
9494   if (LHS.getOpcode() == ISD::SUBCARRY) {
9495     // sub (subcarry x, 0, cc), y => subcarry x, y, cc
9496     auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
9497     if (!C || !C->isNullValue())
9498       return SDValue();
9499     SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
9500     return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
9501   }
9502   return SDValue();
9503 }
9504 
9505 SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
9506   DAGCombinerInfo &DCI) const {
9507 
9508   if (N->getValueType(0) != MVT::i32)
9509     return SDValue();
9510 
9511   auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
9512   if (!C || C->getZExtValue() != 0)
9513     return SDValue();
9514 
9515   SelectionDAG &DAG = DCI.DAG;
9516   SDValue LHS = N->getOperand(0);
9517 
9518   // addcarry (add x, y), 0, cc => addcarry x, y, cc
9519   // subcarry (sub x, y), 0, cc => subcarry x, y, cc
9520   unsigned LHSOpc = LHS.getOpcode();
9521   unsigned Opc = N->getOpcode();
9522   if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
9523       (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
9524     SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
9525     return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
9526   }
9527   return SDValue();
9528 }
9529 
9530 SDValue SITargetLowering::performFAddCombine(SDNode *N,
9531                                              DAGCombinerInfo &DCI) const {
9532   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
9533     return SDValue();
9534 
9535   SelectionDAG &DAG = DCI.DAG;
9536   EVT VT = N->getValueType(0);
9537 
9538   SDLoc SL(N);
9539   SDValue LHS = N->getOperand(0);
9540   SDValue RHS = N->getOperand(1);
9541 
9542   // These should really be instruction patterns, but writing patterns with
9543   // source modiifiers is a pain.
9544 
9545   // fadd (fadd (a, a), b) -> mad 2.0, a, b
9546   if (LHS.getOpcode() == ISD::FADD) {
9547     SDValue A = LHS.getOperand(0);
9548     if (A == LHS.getOperand(1)) {
9549       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
9550       if (FusedOp != 0) {
9551         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9552         return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
9553       }
9554     }
9555   }
9556 
9557   // fadd (b, fadd (a, a)) -> mad 2.0, a, b
9558   if (RHS.getOpcode() == ISD::FADD) {
9559     SDValue A = RHS.getOperand(0);
9560     if (A == RHS.getOperand(1)) {
9561       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
9562       if (FusedOp != 0) {
9563         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9564         return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
9565       }
9566     }
9567   }
9568 
9569   return SDValue();
9570 }
9571 
9572 SDValue SITargetLowering::performFSubCombine(SDNode *N,
9573                                              DAGCombinerInfo &DCI) const {
9574   if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
9575     return SDValue();
9576 
9577   SelectionDAG &DAG = DCI.DAG;
9578   SDLoc SL(N);
9579   EVT VT = N->getValueType(0);
9580   assert(!VT.isVector());
9581 
9582   // Try to get the fneg to fold into the source modifier. This undoes generic
9583   // DAG combines and folds them into the mad.
9584   //
9585   // Only do this if we are not trying to support denormals. v_mad_f32 does
9586   // not support denormals ever.
9587   SDValue LHS = N->getOperand(0);
9588   SDValue RHS = N->getOperand(1);
9589   if (LHS.getOpcode() == ISD::FADD) {
9590     // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
9591     SDValue A = LHS.getOperand(0);
9592     if (A == LHS.getOperand(1)) {
9593       unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
9594       if (FusedOp != 0){
9595         const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9596         SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
9597 
9598         return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
9599       }
9600     }
9601   }
9602 
9603   if (RHS.getOpcode() == ISD::FADD) {
9604     // (fsub c, (fadd a, a)) -> mad -2.0, a, c
9605 
9606     SDValue A = RHS.getOperand(0);
9607     if (A == RHS.getOperand(1)) {
9608       unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
9609       if (FusedOp != 0){
9610         const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
9611         return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
9612       }
9613     }
9614   }
9615 
9616   return SDValue();
9617 }
9618 
9619 SDValue SITargetLowering::performFMACombine(SDNode *N,
9620                                             DAGCombinerInfo &DCI) const {
9621   SelectionDAG &DAG = DCI.DAG;
9622   EVT VT = N->getValueType(0);
9623   SDLoc SL(N);
9624 
9625   if (!Subtarget->hasDot2Insts() || VT != MVT::f32)
9626     return SDValue();
9627 
9628   // FMA((F32)S0.x, (F32)S1. x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
9629   //   FDOT2((V2F16)S0, (V2F16)S1, (F32)z))
9630   SDValue Op1 = N->getOperand(0);
9631   SDValue Op2 = N->getOperand(1);
9632   SDValue FMA = N->getOperand(2);
9633 
9634   if (FMA.getOpcode() != ISD::FMA ||
9635       Op1.getOpcode() != ISD::FP_EXTEND ||
9636       Op2.getOpcode() != ISD::FP_EXTEND)
9637     return SDValue();
9638 
9639   // fdot2_f32_f16 always flushes fp32 denormal operand and output to zero,
9640   // regardless of the denorm mode setting. Therefore, unsafe-fp-math/fp-contract
9641   // is sufficient to allow generaing fdot2.
9642   const TargetOptions &Options = DAG.getTarget().Options;
9643   if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
9644       (N->getFlags().hasAllowContract() &&
9645        FMA->getFlags().hasAllowContract())) {
9646     Op1 = Op1.getOperand(0);
9647     Op2 = Op2.getOperand(0);
9648     if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9649         Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9650       return SDValue();
9651 
9652     SDValue Vec1 = Op1.getOperand(0);
9653     SDValue Idx1 = Op1.getOperand(1);
9654     SDValue Vec2 = Op2.getOperand(0);
9655 
9656     SDValue FMAOp1 = FMA.getOperand(0);
9657     SDValue FMAOp2 = FMA.getOperand(1);
9658     SDValue FMAAcc = FMA.getOperand(2);
9659 
9660     if (FMAOp1.getOpcode() != ISD::FP_EXTEND ||
9661         FMAOp2.getOpcode() != ISD::FP_EXTEND)
9662       return SDValue();
9663 
9664     FMAOp1 = FMAOp1.getOperand(0);
9665     FMAOp2 = FMAOp2.getOperand(0);
9666     if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9667         FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9668       return SDValue();
9669 
9670     SDValue Vec3 = FMAOp1.getOperand(0);
9671     SDValue Vec4 = FMAOp2.getOperand(0);
9672     SDValue Idx2 = FMAOp1.getOperand(1);
9673 
9674     if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) ||
9675         // Idx1 and Idx2 cannot be the same.
9676         Idx1 == Idx2)
9677       return SDValue();
9678 
9679     if (Vec1 == Vec2 || Vec3 == Vec4)
9680       return SDValue();
9681 
9682     if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16)
9683       return SDValue();
9684 
9685     if ((Vec1 == Vec3 && Vec2 == Vec4) ||
9686         (Vec1 == Vec4 && Vec2 == Vec3)) {
9687       return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc,
9688                          DAG.getTargetConstant(0, SL, MVT::i1));
9689     }
9690   }
9691   return SDValue();
9692 }
9693 
9694 SDValue SITargetLowering::performSetCCCombine(SDNode *N,
9695                                               DAGCombinerInfo &DCI) const {
9696   SelectionDAG &DAG = DCI.DAG;
9697   SDLoc SL(N);
9698 
9699   SDValue LHS = N->getOperand(0);
9700   SDValue RHS = N->getOperand(1);
9701   EVT VT = LHS.getValueType();
9702   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
9703 
9704   auto CRHS = dyn_cast<ConstantSDNode>(RHS);
9705   if (!CRHS) {
9706     CRHS = dyn_cast<ConstantSDNode>(LHS);
9707     if (CRHS) {
9708       std::swap(LHS, RHS);
9709       CC = getSetCCSwappedOperands(CC);
9710     }
9711   }
9712 
9713   if (CRHS) {
9714     if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
9715         isBoolSGPR(LHS.getOperand(0))) {
9716       // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
9717       // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
9718       // setcc (sext from i1 cc),  0, eq|sge|ule) => not cc => xor cc, -1
9719       // setcc (sext from i1 cc),  0, ne|ugt|slt) => cc
9720       if ((CRHS->isAllOnesValue() &&
9721            (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
9722           (CRHS->isNullValue() &&
9723            (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
9724         return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
9725                            DAG.getConstant(-1, SL, MVT::i1));
9726       if ((CRHS->isAllOnesValue() &&
9727            (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
9728           (CRHS->isNullValue() &&
9729            (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
9730         return LHS.getOperand(0);
9731     }
9732 
9733     uint64_t CRHSVal = CRHS->getZExtValue();
9734     if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
9735         LHS.getOpcode() == ISD::SELECT &&
9736         isa<ConstantSDNode>(LHS.getOperand(1)) &&
9737         isa<ConstantSDNode>(LHS.getOperand(2)) &&
9738         LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) &&
9739         isBoolSGPR(LHS.getOperand(0))) {
9740       // Given CT != FT:
9741       // setcc (select cc, CT, CF), CF, eq => xor cc, -1
9742       // setcc (select cc, CT, CF), CF, ne => cc
9743       // setcc (select cc, CT, CF), CT, ne => xor cc, -1
9744       // setcc (select cc, CT, CF), CT, eq => cc
9745       uint64_t CT = LHS.getConstantOperandVal(1);
9746       uint64_t CF = LHS.getConstantOperandVal(2);
9747 
9748       if ((CF == CRHSVal && CC == ISD::SETEQ) ||
9749           (CT == CRHSVal && CC == ISD::SETNE))
9750         return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
9751                            DAG.getConstant(-1, SL, MVT::i1));
9752       if ((CF == CRHSVal && CC == ISD::SETNE) ||
9753           (CT == CRHSVal && CC == ISD::SETEQ))
9754         return LHS.getOperand(0);
9755     }
9756   }
9757 
9758   if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
9759                                            VT != MVT::f16))
9760     return SDValue();
9761 
9762   // Match isinf/isfinite pattern
9763   // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
9764   // (fcmp one (fabs x), inf) -> (fp_class x,
9765   // (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero)
9766   if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) {
9767     const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
9768     if (!CRHS)
9769       return SDValue();
9770 
9771     const APFloat &APF = CRHS->getValueAPF();
9772     if (APF.isInfinity() && !APF.isNegative()) {
9773       const unsigned IsInfMask = SIInstrFlags::P_INFINITY |
9774                                  SIInstrFlags::N_INFINITY;
9775       const unsigned IsFiniteMask = SIInstrFlags::N_ZERO |
9776                                     SIInstrFlags::P_ZERO |
9777                                     SIInstrFlags::N_NORMAL |
9778                                     SIInstrFlags::P_NORMAL |
9779                                     SIInstrFlags::N_SUBNORMAL |
9780                                     SIInstrFlags::P_SUBNORMAL;
9781       unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask;
9782       return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
9783                          DAG.getConstant(Mask, SL, MVT::i32));
9784     }
9785   }
9786 
9787   return SDValue();
9788 }
9789 
9790 SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
9791                                                      DAGCombinerInfo &DCI) const {
9792   SelectionDAG &DAG = DCI.DAG;
9793   SDLoc SL(N);
9794   unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
9795 
9796   SDValue Src = N->getOperand(0);
9797   SDValue Srl = N->getOperand(0);
9798   if (Srl.getOpcode() == ISD::ZERO_EXTEND)
9799     Srl = Srl.getOperand(0);
9800 
9801   // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
9802   if (Srl.getOpcode() == ISD::SRL) {
9803     // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
9804     // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
9805     // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
9806 
9807     if (const ConstantSDNode *C =
9808         dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
9809       Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
9810                                EVT(MVT::i32));
9811 
9812       unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
9813       if (SrcOffset < 32 && SrcOffset % 8 == 0) {
9814         return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
9815                            MVT::f32, Srl);
9816       }
9817     }
9818   }
9819 
9820   APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
9821 
9822   KnownBits Known;
9823   TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
9824                                         !DCI.isBeforeLegalizeOps());
9825   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9826   if (TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
9827     DCI.CommitTargetLoweringOpt(TLO);
9828   }
9829 
9830   return SDValue();
9831 }
9832 
9833 SDValue SITargetLowering::performClampCombine(SDNode *N,
9834                                               DAGCombinerInfo &DCI) const {
9835   ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
9836   if (!CSrc)
9837     return SDValue();
9838 
9839   const MachineFunction &MF = DCI.DAG.getMachineFunction();
9840   const APFloat &F = CSrc->getValueAPF();
9841   APFloat Zero = APFloat::getZero(F.getSemantics());
9842   APFloat::cmpResult Cmp0 = F.compare(Zero);
9843   if (Cmp0 == APFloat::cmpLessThan ||
9844       (Cmp0 == APFloat::cmpUnordered &&
9845        MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) {
9846     return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
9847   }
9848 
9849   APFloat One(F.getSemantics(), "1.0");
9850   APFloat::cmpResult Cmp1 = F.compare(One);
9851   if (Cmp1 == APFloat::cmpGreaterThan)
9852     return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));
9853 
9854   return SDValue(CSrc, 0);
9855 }
9856 
9857 
9858 SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
9859                                             DAGCombinerInfo &DCI) const {
9860   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
9861     return SDValue();
9862   switch (N->getOpcode()) {
9863   default:
9864     return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
9865   case ISD::ADD:
9866     return performAddCombine(N, DCI);
9867   case ISD::SUB:
9868     return performSubCombine(N, DCI);
9869   case ISD::ADDCARRY:
9870   case ISD::SUBCARRY:
9871     return performAddCarrySubCarryCombine(N, DCI);
9872   case ISD::FADD:
9873     return performFAddCombine(N, DCI);
9874   case ISD::FSUB:
9875     return performFSubCombine(N, DCI);
9876   case ISD::SETCC:
9877     return performSetCCCombine(N, DCI);
9878   case ISD::FMAXNUM:
9879   case ISD::FMINNUM:
9880   case ISD::FMAXNUM_IEEE:
9881   case ISD::FMINNUM_IEEE:
9882   case ISD::SMAX:
9883   case ISD::SMIN:
9884   case ISD::UMAX:
9885   case ISD::UMIN:
9886   case AMDGPUISD::FMIN_LEGACY:
9887   case AMDGPUISD::FMAX_LEGACY:
9888     return performMinMaxCombine(N, DCI);
9889   case ISD::FMA:
9890     return performFMACombine(N, DCI);
9891   case ISD::LOAD: {
9892     if (SDValue Widended = widenLoad(cast<LoadSDNode>(N), DCI))
9893       return Widended;
9894     LLVM_FALLTHROUGH;
9895   }
9896   case ISD::STORE:
9897   case ISD::ATOMIC_LOAD:
9898   case ISD::ATOMIC_STORE:
9899   case ISD::ATOMIC_CMP_SWAP:
9900   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
9901   case ISD::ATOMIC_SWAP:
9902   case ISD::ATOMIC_LOAD_ADD:
9903   case ISD::ATOMIC_LOAD_SUB:
9904   case ISD::ATOMIC_LOAD_AND:
9905   case ISD::ATOMIC_LOAD_OR:
9906   case ISD::ATOMIC_LOAD_XOR:
9907   case ISD::ATOMIC_LOAD_NAND:
9908   case ISD::ATOMIC_LOAD_MIN:
9909   case ISD::ATOMIC_LOAD_MAX:
9910   case ISD::ATOMIC_LOAD_UMIN:
9911   case ISD::ATOMIC_LOAD_UMAX:
9912   case ISD::ATOMIC_LOAD_FADD:
9913   case AMDGPUISD::ATOMIC_INC:
9914   case AMDGPUISD::ATOMIC_DEC:
9915   case AMDGPUISD::ATOMIC_LOAD_FMIN:
9916   case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
9917     if (DCI.isBeforeLegalize())
9918       break;
9919     return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
9920   case ISD::AND:
9921     return performAndCombine(N, DCI);
9922   case ISD::OR:
9923     return performOrCombine(N, DCI);
9924   case ISD::XOR:
9925     return performXorCombine(N, DCI);
9926   case ISD::ZERO_EXTEND:
9927     return performZeroExtendCombine(N, DCI);
9928   case ISD::SIGN_EXTEND_INREG:
9929     return performSignExtendInRegCombine(N , DCI);
9930   case AMDGPUISD::FP_CLASS:
9931     return performClassCombine(N, DCI);
9932   case ISD::FCANONICALIZE:
9933     return performFCanonicalizeCombine(N, DCI);
9934   case AMDGPUISD::RCP:
9935     return performRcpCombine(N, DCI);
9936   case AMDGPUISD::FRACT:
9937   case AMDGPUISD::RSQ:
9938   case AMDGPUISD::RCP_LEGACY:
9939   case AMDGPUISD::RSQ_LEGACY:
9940   case AMDGPUISD::RCP_IFLAG:
9941   case AMDGPUISD::RSQ_CLAMP:
9942   case AMDGPUISD::LDEXP: {
9943     SDValue Src = N->getOperand(0);
9944     if (Src.isUndef())
9945       return Src;
9946     break;
9947   }
9948   case ISD::SINT_TO_FP:
9949   case ISD::UINT_TO_FP:
9950     return performUCharToFloatCombine(N, DCI);
9951   case AMDGPUISD::CVT_F32_UBYTE0:
9952   case AMDGPUISD::CVT_F32_UBYTE1:
9953   case AMDGPUISD::CVT_F32_UBYTE2:
9954   case AMDGPUISD::CVT_F32_UBYTE3:
9955     return performCvtF32UByteNCombine(N, DCI);
9956   case AMDGPUISD::FMED3:
9957     return performFMed3Combine(N, DCI);
9958   case AMDGPUISD::CVT_PKRTZ_F16_F32:
9959     return performCvtPkRTZCombine(N, DCI);
9960   case AMDGPUISD::CLAMP:
9961     return performClampCombine(N, DCI);
9962   case ISD::SCALAR_TO_VECTOR: {
9963     SelectionDAG &DAG = DCI.DAG;
9964     EVT VT = N->getValueType(0);
9965 
9966     // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
9967     if (VT == MVT::v2i16 || VT == MVT::v2f16) {
9968       SDLoc SL(N);
9969       SDValue Src = N->getOperand(0);
9970       EVT EltVT = Src.getValueType();
9971       if (EltVT == MVT::f16)
9972         Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
9973 
9974       SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
9975       return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
9976     }
9977 
9978     break;
9979   }
9980   case ISD::EXTRACT_VECTOR_ELT:
9981     return performExtractVectorEltCombine(N, DCI);
9982   case ISD::INSERT_VECTOR_ELT:
9983     return performInsertVectorEltCombine(N, DCI);
9984   }
9985   return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
9986 }
9987 
9988 /// Helper function for adjustWritemask
9989 static unsigned SubIdx2Lane(unsigned Idx) {
9990   switch (Idx) {
9991   default: return 0;
9992   case AMDGPU::sub0: return 0;
9993   case AMDGPU::sub1: return 1;
9994   case AMDGPU::sub2: return 2;
9995   case AMDGPU::sub3: return 3;
9996   case AMDGPU::sub4: return 4; // Possible with TFE/LWE
9997   }
9998 }
9999 
10000 /// Adjust the writemask of MIMG instructions
10001 SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
10002                                           SelectionDAG &DAG) const {
10003   unsigned Opcode = Node->getMachineOpcode();
10004 
10005   // Subtract 1 because the vdata output is not a MachineSDNode operand.
10006   int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1;
10007   if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx))
10008     return Node; // not implemented for D16
10009 
10010   SDNode *Users[5] = { nullptr };
10011   unsigned Lane = 0;
10012   unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1;
10013   unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
10014   unsigned NewDmask = 0;
10015   unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1;
10016   unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1;
10017   bool UsesTFC = (Node->getConstantOperandVal(TFEIdx) ||
10018                   Node->getConstantOperandVal(LWEIdx)) ? 1 : 0;
10019   unsigned TFCLane = 0;
10020   bool HasChain = Node->getNumValues() > 1;
10021 
10022   if (OldDmask == 0) {
10023     // These are folded out, but on the chance it happens don't assert.
10024     return Node;
10025   }
10026 
10027   unsigned OldBitsSet = countPopulation(OldDmask);
10028   // Work out which is the TFE/LWE lane if that is enabled.
10029   if (UsesTFC) {
10030     TFCLane = OldBitsSet;
10031   }
10032 
10033   // Try to figure out the used register components
10034   for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
10035        I != E; ++I) {
10036 
10037     // Don't look at users of the chain.
10038     if (I.getUse().getResNo() != 0)
10039       continue;
10040 
10041     // Abort if we can't understand the usage
10042     if (!I->isMachineOpcode() ||
10043         I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
10044       return Node;
10045 
10046     // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
10047     // Note that subregs are packed, i.e. Lane==0 is the first bit set
10048     // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
10049     // set, etc.
10050     Lane = SubIdx2Lane(I->getConstantOperandVal(1));
10051 
10052     // Check if the use is for the TFE/LWE generated result at VGPRn+1.
10053     if (UsesTFC && Lane == TFCLane) {
10054       Users[Lane] = *I;
10055     } else {
10056       // Set which texture component corresponds to the lane.
10057       unsigned Comp;
10058       for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) {
10059         Comp = countTrailingZeros(Dmask);
10060         Dmask &= ~(1 << Comp);
10061       }
10062 
10063       // Abort if we have more than one user per component.
10064       if (Users[Lane])
10065         return Node;
10066 
10067       Users[Lane] = *I;
10068       NewDmask |= 1 << Comp;
10069     }
10070   }
10071 
10072   // Don't allow 0 dmask, as hardware assumes one channel enabled.
10073   bool NoChannels = !NewDmask;
10074   if (NoChannels) {
10075     if (!UsesTFC) {
10076       // No uses of the result and not using TFC. Then do nothing.
10077       return Node;
10078     }
10079     // If the original dmask has one channel - then nothing to do
10080     if (OldBitsSet == 1)
10081       return Node;
10082     // Use an arbitrary dmask - required for the instruction to work
10083     NewDmask = 1;
10084   }
10085   // Abort if there's no change
10086   if (NewDmask == OldDmask)
10087     return Node;
10088 
10089   unsigned BitsSet = countPopulation(NewDmask);
10090 
10091   // Check for TFE or LWE - increase the number of channels by one to account
10092   // for the extra return value
10093   // This will need adjustment for D16 if this is also included in
10094   // adjustWriteMask (this function) but at present D16 are excluded.
10095   unsigned NewChannels = BitsSet + UsesTFC;
10096 
10097   int NewOpcode =
10098       AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels);
10099   assert(NewOpcode != -1 &&
10100          NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
10101          "failed to find equivalent MIMG op");
10102 
10103   // Adjust the writemask in the node
10104   SmallVector<SDValue, 12> Ops;
10105   Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
10106   Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
10107   Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
10108 
10109   MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
10110 
10111   MVT ResultVT = NewChannels == 1 ?
10112     SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 :
10113                            NewChannels == 5 ? 8 : NewChannels);
10114   SDVTList NewVTList = HasChain ?
10115     DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);
10116 
10117 
10118   MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
10119                                               NewVTList, Ops);
10120 
10121   if (HasChain) {
10122     // Update chain.
10123     DAG.setNodeMemRefs(NewNode, Node->memoperands());
10124     DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
10125   }
10126 
10127   if (NewChannels == 1) {
10128     assert(Node->hasNUsesOfValue(1, 0));
10129     SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
10130                                       SDLoc(Node), Users[Lane]->getValueType(0),
10131                                       SDValue(NewNode, 0));
10132     DAG.ReplaceAllUsesWith(Users[Lane], Copy);
10133     return nullptr;
10134   }
10135 
10136   // Update the users of the node with the new indices
10137   for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) {
10138     SDNode *User = Users[i];
10139     if (!User) {
10140       // Handle the special case of NoChannels. We set NewDmask to 1 above, but
10141       // Users[0] is still nullptr because channel 0 doesn't really have a use.
10142       if (i || !NoChannels)
10143         continue;
10144     } else {
10145       SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
10146       DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
10147     }
10148 
10149     switch (Idx) {
10150     default: break;
10151     case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
10152     case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
10153     case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
10154     case AMDGPU::sub3: Idx = AMDGPU::sub4; break;
10155     }
10156   }
10157 
10158   DAG.RemoveDeadNode(Node);
10159   return nullptr;
10160 }
10161 
10162 static bool isFrameIndexOp(SDValue Op) {
10163   if (Op.getOpcode() == ISD::AssertZext)
10164     Op = Op.getOperand(0);
10165 
10166   return isa<FrameIndexSDNode>(Op);
10167 }
10168 
10169 /// Legalize target independent instructions (e.g. INSERT_SUBREG)
10170 /// with frame index operands.
10171 /// LLVM assumes that inputs are to these instructions are registers.
10172 SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
10173                                                         SelectionDAG &DAG) const {
10174   if (Node->getOpcode() == ISD::CopyToReg) {
10175     RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
10176     SDValue SrcVal = Node->getOperand(2);
10177 
10178     // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
10179     // to try understanding copies to physical registers.
10180     if (SrcVal.getValueType() == MVT::i1 &&
10181         Register::isPhysicalRegister(DestReg->getReg())) {
10182       SDLoc SL(Node);
10183       MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
10184       SDValue VReg = DAG.getRegister(
10185         MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
10186 
10187       SDNode *Glued = Node->getGluedNode();
10188       SDValue ToVReg
10189         = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
10190                          SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
10191       SDValue ToResultReg
10192         = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
10193                            VReg, ToVReg.getValue(1));
10194       DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
10195       DAG.RemoveDeadNode(Node);
10196       return ToResultReg.getNode();
10197     }
10198   }
10199 
10200   SmallVector<SDValue, 8> Ops;
10201   for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
10202     if (!isFrameIndexOp(Node->getOperand(i))) {
10203       Ops.push_back(Node->getOperand(i));
10204       continue;
10205     }
10206 
10207     SDLoc DL(Node);
10208     Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
10209                                      Node->getOperand(i).getValueType(),
10210                                      Node->getOperand(i)), 0));
10211   }
10212 
10213   return DAG.UpdateNodeOperands(Node, Ops);
10214 }
10215 
10216 /// Fold the instructions after selecting them.
10217 /// Returns null if users were already updated.
10218 SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
10219                                           SelectionDAG &DAG) const {
10220   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10221   unsigned Opcode = Node->getMachineOpcode();
10222 
10223   if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
10224       !TII->isGather4(Opcode)) {
10225     return adjustWritemask(Node, DAG);
10226   }
10227 
10228   if (Opcode == AMDGPU::INSERT_SUBREG ||
10229       Opcode == AMDGPU::REG_SEQUENCE) {
10230     legalizeTargetIndependentNode(Node, DAG);
10231     return Node;
10232   }
10233 
10234   switch (Opcode) {
10235   case AMDGPU::V_DIV_SCALE_F32:
10236   case AMDGPU::V_DIV_SCALE_F64: {
10237     // Satisfy the operand register constraint when one of the inputs is
10238     // undefined. Ordinarily each undef value will have its own implicit_def of
10239     // a vreg, so force these to use a single register.
10240     SDValue Src0 = Node->getOperand(0);
10241     SDValue Src1 = Node->getOperand(1);
10242     SDValue Src2 = Node->getOperand(2);
10243 
10244     if ((Src0.isMachineOpcode() &&
10245          Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
10246         (Src0 == Src1 || Src0 == Src2))
10247       break;
10248 
10249     MVT VT = Src0.getValueType().getSimpleVT();
10250     const TargetRegisterClass *RC =
10251         getRegClassFor(VT, Src0.getNode()->isDivergent());
10252 
10253     MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
10254     SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
10255 
10256     SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
10257                                       UndefReg, Src0, SDValue());
10258 
10259     // src0 must be the same register as src1 or src2, even if the value is
10260     // undefined, so make sure we don't violate this constraint.
10261     if (Src0.isMachineOpcode() &&
10262         Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
10263       if (Src1.isMachineOpcode() &&
10264           Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
10265         Src0 = Src1;
10266       else if (Src2.isMachineOpcode() &&
10267                Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
10268         Src0 = Src2;
10269       else {
10270         assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
10271         Src0 = UndefReg;
10272         Src1 = UndefReg;
10273       }
10274     } else
10275       break;
10276 
10277     SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
10278     for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
10279       Ops.push_back(Node->getOperand(I));
10280 
10281     Ops.push_back(ImpDef.getValue(1));
10282     return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
10283   }
10284   default:
10285     break;
10286   }
10287 
10288   return Node;
10289 }
10290 
10291 /// Assign the register class depending on the number of
10292 /// bits set in the writemask
10293 void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
10294                                                      SDNode *Node) const {
10295   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10296 
10297   MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
10298 
10299   if (TII->isVOP3(MI.getOpcode())) {
10300     // Make sure constant bus requirements are respected.
10301     TII->legalizeOperandsVOP3(MRI, MI);
10302 
10303     // Prefer VGPRs over AGPRs in mAI instructions where possible.
10304     // This saves a chain-copy of registers and better ballance register
10305     // use between vgpr and agpr as agpr tuples tend to be big.
10306     if (const MCOperandInfo *OpInfo = MI.getDesc().OpInfo) {
10307       unsigned Opc = MI.getOpcode();
10308       const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
10309       for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
10310                       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) {
10311         if (I == -1)
10312           break;
10313         MachineOperand &Op = MI.getOperand(I);
10314         if ((OpInfo[I].RegClass != llvm::AMDGPU::AV_64RegClassID &&
10315              OpInfo[I].RegClass != llvm::AMDGPU::AV_32RegClassID) ||
10316             !Register::isVirtualRegister(Op.getReg()) ||
10317             !TRI->isAGPR(MRI, Op.getReg()))
10318           continue;
10319         auto *Src = MRI.getUniqueVRegDef(Op.getReg());
10320         if (!Src || !Src->isCopy() ||
10321             !TRI->isSGPRReg(MRI, Src->getOperand(1).getReg()))
10322           continue;
10323         auto *RC = TRI->getRegClassForReg(MRI, Op.getReg());
10324         auto *NewRC = TRI->getEquivalentVGPRClass(RC);
10325         // All uses of agpr64 and agpr32 can also accept vgpr except for
10326         // v_accvgpr_read, but we do not produce agpr reads during selection,
10327         // so no use checks are needed.
10328         MRI.setRegClass(Op.getReg(), NewRC);
10329       }
10330     }
10331 
10332     return;
10333   }
10334 
10335   // Replace unused atomics with the no return version.
10336   int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
10337   if (NoRetAtomicOp != -1) {
10338     if (!Node->hasAnyUseOfValue(0)) {
10339       MI.setDesc(TII->get(NoRetAtomicOp));
10340       MI.RemoveOperand(0);
10341       return;
10342     }
10343 
10344     // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
10345     // instruction, because the return type of these instructions is a vec2 of
10346     // the memory type, so it can be tied to the input operand.
10347     // This means these instructions always have a use, so we need to add a
10348     // special case to check if the atomic has only one extract_subreg use,
10349     // which itself has no uses.
10350     if ((Node->hasNUsesOfValue(1, 0) &&
10351          Node->use_begin()->isMachineOpcode() &&
10352          Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
10353          !Node->use_begin()->hasAnyUseOfValue(0))) {
10354       Register Def = MI.getOperand(0).getReg();
10355 
10356       // Change this into a noret atomic.
10357       MI.setDesc(TII->get(NoRetAtomicOp));
10358       MI.RemoveOperand(0);
10359 
10360       // If we only remove the def operand from the atomic instruction, the
10361       // extract_subreg will be left with a use of a vreg without a def.
10362       // So we need to insert an implicit_def to avoid machine verifier
10363       // errors.
10364       BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
10365               TII->get(AMDGPU::IMPLICIT_DEF), Def);
10366     }
10367     return;
10368   }
10369 }
10370 
10371 static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
10372                               uint64_t Val) {
10373   SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
10374   return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
10375 }
10376 
10377 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
10378                                                 const SDLoc &DL,
10379                                                 SDValue Ptr) const {
10380   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10381 
10382   // Build the half of the subregister with the constants before building the
10383   // full 128-bit register. If we are building multiple resource descriptors,
10384   // this will allow CSEing of the 2-component register.
10385   const SDValue Ops0[] = {
10386     DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
10387     buildSMovImm32(DAG, DL, 0),
10388     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
10389     buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
10390     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
10391   };
10392 
10393   SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
10394                                                 MVT::v2i32, Ops0), 0);
10395 
10396   // Combine the constants and the pointer.
10397   const SDValue Ops1[] = {
10398     DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32),
10399     Ptr,
10400     DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
10401     SubRegHi,
10402     DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
10403   };
10404 
10405   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
10406 }
10407 
10408 /// Return a resource descriptor with the 'Add TID' bit enabled
10409 ///        The TID (Thread ID) is multiplied by the stride value (bits [61:48]
10410 ///        of the resource descriptor) to create an offset, which is added to
10411 ///        the resource pointer.
10412 MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
10413                                            SDValue Ptr, uint32_t RsrcDword1,
10414                                            uint64_t RsrcDword2And3) const {
10415   SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
10416   SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
10417   if (RsrcDword1) {
10418     PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
10419                                      DAG.getConstant(RsrcDword1, DL, MVT::i32)),
10420                     0);
10421   }
10422 
10423   SDValue DataLo = buildSMovImm32(DAG, DL,
10424                                   RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
10425   SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
10426 
10427   const SDValue Ops[] = {
10428     DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32),
10429     PtrLo,
10430     DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
10431     PtrHi,
10432     DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
10433     DataLo,
10434     DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
10435     DataHi,
10436     DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
10437   };
10438 
10439   return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
10440 }
10441 
10442 //===----------------------------------------------------------------------===//
10443 //                         SI Inline Assembly Support
10444 //===----------------------------------------------------------------------===//
10445 
10446 std::pair<unsigned, const TargetRegisterClass *>
10447 SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
10448                                                StringRef Constraint,
10449                                                MVT VT) const {
10450   const TargetRegisterClass *RC = nullptr;
10451   if (Constraint.size() == 1) {
10452     switch (Constraint[0]) {
10453     default:
10454       return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10455     case 's':
10456     case 'r':
10457       switch (VT.getSizeInBits()) {
10458       default:
10459         return std::make_pair(0U, nullptr);
10460       case 32:
10461       case 16:
10462         RC = &AMDGPU::SReg_32RegClass;
10463         break;
10464       case 64:
10465         RC = &AMDGPU::SGPR_64RegClass;
10466         break;
10467       case 96:
10468         RC = &AMDGPU::SReg_96RegClass;
10469         break;
10470       case 128:
10471         RC = &AMDGPU::SGPR_128RegClass;
10472         break;
10473       case 160:
10474         RC = &AMDGPU::SReg_160RegClass;
10475         break;
10476       case 256:
10477         RC = &AMDGPU::SReg_256RegClass;
10478         break;
10479       case 512:
10480         RC = &AMDGPU::SReg_512RegClass;
10481         break;
10482       }
10483       break;
10484     case 'v':
10485       switch (VT.getSizeInBits()) {
10486       default:
10487         return std::make_pair(0U, nullptr);
10488       case 32:
10489       case 16:
10490         RC = &AMDGPU::VGPR_32RegClass;
10491         break;
10492       case 64:
10493         RC = &AMDGPU::VReg_64RegClass;
10494         break;
10495       case 96:
10496         RC = &AMDGPU::VReg_96RegClass;
10497         break;
10498       case 128:
10499         RC = &AMDGPU::VReg_128RegClass;
10500         break;
10501       case 160:
10502         RC = &AMDGPU::VReg_160RegClass;
10503         break;
10504       case 256:
10505         RC = &AMDGPU::VReg_256RegClass;
10506         break;
10507       case 512:
10508         RC = &AMDGPU::VReg_512RegClass;
10509         break;
10510       }
10511       break;
10512     case 'a':
10513       if (!Subtarget->hasMAIInsts())
10514         break;
10515       switch (VT.getSizeInBits()) {
10516       default:
10517         return std::make_pair(0U, nullptr);
10518       case 32:
10519       case 16:
10520         RC = &AMDGPU::AGPR_32RegClass;
10521         break;
10522       case 64:
10523         RC = &AMDGPU::AReg_64RegClass;
10524         break;
10525       case 128:
10526         RC = &AMDGPU::AReg_128RegClass;
10527         break;
10528       case 512:
10529         RC = &AMDGPU::AReg_512RegClass;
10530         break;
10531       case 1024:
10532         RC = &AMDGPU::AReg_1024RegClass;
10533         // v32 types are not legal but we support them here.
10534         return std::make_pair(0U, RC);
10535       }
10536       break;
10537     }
10538     // We actually support i128, i16 and f16 as inline parameters
10539     // even if they are not reported as legal
10540     if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
10541                VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
10542       return std::make_pair(0U, RC);
10543   }
10544 
10545   if (Constraint.size() > 1) {
10546     if (Constraint[1] == 'v') {
10547       RC = &AMDGPU::VGPR_32RegClass;
10548     } else if (Constraint[1] == 's') {
10549       RC = &AMDGPU::SGPR_32RegClass;
10550     } else if (Constraint[1] == 'a') {
10551       RC = &AMDGPU::AGPR_32RegClass;
10552     }
10553 
10554     if (RC) {
10555       uint32_t Idx;
10556       bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
10557       if (!Failed && Idx < RC->getNumRegs())
10558         return std::make_pair(RC->getRegister(Idx), RC);
10559     }
10560   }
10561 
10562   // FIXME: Returns VS_32 for physical SGPR constraints
10563   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10564 }
10565 
10566 SITargetLowering::ConstraintType
10567 SITargetLowering::getConstraintType(StringRef Constraint) const {
10568   if (Constraint.size() == 1) {
10569     switch (Constraint[0]) {
10570     default: break;
10571     case 's':
10572     case 'v':
10573     case 'a':
10574       return C_RegisterClass;
10575     }
10576   }
10577   return TargetLowering::getConstraintType(Constraint);
10578 }
10579 
10580 // Figure out which registers should be reserved for stack access. Only after
10581 // the function is legalized do we know all of the non-spill stack objects or if
10582 // calls are present.
10583 void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
10584   MachineRegisterInfo &MRI = MF.getRegInfo();
10585   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
10586   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
10587   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
10588 
10589   if (Info->isEntryFunction()) {
10590     // Callable functions have fixed registers used for stack access.
10591     reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
10592   }
10593 
10594   assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
10595                              Info->getStackPtrOffsetReg()));
10596   if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG)
10597     MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
10598 
10599   // We need to worry about replacing the default register with itself in case
10600   // of MIR testcases missing the MFI.
10601   if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG)
10602     MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
10603 
10604   if (Info->getFrameOffsetReg() != AMDGPU::FP_REG)
10605     MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
10606 
10607   if (Info->getScratchWaveOffsetReg() != AMDGPU::SCRATCH_WAVE_OFFSET_REG) {
10608     MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
10609                        Info->getScratchWaveOffsetReg());
10610   }
10611 
10612   Info->limitOccupancy(MF);
10613 
10614   if (ST.isWave32() && !MF.empty()) {
10615     // Add VCC_HI def because many instructions marked as imp-use VCC where
10616     // we may only define VCC_LO. If nothing defines VCC_HI we may end up
10617     // having a use of undef.
10618 
10619     const SIInstrInfo *TII = ST.getInstrInfo();
10620     DebugLoc DL;
10621 
10622     MachineBasicBlock &MBB = MF.front();
10623     MachineBasicBlock::iterator I = MBB.getFirstNonDebugInstr();
10624     BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), AMDGPU::VCC_HI);
10625 
10626     for (auto &MBB : MF) {
10627       for (auto &MI : MBB) {
10628         TII->fixImplicitOperands(MI);
10629       }
10630     }
10631   }
10632 
10633   TargetLoweringBase::finalizeLowering(MF);
10634 }
10635 
10636 void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
10637                                                      KnownBits &Known,
10638                                                      const APInt &DemandedElts,
10639                                                      const SelectionDAG &DAG,
10640                                                      unsigned Depth) const {
10641   TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
10642                                                 DAG, Depth);
10643 
10644   // Set the high bits to zero based on the maximum allowed scratch size per
10645   // wave. We can't use vaddr in MUBUF instructions if we don't know the address
10646   // calculation won't overflow, so assume the sign bit is never set.
10647   Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
10648 }
10649 
10650 Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
10651   const Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
10652   const Align CacheLineAlign = Align(64);
10653 
10654   // Pre-GFX10 target did not benefit from loop alignment
10655   if (!ML || DisableLoopAlignment ||
10656       (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
10657       getSubtarget()->hasInstFwdPrefetchBug())
10658     return PrefAlign;
10659 
10660   // On GFX10 I$ is 4 x 64 bytes cache lines.
10661   // By default prefetcher keeps one cache line behind and reads two ahead.
10662   // We can modify it with S_INST_PREFETCH for larger loops to have two lines
10663   // behind and one ahead.
10664   // Therefor we can benefit from aligning loop headers if loop fits 192 bytes.
10665   // If loop fits 64 bytes it always spans no more than two cache lines and
10666   // does not need an alignment.
10667   // Else if loop is less or equal 128 bytes we do not need to modify prefetch,
10668   // Else if loop is less or equal 192 bytes we need two lines behind.
10669 
10670   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10671   const MachineBasicBlock *Header = ML->getHeader();
10672   if (Header->getAlignment() != PrefAlign)
10673     return Header->getAlignment(); // Already processed.
10674 
10675   unsigned LoopSize = 0;
10676   for (const MachineBasicBlock *MBB : ML->blocks()) {
10677     // If inner loop block is aligned assume in average half of the alignment
10678     // size to be added as nops.
10679     if (MBB != Header)
10680       LoopSize += MBB->getAlignment().value() / 2;
10681 
10682     for (const MachineInstr &MI : *MBB) {
10683       LoopSize += TII->getInstSizeInBytes(MI);
10684       if (LoopSize > 192)
10685         return PrefAlign;
10686     }
10687   }
10688 
10689   if (LoopSize <= 64)
10690     return PrefAlign;
10691 
10692   if (LoopSize <= 128)
10693     return CacheLineAlign;
10694 
10695   // If any of parent loops is surrounded by prefetch instructions do not
10696   // insert new for inner loop, which would reset parent's settings.
10697   for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) {
10698     if (MachineBasicBlock *Exit = P->getExitBlock()) {
10699       auto I = Exit->getFirstNonDebugInstr();
10700       if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
10701         return CacheLineAlign;
10702     }
10703   }
10704 
10705   MachineBasicBlock *Pre = ML->getLoopPreheader();
10706   MachineBasicBlock *Exit = ML->getExitBlock();
10707 
10708   if (Pre && Exit) {
10709     BuildMI(*Pre, Pre->getFirstTerminator(), DebugLoc(),
10710             TII->get(AMDGPU::S_INST_PREFETCH))
10711       .addImm(1); // prefetch 2 lines behind PC
10712 
10713     BuildMI(*Exit, Exit->getFirstNonDebugInstr(), DebugLoc(),
10714             TII->get(AMDGPU::S_INST_PREFETCH))
10715       .addImm(2); // prefetch 1 line behind PC
10716   }
10717 
10718   return CacheLineAlign;
10719 }
10720 
10721 LLVM_ATTRIBUTE_UNUSED
10722 static bool isCopyFromRegOfInlineAsm(const SDNode *N) {
10723   assert(N->getOpcode() == ISD::CopyFromReg);
10724   do {
10725     // Follow the chain until we find an INLINEASM node.
10726     N = N->getOperand(0).getNode();
10727     if (N->getOpcode() == ISD::INLINEASM ||
10728         N->getOpcode() == ISD::INLINEASM_BR)
10729       return true;
10730   } while (N->getOpcode() == ISD::CopyFromReg);
10731   return false;
10732 }
10733 
10734 bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode * N,
10735   FunctionLoweringInfo * FLI, LegacyDivergenceAnalysis * KDA) const
10736 {
10737   switch (N->getOpcode()) {
10738     case ISD::CopyFromReg:
10739     {
10740       const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1));
10741       const MachineFunction * MF = FLI->MF;
10742       const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
10743       const MachineRegisterInfo &MRI = MF->getRegInfo();
10744       const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
10745       unsigned Reg = R->getReg();
10746       if (Register::isPhysicalRegister(Reg))
10747         return !TRI.isSGPRReg(MRI, Reg);
10748 
10749       if (MRI.isLiveIn(Reg)) {
10750         // workitem.id.x workitem.id.y workitem.id.z
10751         // Any VGPR formal argument is also considered divergent
10752         if (!TRI.isSGPRReg(MRI, Reg))
10753           return true;
10754         // Formal arguments of non-entry functions
10755         // are conservatively considered divergent
10756         else if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
10757           return true;
10758         return false;
10759       }
10760       const Value *V = FLI->getValueFromVirtualReg(Reg);
10761       if (V)
10762         return KDA->isDivergent(V);
10763       assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
10764       return !TRI.isSGPRReg(MRI, Reg);
10765     }
10766     break;
10767     case ISD::LOAD: {
10768       const LoadSDNode *L = cast<LoadSDNode>(N);
10769       unsigned AS = L->getAddressSpace();
10770       // A flat load may access private memory.
10771       return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
10772     } break;
10773     case ISD::CALLSEQ_END:
10774     return true;
10775     break;
10776     case ISD::INTRINSIC_WO_CHAIN:
10777     {
10778 
10779     }
10780       return AMDGPU::isIntrinsicSourceOfDivergence(
10781       cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
10782     case ISD::INTRINSIC_W_CHAIN:
10783       return AMDGPU::isIntrinsicSourceOfDivergence(
10784       cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
10785   }
10786   return false;
10787 }
10788 
10789 bool SITargetLowering::denormalsEnabledForType(const SelectionDAG &DAG,
10790                                                EVT VT) const {
10791   switch (VT.getScalarType().getSimpleVT().SimpleTy) {
10792   case MVT::f32:
10793     return hasFP32Denormals(DAG.getMachineFunction());
10794   case MVT::f64:
10795   case MVT::f16:
10796     return hasFP64FP16Denormals(DAG.getMachineFunction());
10797   default:
10798     return false;
10799   }
10800 }
10801 
10802 bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
10803                                                     const SelectionDAG &DAG,
10804                                                     bool SNaN,
10805                                                     unsigned Depth) const {
10806   if (Op.getOpcode() == AMDGPUISD::CLAMP) {
10807     const MachineFunction &MF = DAG.getMachineFunction();
10808     const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
10809 
10810     if (Info->getMode().DX10Clamp)
10811       return true; // Clamped to 0.
10812     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
10813   }
10814 
10815   return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG,
10816                                                             SNaN, Depth);
10817 }
10818 
10819 TargetLowering::AtomicExpansionKind
10820 SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
10821   switch (RMW->getOperation()) {
10822   case AtomicRMWInst::FAdd: {
10823     Type *Ty = RMW->getType();
10824 
10825     // We don't have a way to support 16-bit atomics now, so just leave them
10826     // as-is.
10827     if (Ty->isHalfTy())
10828       return AtomicExpansionKind::None;
10829 
10830     if (!Ty->isFloatTy())
10831       return AtomicExpansionKind::CmpXChg;
10832 
10833     // TODO: Do have these for flat. Older targets also had them for buffers.
10834     unsigned AS = RMW->getPointerAddressSpace();
10835 
10836     if (AS == AMDGPUAS::GLOBAL_ADDRESS && Subtarget->hasAtomicFaddInsts()) {
10837       return RMW->use_empty() ? AtomicExpansionKind::None :
10838                                 AtomicExpansionKind::CmpXChg;
10839     }
10840 
10841     return (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) ?
10842       AtomicExpansionKind::None : AtomicExpansionKind::CmpXChg;
10843   }
10844   default:
10845     break;
10846   }
10847 
10848   return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
10849 }
10850 
10851 const TargetRegisterClass *
10852 SITargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
10853   const TargetRegisterClass *RC = TargetLoweringBase::getRegClassFor(VT, false);
10854   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
10855   if (RC == &AMDGPU::VReg_1RegClass && !isDivergent)
10856     return Subtarget->getWavefrontSize() == 64 ? &AMDGPU::SReg_64RegClass
10857                                                : &AMDGPU::SReg_32RegClass;
10858   if (!TRI->isSGPRClass(RC) && !isDivergent)
10859     return TRI->getEquivalentSGPRClass(RC);
10860   else if (TRI->isSGPRClass(RC) && isDivergent)
10861     return TRI->getEquivalentVGPRClass(RC);
10862 
10863   return RC;
10864 }
10865 
10866 static bool hasCFUser(const Value *V, SmallPtrSet<const Value *, 16> &Visited,
10867                       unsigned WaveSize) {
10868   // FIXME: We asssume we never cast the mask results of a control flow
10869   // intrinsic.
10870   // Early exit if the type won't be consistent as a compile time hack.
10871   IntegerType *IT = dyn_cast<IntegerType>(V->getType());
10872   if (!IT || IT->getBitWidth() != WaveSize)
10873     return false;
10874 
10875   if (!isa<Instruction>(V))
10876     return false;
10877   if (!Visited.insert(V).second)
10878     return false;
10879   bool Result = false;
10880   for (auto U : V->users()) {
10881     if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(U)) {
10882       if (V == U->getOperand(1)) {
10883         switch (Intrinsic->getIntrinsicID()) {
10884         default:
10885           Result = false;
10886           break;
10887         case Intrinsic::amdgcn_if_break:
10888         case Intrinsic::amdgcn_if:
10889         case Intrinsic::amdgcn_else:
10890           Result = true;
10891           break;
10892         }
10893       }
10894       if (V == U->getOperand(0)) {
10895         switch (Intrinsic->getIntrinsicID()) {
10896         default:
10897           Result = false;
10898           break;
10899         case Intrinsic::amdgcn_end_cf:
10900         case Intrinsic::amdgcn_loop:
10901           Result = true;
10902           break;
10903         }
10904       }
10905     } else {
10906       Result = hasCFUser(U, Visited, WaveSize);
10907     }
10908     if (Result)
10909       break;
10910   }
10911   return Result;
10912 }
10913 
10914 bool SITargetLowering::requiresUniformRegister(MachineFunction &MF,
10915                                                const Value *V) const {
10916   if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
10917     switch (Intrinsic->getIntrinsicID()) {
10918     default:
10919       return false;
10920     case Intrinsic::amdgcn_if_break:
10921       return true;
10922     }
10923   }
10924   if (const ExtractValueInst *ExtValue = dyn_cast<ExtractValueInst>(V)) {
10925     if (const IntrinsicInst *Intrinsic =
10926             dyn_cast<IntrinsicInst>(ExtValue->getOperand(0))) {
10927       switch (Intrinsic->getIntrinsicID()) {
10928       default:
10929         return false;
10930       case Intrinsic::amdgcn_if:
10931       case Intrinsic::amdgcn_else: {
10932         ArrayRef<unsigned> Indices = ExtValue->getIndices();
10933         if (Indices.size() == 1 && Indices[0] == 1) {
10934           return true;
10935         }
10936       }
10937       }
10938     }
10939   }
10940   if (const CallInst *CI = dyn_cast<CallInst>(V)) {
10941     if (isa<InlineAsm>(CI->getCalledValue())) {
10942       const SIRegisterInfo *SIRI = Subtarget->getRegisterInfo();
10943       ImmutableCallSite CS(CI);
10944       TargetLowering::AsmOperandInfoVector TargetConstraints = ParseConstraints(
10945           MF.getDataLayout(), Subtarget->getRegisterInfo(), CS);
10946       for (auto &TC : TargetConstraints) {
10947         if (TC.Type == InlineAsm::isOutput) {
10948           ComputeConstraintToUse(TC, SDValue());
10949           unsigned AssignedReg;
10950           const TargetRegisterClass *RC;
10951           std::tie(AssignedReg, RC) = getRegForInlineAsmConstraint(
10952               SIRI, TC.ConstraintCode, TC.ConstraintVT);
10953           if (RC) {
10954             MachineRegisterInfo &MRI = MF.getRegInfo();
10955             if (AssignedReg != 0 && SIRI->isSGPRReg(MRI, AssignedReg))
10956               return true;
10957             else if (SIRI->isSGPRClass(RC))
10958               return true;
10959           }
10960         }
10961       }
10962     }
10963   }
10964   SmallPtrSet<const Value *, 16> Visited;
10965   return hasCFUser(V, Visited, Subtarget->getWavefrontSize());
10966 }
10967