1 //===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //==-----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Defines an instruction selector for the AMDGPU target.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUISelDAGToDAG.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUTargetMachine.h"
17 #include "MCTargetDesc/R600MCTargetDesc.h"
18 #include "R600RegisterInfo.h"
19 #include "SIMachineFunctionInfo.h"
20 #include "llvm/Analysis/LegacyDivergenceAnalysis.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/CodeGen/FunctionLoweringInfo.h"
23 #include "llvm/CodeGen/SelectionDAG.h"
24 #include "llvm/CodeGen/SelectionDAGISel.h"
25 #include "llvm/CodeGen/SelectionDAGNodes.h"
26 #include "llvm/IR/IntrinsicsAMDGPU.h"
27 #include "llvm/InitializePasses.h"
28 
29 #ifdef EXPENSIVE_CHECKS
30 #include "llvm/Analysis/LoopInfo.h"
31 #include "llvm/IR/Dominators.h"
32 #endif
33 
34 #define DEBUG_TYPE "isel"
35 
36 using namespace llvm;
37 
38 //===----------------------------------------------------------------------===//
39 // Instruction Selector Implementation
40 //===----------------------------------------------------------------------===//
41 
42 namespace {
43 
44 static SDValue stripBitcast(SDValue Val) {
45   return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
46 }
47 
48 // Figure out if this is really an extract of the high 16-bits of a dword.
49 static bool isExtractHiElt(SDValue In, SDValue &Out) {
50   In = stripBitcast(In);
51 
52   if (In.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
53     if (ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(In.getOperand(1))) {
54       if (!Idx->isOne())
55         return false;
56       Out = In.getOperand(0);
57       return true;
58     }
59   }
60 
61   if (In.getOpcode() != ISD::TRUNCATE)
62     return false;
63 
64   SDValue Srl = In.getOperand(0);
65   if (Srl.getOpcode() == ISD::SRL) {
66     if (ConstantSDNode *ShiftAmt = dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
67       if (ShiftAmt->getZExtValue() == 16) {
68         Out = stripBitcast(Srl.getOperand(0));
69         return true;
70       }
71     }
72   }
73 
74   return false;
75 }
76 
77 // Look through operations that obscure just looking at the low 16-bits of the
78 // same register.
79 static SDValue stripExtractLoElt(SDValue In) {
80   if (In.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
81     if (ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(In.getOperand(1))) {
82       if (Idx->isZero() && In.getValueSizeInBits() <= 32)
83         return In.getOperand(0);
84     }
85   }
86 
87   if (In.getOpcode() == ISD::TRUNCATE) {
88     SDValue Src = In.getOperand(0);
89     if (Src.getValueType().getSizeInBits() == 32)
90       return stripBitcast(Src);
91   }
92 
93   return In;
94 }
95 
96 }  // end anonymous namespace
97 
98 INITIALIZE_PASS_BEGIN(AMDGPUDAGToDAGISel, "amdgpu-isel",
99                       "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)
100 INITIALIZE_PASS_DEPENDENCY(AMDGPUArgumentUsageInfo)
101 INITIALIZE_PASS_DEPENDENCY(AMDGPUPerfHintAnalysis)
102 INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
103 #ifdef EXPENSIVE_CHECKS
104 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
105 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
106 #endif
107 INITIALIZE_PASS_END(AMDGPUDAGToDAGISel, "amdgpu-isel",
108                     "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)
109 
110 /// This pass converts a legalized DAG into a AMDGPU-specific
111 // DAG, ready for instruction scheduling.
112 FunctionPass *llvm::createAMDGPUISelDag(TargetMachine *TM,
113                                         CodeGenOpt::Level OptLevel) {
114   return new AMDGPUDAGToDAGISel(TM, OptLevel);
115 }
116 
117 AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(
118     TargetMachine *TM /*= nullptr*/,
119     CodeGenOpt::Level OptLevel /*= CodeGenOpt::Default*/)
120     : SelectionDAGISel(*TM, OptLevel) {
121   EnableLateStructurizeCFG = AMDGPUTargetMachine::EnableLateStructurizeCFG;
122 }
123 
124 bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
125 #ifdef EXPENSIVE_CHECKS
126   DominatorTree & DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
127   LoopInfo * LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
128   for (auto &L : LI->getLoopsInPreorder()) {
129     assert(L->isLCSSAForm(DT));
130   }
131 #endif
132   Subtarget = &MF.getSubtarget<GCNSubtarget>();
133   Mode = AMDGPU::SIModeRegisterDefaults(MF.getFunction());
134   return SelectionDAGISel::runOnMachineFunction(MF);
135 }
136 
137 bool AMDGPUDAGToDAGISel::fp16SrcZerosHighBits(unsigned Opc) const {
138   // XXX - only need to list legal operations.
139   switch (Opc) {
140   case ISD::FADD:
141   case ISD::FSUB:
142   case ISD::FMUL:
143   case ISD::FDIV:
144   case ISD::FREM:
145   case ISD::FCANONICALIZE:
146   case ISD::UINT_TO_FP:
147   case ISD::SINT_TO_FP:
148   case ISD::FABS:
149     // Fabs is lowered to a bit operation, but it's an and which will clear the
150     // high bits anyway.
151   case ISD::FSQRT:
152   case ISD::FSIN:
153   case ISD::FCOS:
154   case ISD::FPOWI:
155   case ISD::FPOW:
156   case ISD::FLOG:
157   case ISD::FLOG2:
158   case ISD::FLOG10:
159   case ISD::FEXP:
160   case ISD::FEXP2:
161   case ISD::FCEIL:
162   case ISD::FTRUNC:
163   case ISD::FRINT:
164   case ISD::FNEARBYINT:
165   case ISD::FROUND:
166   case ISD::FFLOOR:
167   case ISD::FMINNUM:
168   case ISD::FMAXNUM:
169   case AMDGPUISD::FRACT:
170   case AMDGPUISD::CLAMP:
171   case AMDGPUISD::COS_HW:
172   case AMDGPUISD::SIN_HW:
173   case AMDGPUISD::FMIN3:
174   case AMDGPUISD::FMAX3:
175   case AMDGPUISD::FMED3:
176   case AMDGPUISD::FMAD_FTZ:
177   case AMDGPUISD::RCP:
178   case AMDGPUISD::RSQ:
179   case AMDGPUISD::RCP_IFLAG:
180   case AMDGPUISD::LDEXP:
181     // On gfx10, all 16-bit instructions preserve the high bits.
182     return Subtarget->getGeneration() <= AMDGPUSubtarget::GFX9;
183   case ISD::FP_ROUND:
184     // We may select fptrunc (fma/mad) to mad_mixlo, which does not zero the
185     // high bits on gfx9.
186     // TODO: If we had the source node we could see if the source was fma/mad
187     return Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS;
188   case ISD::FMA:
189   case ISD::FMAD:
190   case AMDGPUISD::DIV_FIXUP:
191     return Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS;
192   default:
193     // fcopysign, select and others may be lowered to 32-bit bit operations
194     // which don't zero the high bits.
195     return false;
196   }
197 }
198 
199 void AMDGPUDAGToDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
200   AU.addRequired<AMDGPUArgumentUsageInfo>();
201   AU.addRequired<LegacyDivergenceAnalysis>();
202 #ifdef EXPENSIVE_CHECKS
203   AU.addRequired<DominatorTreeWrapperPass>();
204   AU.addRequired<LoopInfoWrapperPass>();
205 #endif
206   SelectionDAGISel::getAnalysisUsage(AU);
207 }
208 
209 bool AMDGPUDAGToDAGISel::matchLoadD16FromBuildVector(SDNode *N) const {
210   assert(Subtarget->d16PreservesUnusedBits());
211   MVT VT = N->getValueType(0).getSimpleVT();
212   if (VT != MVT::v2i16 && VT != MVT::v2f16)
213     return false;
214 
215   SDValue Lo = N->getOperand(0);
216   SDValue Hi = N->getOperand(1);
217 
218   LoadSDNode *LdHi = dyn_cast<LoadSDNode>(stripBitcast(Hi));
219 
220   // build_vector lo, (load ptr) -> load_d16_hi ptr, lo
221   // build_vector lo, (zextload ptr from i8) -> load_d16_hi_u8 ptr, lo
222   // build_vector lo, (sextload ptr from i8) -> load_d16_hi_i8 ptr, lo
223 
224   // Need to check for possible indirect dependencies on the other half of the
225   // vector to avoid introducing a cycle.
226   if (LdHi && Hi.hasOneUse() && !LdHi->isPredecessorOf(Lo.getNode())) {
227     SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);
228 
229     SDValue TiedIn = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Lo);
230     SDValue Ops[] = {
231       LdHi->getChain(), LdHi->getBasePtr(), TiedIn
232     };
233 
234     unsigned LoadOp = AMDGPUISD::LOAD_D16_HI;
235     if (LdHi->getMemoryVT() == MVT::i8) {
236       LoadOp = LdHi->getExtensionType() == ISD::SEXTLOAD ?
237         AMDGPUISD::LOAD_D16_HI_I8 : AMDGPUISD::LOAD_D16_HI_U8;
238     } else {
239       assert(LdHi->getMemoryVT() == MVT::i16);
240     }
241 
242     SDValue NewLoadHi =
243       CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdHi), VTList,
244                                   Ops, LdHi->getMemoryVT(),
245                                   LdHi->getMemOperand());
246 
247     CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadHi);
248     CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdHi, 1), NewLoadHi.getValue(1));
249     return true;
250   }
251 
252   // build_vector (load ptr), hi -> load_d16_lo ptr, hi
253   // build_vector (zextload ptr from i8), hi -> load_d16_lo_u8 ptr, hi
254   // build_vector (sextload ptr from i8), hi -> load_d16_lo_i8 ptr, hi
255   LoadSDNode *LdLo = dyn_cast<LoadSDNode>(stripBitcast(Lo));
256   if (LdLo && Lo.hasOneUse()) {
257     SDValue TiedIn = getHi16Elt(Hi);
258     if (!TiedIn || LdLo->isPredecessorOf(TiedIn.getNode()))
259       return false;
260 
261     SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);
262     unsigned LoadOp = AMDGPUISD::LOAD_D16_LO;
263     if (LdLo->getMemoryVT() == MVT::i8) {
264       LoadOp = LdLo->getExtensionType() == ISD::SEXTLOAD ?
265         AMDGPUISD::LOAD_D16_LO_I8 : AMDGPUISD::LOAD_D16_LO_U8;
266     } else {
267       assert(LdLo->getMemoryVT() == MVT::i16);
268     }
269 
270     TiedIn = CurDAG->getNode(ISD::BITCAST, SDLoc(N), VT, TiedIn);
271 
272     SDValue Ops[] = {
273       LdLo->getChain(), LdLo->getBasePtr(), TiedIn
274     };
275 
276     SDValue NewLoadLo =
277       CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdLo), VTList,
278                                   Ops, LdLo->getMemoryVT(),
279                                   LdLo->getMemOperand());
280 
281     CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadLo);
282     CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdLo, 1), NewLoadLo.getValue(1));
283     return true;
284   }
285 
286   return false;
287 }
288 
289 void AMDGPUDAGToDAGISel::PreprocessISelDAG() {
290   if (!Subtarget->d16PreservesUnusedBits())
291     return;
292 
293   SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
294 
295   bool MadeChange = false;
296   while (Position != CurDAG->allnodes_begin()) {
297     SDNode *N = &*--Position;
298     if (N->use_empty())
299       continue;
300 
301     switch (N->getOpcode()) {
302     case ISD::BUILD_VECTOR:
303       MadeChange |= matchLoadD16FromBuildVector(N);
304       break;
305     default:
306       break;
307     }
308   }
309 
310   if (MadeChange) {
311     CurDAG->RemoveDeadNodes();
312     LLVM_DEBUG(dbgs() << "After PreProcess:\n";
313                CurDAG->dump(););
314   }
315 }
316 
317 bool AMDGPUDAGToDAGISel::isNoNanSrc(SDValue N) const {
318   if (TM.Options.NoNaNsFPMath)
319     return true;
320 
321   // TODO: Move into isKnownNeverNaN
322   if (N->getFlags().hasNoNaNs())
323     return true;
324 
325   return CurDAG->isKnownNeverNaN(N);
326 }
327 
328 bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N,
329                                            bool Negated) const {
330   if (N->isUndef())
331     return true;
332 
333   const SIInstrInfo *TII = Subtarget->getInstrInfo();
334   if (Negated) {
335     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
336       return TII->isInlineConstant(-C->getAPIntValue());
337 
338     if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
339       return TII->isInlineConstant(-C->getValueAPF().bitcastToAPInt());
340 
341   } else {
342     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
343       return TII->isInlineConstant(C->getAPIntValue());
344 
345     if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
346       return TII->isInlineConstant(C->getValueAPF().bitcastToAPInt());
347   }
348 
349   return false;
350 }
351 
352 /// Determine the register class for \p OpNo
353 /// \returns The register class of the virtual register that will be used for
354 /// the given operand number \OpNo or NULL if the register class cannot be
355 /// determined.
356 const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
357                                                           unsigned OpNo) const {
358   if (!N->isMachineOpcode()) {
359     if (N->getOpcode() == ISD::CopyToReg) {
360       Register Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
361       if (Reg.isVirtual()) {
362         MachineRegisterInfo &MRI = CurDAG->getMachineFunction().getRegInfo();
363         return MRI.getRegClass(Reg);
364       }
365 
366       const SIRegisterInfo *TRI
367         = static_cast<const GCNSubtarget *>(Subtarget)->getRegisterInfo();
368       return TRI->getPhysRegClass(Reg);
369     }
370 
371     return nullptr;
372   }
373 
374   switch (N->getMachineOpcode()) {
375   default: {
376     const MCInstrDesc &Desc =
377         Subtarget->getInstrInfo()->get(N->getMachineOpcode());
378     unsigned OpIdx = Desc.getNumDefs() + OpNo;
379     if (OpIdx >= Desc.getNumOperands())
380       return nullptr;
381     int RegClass = Desc.OpInfo[OpIdx].RegClass;
382     if (RegClass == -1)
383       return nullptr;
384 
385     return Subtarget->getRegisterInfo()->getRegClass(RegClass);
386   }
387   case AMDGPU::REG_SEQUENCE: {
388     unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
389     const TargetRegisterClass *SuperRC =
390         Subtarget->getRegisterInfo()->getRegClass(RCID);
391 
392     SDValue SubRegOp = N->getOperand(OpNo + 1);
393     unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
394     return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
395                                                               SubRegIdx);
396   }
397   }
398 }
399 
400 SDNode *AMDGPUDAGToDAGISel::glueCopyToOp(SDNode *N, SDValue NewChain,
401                                          SDValue Glue) const {
402   SmallVector <SDValue, 8> Ops;
403   Ops.push_back(NewChain); // Replace the chain.
404   for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
405     Ops.push_back(N->getOperand(i));
406 
407   Ops.push_back(Glue);
408   return CurDAG->MorphNodeTo(N, N->getOpcode(), N->getVTList(), Ops);
409 }
410 
411 SDNode *AMDGPUDAGToDAGISel::glueCopyToM0(SDNode *N, SDValue Val) const {
412   const SITargetLowering& Lowering =
413     *static_cast<const SITargetLowering*>(getTargetLowering());
414 
415   assert(N->getOperand(0).getValueType() == MVT::Other && "Expected chain");
416 
417   SDValue M0 = Lowering.copyToM0(*CurDAG, N->getOperand(0), SDLoc(N), Val);
418   return glueCopyToOp(N, M0, M0.getValue(1));
419 }
420 
421 SDNode *AMDGPUDAGToDAGISel::glueCopyToM0LDSInit(SDNode *N) const {
422   unsigned AS = cast<MemSDNode>(N)->getAddressSpace();
423   if (AS == AMDGPUAS::LOCAL_ADDRESS) {
424     if (Subtarget->ldsRequiresM0Init())
425       return glueCopyToM0(N, CurDAG->getTargetConstant(-1, SDLoc(N), MVT::i32));
426   } else if (AS == AMDGPUAS::REGION_ADDRESS) {
427     MachineFunction &MF = CurDAG->getMachineFunction();
428     unsigned Value = MF.getInfo<SIMachineFunctionInfo>()->getGDSSize();
429     return
430         glueCopyToM0(N, CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i32));
431   }
432   return N;
433 }
434 
435 MachineSDNode *AMDGPUDAGToDAGISel::buildSMovImm64(SDLoc &DL, uint64_t Imm,
436                                                   EVT VT) const {
437   SDNode *Lo = CurDAG->getMachineNode(
438       AMDGPU::S_MOV_B32, DL, MVT::i32,
439       CurDAG->getTargetConstant(Imm & 0xFFFFFFFF, DL, MVT::i32));
440   SDNode *Hi =
441       CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
442                              CurDAG->getTargetConstant(Imm >> 32, DL, MVT::i32));
443   const SDValue Ops[] = {
444       CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
445       SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
446       SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)};
447 
448   return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, VT, Ops);
449 }
450 
451 void AMDGPUDAGToDAGISel::SelectBuildVector(SDNode *N, unsigned RegClassID) {
452   EVT VT = N->getValueType(0);
453   unsigned NumVectorElts = VT.getVectorNumElements();
454   EVT EltVT = VT.getVectorElementType();
455   SDLoc DL(N);
456   SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
457 
458   if (NumVectorElts == 1) {
459     CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT, N->getOperand(0),
460                          RegClass);
461     return;
462   }
463 
464   assert(NumVectorElts <= 32 && "Vectors with more than 32 elements not "
465                                   "supported yet");
466   // 32 = Max Num Vector Elements
467   // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
468   // 1 = Vector Register Class
469   SmallVector<SDValue, 32 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);
470 
471   bool IsGCN = CurDAG->getSubtarget().getTargetTriple().getArch() ==
472                Triple::amdgcn;
473   RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
474   bool IsRegSeq = true;
475   unsigned NOps = N->getNumOperands();
476   for (unsigned i = 0; i < NOps; i++) {
477     // XXX: Why is this here?
478     if (isa<RegisterSDNode>(N->getOperand(i))) {
479       IsRegSeq = false;
480       break;
481     }
482     unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
483                          : R600RegisterInfo::getSubRegFromChannel(i);
484     RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
485     RegSeqArgs[1 + (2 * i) + 1] = CurDAG->getTargetConstant(Sub, DL, MVT::i32);
486   }
487   if (NOps != NumVectorElts) {
488     // Fill in the missing undef elements if this was a scalar_to_vector.
489     assert(N->getOpcode() == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
490     MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
491                                                    DL, EltVT);
492     for (unsigned i = NOps; i < NumVectorElts; ++i) {
493       unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
494                            : R600RegisterInfo::getSubRegFromChannel(i);
495       RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
496       RegSeqArgs[1 + (2 * i) + 1] =
497           CurDAG->getTargetConstant(Sub, DL, MVT::i32);
498     }
499   }
500 
501   if (!IsRegSeq)
502     SelectCode(N);
503   CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(), RegSeqArgs);
504 }
505 
506 void AMDGPUDAGToDAGISel::Select(SDNode *N) {
507   unsigned int Opc = N->getOpcode();
508   if (N->isMachineOpcode()) {
509     N->setNodeId(-1);
510     return;   // Already selected.
511   }
512 
513   // isa<MemSDNode> almost works but is slightly too permissive for some DS
514   // intrinsics.
515   if (Opc == ISD::LOAD || Opc == ISD::STORE || isa<AtomicSDNode>(N) ||
516       (Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC ||
517        Opc == ISD::ATOMIC_LOAD_FADD ||
518        Opc == AMDGPUISD::ATOMIC_LOAD_FMIN ||
519        Opc == AMDGPUISD::ATOMIC_LOAD_FMAX)) {
520     N = glueCopyToM0LDSInit(N);
521     SelectCode(N);
522     return;
523   }
524 
525   switch (Opc) {
526   default:
527     break;
528   // We are selecting i64 ADD here instead of custom lower it during
529   // DAG legalization, so we can fold some i64 ADDs used for address
530   // calculation into the LOAD and STORE instructions.
531   case ISD::ADDC:
532   case ISD::ADDE:
533   case ISD::SUBC:
534   case ISD::SUBE: {
535     if (N->getValueType(0) != MVT::i64)
536       break;
537 
538     SelectADD_SUB_I64(N);
539     return;
540   }
541   case ISD::ADDCARRY:
542   case ISD::SUBCARRY:
543     if (N->getValueType(0) != MVT::i32)
544       break;
545 
546     SelectAddcSubb(N);
547     return;
548   case ISD::UADDO:
549   case ISD::USUBO: {
550     SelectUADDO_USUBO(N);
551     return;
552   }
553   case AMDGPUISD::FMUL_W_CHAIN: {
554     SelectFMUL_W_CHAIN(N);
555     return;
556   }
557   case AMDGPUISD::FMA_W_CHAIN: {
558     SelectFMA_W_CHAIN(N);
559     return;
560   }
561 
562   case ISD::SCALAR_TO_VECTOR:
563   case ISD::BUILD_VECTOR: {
564     EVT VT = N->getValueType(0);
565     unsigned NumVectorElts = VT.getVectorNumElements();
566     if (VT.getScalarSizeInBits() == 16) {
567       if (Opc == ISD::BUILD_VECTOR && NumVectorElts == 2) {
568         if (SDNode *Packed = packConstantV2I16(N, *CurDAG)) {
569           ReplaceNode(N, Packed);
570           return;
571         }
572       }
573 
574       break;
575     }
576 
577     assert(VT.getVectorElementType().bitsEq(MVT::i32));
578     unsigned RegClassID =
579         SIRegisterInfo::getSGPRClassForBitWidth(NumVectorElts * 32)->getID();
580     SelectBuildVector(N, RegClassID);
581     return;
582   }
583   case ISD::BUILD_PAIR: {
584     SDValue RC, SubReg0, SubReg1;
585     SDLoc DL(N);
586     if (N->getValueType(0) == MVT::i128) {
587       RC = CurDAG->getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32);
588       SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32);
589       SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32);
590     } else if (N->getValueType(0) == MVT::i64) {
591       RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32);
592       SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
593       SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
594     } else {
595       llvm_unreachable("Unhandled value type for BUILD_PAIR");
596     }
597     const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
598                             N->getOperand(1), SubReg1 };
599     ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
600                                           N->getValueType(0), Ops));
601     return;
602   }
603 
604   case ISD::Constant:
605   case ISD::ConstantFP: {
606     if (N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
607       break;
608 
609     uint64_t Imm;
610     if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
611       Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
612     else {
613       ConstantSDNode *C = cast<ConstantSDNode>(N);
614       Imm = C->getZExtValue();
615     }
616 
617     SDLoc DL(N);
618     ReplaceNode(N, buildSMovImm64(DL, Imm, N->getValueType(0)));
619     return;
620   }
621   case AMDGPUISD::BFE_I32:
622   case AMDGPUISD::BFE_U32: {
623     // There is a scalar version available, but unlike the vector version which
624     // has a separate operand for the offset and width, the scalar version packs
625     // the width and offset into a single operand. Try to move to the scalar
626     // version if the offsets are constant, so that we can try to keep extended
627     // loads of kernel arguments in SGPRs.
628 
629     // TODO: Technically we could try to pattern match scalar bitshifts of
630     // dynamic values, but it's probably not useful.
631     ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
632     if (!Offset)
633       break;
634 
635     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
636     if (!Width)
637       break;
638 
639     bool Signed = Opc == AMDGPUISD::BFE_I32;
640 
641     uint32_t OffsetVal = Offset->getZExtValue();
642     uint32_t WidthVal = Width->getZExtValue();
643 
644     ReplaceNode(N, getBFE32(Signed, SDLoc(N), N->getOperand(0), OffsetVal,
645                             WidthVal));
646     return;
647   }
648   case AMDGPUISD::DIV_SCALE: {
649     SelectDIV_SCALE(N);
650     return;
651   }
652   case AMDGPUISD::MAD_I64_I32:
653   case AMDGPUISD::MAD_U64_U32: {
654     SelectMAD_64_32(N);
655     return;
656   }
657   case ISD::SMUL_LOHI:
658   case ISD::UMUL_LOHI:
659     return SelectMUL_LOHI(N);
660   case ISD::CopyToReg: {
661     const SITargetLowering& Lowering =
662       *static_cast<const SITargetLowering*>(getTargetLowering());
663     N = Lowering.legalizeTargetIndependentNode(N, *CurDAG);
664     break;
665   }
666   case ISD::AND:
667   case ISD::SRL:
668   case ISD::SRA:
669   case ISD::SIGN_EXTEND_INREG:
670     if (N->getValueType(0) != MVT::i32)
671       break;
672 
673     SelectS_BFE(N);
674     return;
675   case ISD::BRCOND:
676     SelectBRCOND(N);
677     return;
678   case ISD::FMAD:
679   case ISD::FMA:
680     SelectFMAD_FMA(N);
681     return;
682   case AMDGPUISD::CVT_PKRTZ_F16_F32:
683   case AMDGPUISD::CVT_PKNORM_I16_F32:
684   case AMDGPUISD::CVT_PKNORM_U16_F32:
685   case AMDGPUISD::CVT_PK_U16_U32:
686   case AMDGPUISD::CVT_PK_I16_I32: {
687     // Hack around using a legal type if f16 is illegal.
688     if (N->getValueType(0) == MVT::i32) {
689       MVT NewVT = Opc == AMDGPUISD::CVT_PKRTZ_F16_F32 ? MVT::v2f16 : MVT::v2i16;
690       N = CurDAG->MorphNodeTo(N, N->getOpcode(), CurDAG->getVTList(NewVT),
691                               { N->getOperand(0), N->getOperand(1) });
692       SelectCode(N);
693       return;
694     }
695 
696     break;
697   }
698   case ISD::INTRINSIC_W_CHAIN: {
699     SelectINTRINSIC_W_CHAIN(N);
700     return;
701   }
702   case ISD::INTRINSIC_WO_CHAIN: {
703     SelectINTRINSIC_WO_CHAIN(N);
704     return;
705   }
706   case ISD::INTRINSIC_VOID: {
707     SelectINTRINSIC_VOID(N);
708     return;
709   }
710   }
711 
712   SelectCode(N);
713 }
714 
715 bool AMDGPUDAGToDAGISel::isUniformBr(const SDNode *N) const {
716   const BasicBlock *BB = FuncInfo->MBB->getBasicBlock();
717   const Instruction *Term = BB->getTerminator();
718   return Term->getMetadata("amdgpu.uniform") ||
719          Term->getMetadata("structurizecfg.uniform");
720 }
721 
722 bool AMDGPUDAGToDAGISel::isUnneededShiftMask(const SDNode *N,
723                                              unsigned ShAmtBits) const {
724   assert(N->getOpcode() == ISD::AND);
725 
726   const APInt &RHS = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
727   if (RHS.countTrailingOnes() >= ShAmtBits)
728     return true;
729 
730   const APInt &LHSKnownZeros = CurDAG->computeKnownBits(N->getOperand(0)).Zero;
731   return (LHSKnownZeros | RHS).countTrailingOnes() >= ShAmtBits;
732 }
733 
734 static bool getBaseWithOffsetUsingSplitOR(SelectionDAG &DAG, SDValue Addr,
735                                           SDValue &N0, SDValue &N1) {
736   if (Addr.getValueType() == MVT::i64 && Addr.getOpcode() == ISD::BITCAST &&
737       Addr.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
738     // As we split 64-bit `or` earlier, it's complicated pattern to match, i.e.
739     // (i64 (bitcast (v2i32 (build_vector
740     //                        (or (extract_vector_elt V, 0), OFFSET),
741     //                        (extract_vector_elt V, 1)))))
742     SDValue Lo = Addr.getOperand(0).getOperand(0);
743     if (Lo.getOpcode() == ISD::OR && DAG.isBaseWithConstantOffset(Lo)) {
744       SDValue BaseLo = Lo.getOperand(0);
745       SDValue BaseHi = Addr.getOperand(0).getOperand(1);
746       // Check that split base (Lo and Hi) are extracted from the same one.
747       if (BaseLo.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
748           BaseHi.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
749           BaseLo.getOperand(0) == BaseHi.getOperand(0) &&
750           // Lo is statically extracted from index 0.
751           isa<ConstantSDNode>(BaseLo.getOperand(1)) &&
752           BaseLo.getConstantOperandVal(1) == 0 &&
753           // Hi is statically extracted from index 0.
754           isa<ConstantSDNode>(BaseHi.getOperand(1)) &&
755           BaseHi.getConstantOperandVal(1) == 1) {
756         N0 = BaseLo.getOperand(0).getOperand(0);
757         N1 = Lo.getOperand(1);
758         return true;
759       }
760     }
761   }
762   return false;
763 }
764 
765 bool AMDGPUDAGToDAGISel::isBaseWithConstantOffset64(SDValue Addr, SDValue &LHS,
766                                                     SDValue &RHS) const {
767   if (CurDAG->isBaseWithConstantOffset(Addr)) {
768     LHS = Addr.getOperand(0);
769     RHS = Addr.getOperand(1);
770     return true;
771   }
772 
773   if (getBaseWithOffsetUsingSplitOR(*CurDAG, Addr, LHS, RHS)) {
774     assert(LHS && RHS && isa<ConstantSDNode>(RHS));
775     return true;
776   }
777 
778   return false;
779 }
780 
781 StringRef AMDGPUDAGToDAGISel::getPassName() const {
782   return "AMDGPU DAG->DAG Pattern Instruction Selection";
783 }
784 
785 //===----------------------------------------------------------------------===//
786 // Complex Patterns
787 //===----------------------------------------------------------------------===//
788 
789 bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
790                                             SDValue &Offset) {
791   return false;
792 }
793 
794 bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
795                                             SDValue &Offset) {
796   ConstantSDNode *C;
797   SDLoc DL(Addr);
798 
799   if ((C = dyn_cast<ConstantSDNode>(Addr))) {
800     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
801     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
802   } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
803              (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
804     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
805     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
806   } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
807             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
808     Base = Addr.getOperand(0);
809     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
810   } else {
811     Base = Addr;
812     Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
813   }
814 
815   return true;
816 }
817 
818 SDValue AMDGPUDAGToDAGISel::getMaterializedScalarImm32(int64_t Val,
819                                                        const SDLoc &DL) const {
820   SDNode *Mov = CurDAG->getMachineNode(
821     AMDGPU::S_MOV_B32, DL, MVT::i32,
822     CurDAG->getTargetConstant(Val, DL, MVT::i32));
823   return SDValue(Mov, 0);
824 }
825 
826 // FIXME: Should only handle addcarry/subcarry
827 void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
828   SDLoc DL(N);
829   SDValue LHS = N->getOperand(0);
830   SDValue RHS = N->getOperand(1);
831 
832   unsigned Opcode = N->getOpcode();
833   bool ConsumeCarry = (Opcode == ISD::ADDE || Opcode == ISD::SUBE);
834   bool ProduceCarry =
835       ConsumeCarry || Opcode == ISD::ADDC || Opcode == ISD::SUBC;
836   bool IsAdd = Opcode == ISD::ADD || Opcode == ISD::ADDC || Opcode == ISD::ADDE;
837 
838   SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
839   SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
840 
841   SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
842                                        DL, MVT::i32, LHS, Sub0);
843   SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
844                                        DL, MVT::i32, LHS, Sub1);
845 
846   SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
847                                        DL, MVT::i32, RHS, Sub0);
848   SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
849                                        DL, MVT::i32, RHS, Sub1);
850 
851   SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
852 
853   static const unsigned OpcMap[2][2][2] = {
854       {{AMDGPU::S_SUB_U32, AMDGPU::S_ADD_U32},
855        {AMDGPU::V_SUB_CO_U32_e32, AMDGPU::V_ADD_CO_U32_e32}},
856       {{AMDGPU::S_SUBB_U32, AMDGPU::S_ADDC_U32},
857        {AMDGPU::V_SUBB_U32_e32, AMDGPU::V_ADDC_U32_e32}}};
858 
859   unsigned Opc = OpcMap[0][N->isDivergent()][IsAdd];
860   unsigned CarryOpc = OpcMap[1][N->isDivergent()][IsAdd];
861 
862   SDNode *AddLo;
863   if (!ConsumeCarry) {
864     SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };
865     AddLo = CurDAG->getMachineNode(Opc, DL, VTList, Args);
866   } else {
867     SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0), N->getOperand(2) };
868     AddLo = CurDAG->getMachineNode(CarryOpc, DL, VTList, Args);
869   }
870   SDValue AddHiArgs[] = {
871     SDValue(Hi0, 0),
872     SDValue(Hi1, 0),
873     SDValue(AddLo, 1)
874   };
875   SDNode *AddHi = CurDAG->getMachineNode(CarryOpc, DL, VTList, AddHiArgs);
876 
877   SDValue RegSequenceArgs[] = {
878     CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
879     SDValue(AddLo,0),
880     Sub0,
881     SDValue(AddHi,0),
882     Sub1,
883   };
884   SDNode *RegSequence = CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
885                                                MVT::i64, RegSequenceArgs);
886 
887   if (ProduceCarry) {
888     // Replace the carry-use
889     ReplaceUses(SDValue(N, 1), SDValue(AddHi, 1));
890   }
891 
892   // Replace the remaining uses.
893   ReplaceNode(N, RegSequence);
894 }
895 
896 void AMDGPUDAGToDAGISel::SelectAddcSubb(SDNode *N) {
897   SDLoc DL(N);
898   SDValue LHS = N->getOperand(0);
899   SDValue RHS = N->getOperand(1);
900   SDValue CI = N->getOperand(2);
901 
902   if (N->isDivergent()) {
903     unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::V_ADDC_U32_e64
904                                                    : AMDGPU::V_SUBB_U32_e64;
905     CurDAG->SelectNodeTo(
906         N, Opc, N->getVTList(),
907         {LHS, RHS, CI,
908          CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
909   } else {
910     unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::S_ADD_CO_PSEUDO
911                                                    : AMDGPU::S_SUB_CO_PSEUDO;
912     CurDAG->SelectNodeTo(N, Opc, N->getVTList(), {LHS, RHS, CI});
913   }
914 }
915 
916 void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) {
917   // The name of the opcodes are misleading. v_add_i32/v_sub_i32 have unsigned
918   // carry out despite the _i32 name. These were renamed in VI to _U32.
919   // FIXME: We should probably rename the opcodes here.
920   bool IsAdd = N->getOpcode() == ISD::UADDO;
921   bool IsVALU = N->isDivergent();
922 
923   for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end(); UI != E;
924        ++UI)
925     if (UI.getUse().getResNo() == 1) {
926       if ((IsAdd && (UI->getOpcode() != ISD::ADDCARRY)) ||
927           (!IsAdd && (UI->getOpcode() != ISD::SUBCARRY))) {
928         IsVALU = true;
929         break;
930       }
931     }
932 
933   if (IsVALU) {
934     unsigned Opc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
935 
936     CurDAG->SelectNodeTo(
937         N, Opc, N->getVTList(),
938         {N->getOperand(0), N->getOperand(1),
939          CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
940   } else {
941     unsigned Opc = N->getOpcode() == ISD::UADDO ? AMDGPU::S_UADDO_PSEUDO
942                                                 : AMDGPU::S_USUBO_PSEUDO;
943 
944     CurDAG->SelectNodeTo(N, Opc, N->getVTList(),
945                          {N->getOperand(0), N->getOperand(1)});
946   }
947 }
948 
949 void AMDGPUDAGToDAGISel::SelectFMA_W_CHAIN(SDNode *N) {
950   SDLoc SL(N);
951   //  src0_modifiers, src0,  src1_modifiers, src1, src2_modifiers, src2, clamp, omod
952   SDValue Ops[10];
953 
954   SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[6], Ops[7]);
955   SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
956   SelectVOP3Mods(N->getOperand(3), Ops[5], Ops[4]);
957   Ops[8] = N->getOperand(0);
958   Ops[9] = N->getOperand(4);
959 
960   // If there are no source modifiers, prefer fmac over fma because it can use
961   // the smaller VOP2 encoding.
962   bool UseFMAC = Subtarget->hasDLInsts() &&
963                  cast<ConstantSDNode>(Ops[0])->isZero() &&
964                  cast<ConstantSDNode>(Ops[2])->isZero() &&
965                  cast<ConstantSDNode>(Ops[4])->isZero();
966   unsigned Opcode = UseFMAC ? AMDGPU::V_FMAC_F32_e64 : AMDGPU::V_FMA_F32_e64;
967   CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), Ops);
968 }
969 
970 void AMDGPUDAGToDAGISel::SelectFMUL_W_CHAIN(SDNode *N) {
971   SDLoc SL(N);
972   //    src0_modifiers, src0,  src1_modifiers, src1, clamp, omod
973   SDValue Ops[8];
974 
975   SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[4], Ops[5]);
976   SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
977   Ops[6] = N->getOperand(0);
978   Ops[7] = N->getOperand(3);
979 
980   CurDAG->SelectNodeTo(N, AMDGPU::V_MUL_F32_e64, N->getVTList(), Ops);
981 }
982 
983 // We need to handle this here because tablegen doesn't support matching
984 // instructions with multiple outputs.
985 void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
986   SDLoc SL(N);
987   EVT VT = N->getValueType(0);
988 
989   assert(VT == MVT::f32 || VT == MVT::f64);
990 
991   unsigned Opc
992     = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64_e64 : AMDGPU::V_DIV_SCALE_F32_e64;
993 
994   // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp,
995   // omod
996   SDValue Ops[8];
997   SelectVOP3BMods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
998   SelectVOP3BMods(N->getOperand(1), Ops[3], Ops[2]);
999   SelectVOP3BMods(N->getOperand(2), Ops[5], Ops[4]);
1000   CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
1001 }
1002 
1003 // We need to handle this here because tablegen doesn't support matching
1004 // instructions with multiple outputs.
1005 void AMDGPUDAGToDAGISel::SelectMAD_64_32(SDNode *N) {
1006   SDLoc SL(N);
1007   bool Signed = N->getOpcode() == AMDGPUISD::MAD_I64_I32;
1008   unsigned Opc;
1009   if (Subtarget->getGeneration() == AMDGPUSubtarget::GFX11)
1010     Opc = Signed ? AMDGPU::V_MAD_I64_I32_gfx11_e64
1011                  : AMDGPU::V_MAD_U64_U32_gfx11_e64;
1012   else
1013     Opc = Signed ? AMDGPU::V_MAD_I64_I32_e64 : AMDGPU::V_MAD_U64_U32_e64;
1014 
1015   SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
1016   SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
1017                     Clamp };
1018   CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
1019 }
1020 
1021 // We need to handle this here because tablegen doesn't support matching
1022 // instructions with multiple outputs.
1023 void AMDGPUDAGToDAGISel::SelectMUL_LOHI(SDNode *N) {
1024   SDLoc SL(N);
1025   bool Signed = N->getOpcode() == ISD::SMUL_LOHI;
1026   unsigned Opc;
1027   if (Subtarget->getGeneration() == AMDGPUSubtarget::GFX11)
1028     Opc = Signed ? AMDGPU::V_MAD_I64_I32_gfx11_e64
1029                  : AMDGPU::V_MAD_U64_U32_gfx11_e64;
1030   else
1031     Opc = Signed ? AMDGPU::V_MAD_I64_I32_e64 : AMDGPU::V_MAD_U64_U32_e64;
1032 
1033   SDValue Zero = CurDAG->getTargetConstant(0, SL, MVT::i64);
1034   SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
1035   SDValue Ops[] = {N->getOperand(0), N->getOperand(1), Zero, Clamp};
1036   SDNode *Mad = CurDAG->getMachineNode(Opc, SL, N->getVTList(), Ops);
1037   if (!SDValue(N, 0).use_empty()) {
1038     SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32);
1039     SDNode *Lo = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SL,
1040                                         MVT::i32, SDValue(Mad, 0), Sub0);
1041     ReplaceUses(SDValue(N, 0), SDValue(Lo, 0));
1042   }
1043   if (!SDValue(N, 1).use_empty()) {
1044     SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32);
1045     SDNode *Hi = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SL,
1046                                         MVT::i32, SDValue(Mad, 0), Sub1);
1047     ReplaceUses(SDValue(N, 1), SDValue(Hi, 0));
1048   }
1049   CurDAG->RemoveDeadNode(N);
1050 }
1051 
1052 bool AMDGPUDAGToDAGISel::isDSOffsetLegal(SDValue Base, unsigned Offset) const {
1053   if (!isUInt<16>(Offset))
1054     return false;
1055 
1056   if (!Base || Subtarget->hasUsableDSOffset() ||
1057       Subtarget->unsafeDSOffsetFoldingEnabled())
1058     return true;
1059 
1060   // On Southern Islands instruction with a negative base value and an offset
1061   // don't seem to work.
1062   return CurDAG->SignBitIsZero(Base);
1063 }
1064 
1065 bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
1066                                               SDValue &Offset) const {
1067   SDLoc DL(Addr);
1068   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1069     SDValue N0 = Addr.getOperand(0);
1070     SDValue N1 = Addr.getOperand(1);
1071     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
1072     if (isDSOffsetLegal(N0, C1->getSExtValue())) {
1073       // (add n0, c0)
1074       Base = N0;
1075       Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
1076       return true;
1077     }
1078   } else if (Addr.getOpcode() == ISD::SUB) {
1079     // sub C, x -> add (sub 0, x), C
1080     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
1081       int64_t ByteOffset = C->getSExtValue();
1082       if (isDSOffsetLegal(SDValue(), ByteOffset)) {
1083         SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
1084 
1085         // XXX - This is kind of hacky. Create a dummy sub node so we can check
1086         // the known bits in isDSOffsetLegal. We need to emit the selected node
1087         // here, so this is thrown away.
1088         SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
1089                                       Zero, Addr.getOperand(1));
1090 
1091         if (isDSOffsetLegal(Sub, ByteOffset)) {
1092           SmallVector<SDValue, 3> Opnds;
1093           Opnds.push_back(Zero);
1094           Opnds.push_back(Addr.getOperand(1));
1095 
1096           // FIXME: Select to VOP3 version for with-carry.
1097           unsigned SubOp = AMDGPU::V_SUB_CO_U32_e32;
1098           if (Subtarget->hasAddNoCarry()) {
1099             SubOp = AMDGPU::V_SUB_U32_e64;
1100             Opnds.push_back(
1101                 CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
1102           }
1103 
1104           MachineSDNode *MachineSub =
1105               CurDAG->getMachineNode(SubOp, DL, MVT::i32, Opnds);
1106 
1107           Base = SDValue(MachineSub, 0);
1108           Offset = CurDAG->getTargetConstant(ByteOffset, DL, MVT::i16);
1109           return true;
1110         }
1111       }
1112     }
1113   } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
1114     // If we have a constant address, prefer to put the constant into the
1115     // offset. This can save moves to load the constant address since multiple
1116     // operations can share the zero base address register, and enables merging
1117     // into read2 / write2 instructions.
1118 
1119     SDLoc DL(Addr);
1120 
1121     if (isDSOffsetLegal(SDValue(), CAddr->getZExtValue())) {
1122       SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
1123       MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
1124                                  DL, MVT::i32, Zero);
1125       Base = SDValue(MovZero, 0);
1126       Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
1127       return true;
1128     }
1129   }
1130 
1131   // default case
1132   Base = Addr;
1133   Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i16);
1134   return true;
1135 }
1136 
1137 bool AMDGPUDAGToDAGISel::isDSOffset2Legal(SDValue Base, unsigned Offset0,
1138                                           unsigned Offset1,
1139                                           unsigned Size) const {
1140   if (Offset0 % Size != 0 || Offset1 % Size != 0)
1141     return false;
1142   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
1143     return false;
1144 
1145   if (!Base || Subtarget->hasUsableDSOffset() ||
1146       Subtarget->unsafeDSOffsetFoldingEnabled())
1147     return true;
1148 
1149   // On Southern Islands instruction with a negative base value and an offset
1150   // don't seem to work.
1151   return CurDAG->SignBitIsZero(Base);
1152 }
1153 
1154 // TODO: If offset is too big, put low 16-bit into offset.
1155 bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
1156                                                    SDValue &Offset0,
1157                                                    SDValue &Offset1) const {
1158   return SelectDSReadWrite2(Addr, Base, Offset0, Offset1, 4);
1159 }
1160 
1161 bool AMDGPUDAGToDAGISel::SelectDS128Bit8ByteAligned(SDValue Addr, SDValue &Base,
1162                                                     SDValue &Offset0,
1163                                                     SDValue &Offset1) const {
1164   return SelectDSReadWrite2(Addr, Base, Offset0, Offset1, 8);
1165 }
1166 
1167 bool AMDGPUDAGToDAGISel::SelectDSReadWrite2(SDValue Addr, SDValue &Base,
1168                                             SDValue &Offset0, SDValue &Offset1,
1169                                             unsigned Size) const {
1170   SDLoc DL(Addr);
1171 
1172   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1173     SDValue N0 = Addr.getOperand(0);
1174     SDValue N1 = Addr.getOperand(1);
1175     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
1176     unsigned OffsetValue0 = C1->getZExtValue();
1177     unsigned OffsetValue1 = OffsetValue0 + Size;
1178 
1179     // (add n0, c0)
1180     if (isDSOffset2Legal(N0, OffsetValue0, OffsetValue1, Size)) {
1181       Base = N0;
1182       Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
1183       Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
1184       return true;
1185     }
1186   } else if (Addr.getOpcode() == ISD::SUB) {
1187     // sub C, x -> add (sub 0, x), C
1188     if (const ConstantSDNode *C =
1189             dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
1190       unsigned OffsetValue0 = C->getZExtValue();
1191       unsigned OffsetValue1 = OffsetValue0 + Size;
1192 
1193       if (isDSOffset2Legal(SDValue(), OffsetValue0, OffsetValue1, Size)) {
1194         SDLoc DL(Addr);
1195         SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
1196 
1197         // XXX - This is kind of hacky. Create a dummy sub node so we can check
1198         // the known bits in isDSOffsetLegal. We need to emit the selected node
1199         // here, so this is thrown away.
1200         SDValue Sub =
1201             CurDAG->getNode(ISD::SUB, DL, MVT::i32, Zero, Addr.getOperand(1));
1202 
1203         if (isDSOffset2Legal(Sub, OffsetValue0, OffsetValue1, Size)) {
1204           SmallVector<SDValue, 3> Opnds;
1205           Opnds.push_back(Zero);
1206           Opnds.push_back(Addr.getOperand(1));
1207           unsigned SubOp = AMDGPU::V_SUB_CO_U32_e32;
1208           if (Subtarget->hasAddNoCarry()) {
1209             SubOp = AMDGPU::V_SUB_U32_e64;
1210             Opnds.push_back(
1211                 CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
1212           }
1213 
1214           MachineSDNode *MachineSub = CurDAG->getMachineNode(
1215               SubOp, DL, MVT::getIntegerVT(Size * 8), Opnds);
1216 
1217           Base = SDValue(MachineSub, 0);
1218           Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
1219           Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
1220           return true;
1221         }
1222       }
1223     }
1224   } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
1225     unsigned OffsetValue0 = CAddr->getZExtValue();
1226     unsigned OffsetValue1 = OffsetValue0 + Size;
1227 
1228     if (isDSOffset2Legal(SDValue(), OffsetValue0, OffsetValue1, Size)) {
1229       SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
1230       MachineSDNode *MovZero =
1231           CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32, Zero);
1232       Base = SDValue(MovZero, 0);
1233       Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
1234       Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
1235       return true;
1236     }
1237   }
1238 
1239   // default case
1240 
1241   Base = Addr;
1242   Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
1243   Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
1244   return true;
1245 }
1246 
1247 bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr, SDValue &VAddr,
1248                                      SDValue &SOffset, SDValue &Offset,
1249                                      SDValue &Offen, SDValue &Idxen,
1250                                      SDValue &Addr64) const {
1251   // Subtarget prefers to use flat instruction
1252   // FIXME: This should be a pattern predicate and not reach here
1253   if (Subtarget->useFlatForGlobal())
1254     return false;
1255 
1256   SDLoc DL(Addr);
1257 
1258   Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
1259   Offen = CurDAG->getTargetConstant(0, DL, MVT::i1);
1260   Addr64 = CurDAG->getTargetConstant(0, DL, MVT::i1);
1261   SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
1262 
1263   ConstantSDNode *C1 = nullptr;
1264   SDValue N0 = Addr;
1265   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1266     C1 = cast<ConstantSDNode>(Addr.getOperand(1));
1267     if (isUInt<32>(C1->getZExtValue()))
1268       N0 = Addr.getOperand(0);
1269     else
1270       C1 = nullptr;
1271   }
1272 
1273   if (N0.getOpcode() == ISD::ADD) {
1274     // (add N2, N3) -> addr64, or
1275     // (add (add N2, N3), C1) -> addr64
1276     SDValue N2 = N0.getOperand(0);
1277     SDValue N3 = N0.getOperand(1);
1278     Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
1279 
1280     if (N2->isDivergent()) {
1281       if (N3->isDivergent()) {
1282         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
1283         // addr64, and construct the resource from a 0 address.
1284         Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
1285         VAddr = N0;
1286       } else {
1287         // N2 is divergent, N3 is not.
1288         Ptr = N3;
1289         VAddr = N2;
1290       }
1291     } else {
1292       // N2 is not divergent.
1293       Ptr = N2;
1294       VAddr = N3;
1295     }
1296     Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1297   } else if (N0->isDivergent()) {
1298     // N0 is divergent. Use it as the addr64, and construct the resource from a
1299     // 0 address.
1300     Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
1301     VAddr = N0;
1302     Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
1303   } else {
1304     // N0 -> offset, or
1305     // (N0 + C1) -> offset
1306     VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
1307     Ptr = N0;
1308   }
1309 
1310   if (!C1) {
1311     // No offset.
1312     Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1313     return true;
1314   }
1315 
1316   if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue())) {
1317     // Legal offset for instruction.
1318     Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
1319     return true;
1320   }
1321 
1322   // Illegal offset, store it in soffset.
1323   Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1324   SOffset =
1325       SDValue(CurDAG->getMachineNode(
1326                   AMDGPU::S_MOV_B32, DL, MVT::i32,
1327                   CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32)),
1328               0);
1329   return true;
1330 }
1331 
1332 bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
1333                                            SDValue &VAddr, SDValue &SOffset,
1334                                            SDValue &Offset) const {
1335   SDValue Ptr, Offen, Idxen, Addr64;
1336 
1337   // addr64 bit was removed for volcanic islands.
1338   // FIXME: This should be a pattern predicate and not reach here
1339   if (!Subtarget->hasAddr64())
1340     return false;
1341 
1342   if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64))
1343     return false;
1344 
1345   ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
1346   if (C->getSExtValue()) {
1347     SDLoc DL(Addr);
1348 
1349     const SITargetLowering& Lowering =
1350       *static_cast<const SITargetLowering*>(getTargetLowering());
1351 
1352     SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
1353     return true;
1354   }
1355 
1356   return false;
1357 }
1358 
1359 std::pair<SDValue, SDValue> AMDGPUDAGToDAGISel::foldFrameIndex(SDValue N) const {
1360   SDLoc DL(N);
1361 
1362   auto *FI = dyn_cast<FrameIndexSDNode>(N);
1363   SDValue TFI =
1364       FI ? CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0)) : N;
1365 
1366   // We rebase the base address into an absolute stack address and hence
1367   // use constant 0 for soffset. This value must be retained until
1368   // frame elimination and eliminateFrameIndex will choose the appropriate
1369   // frame register if need be.
1370   return std::make_pair(TFI, CurDAG->getTargetConstant(0, DL, MVT::i32));
1371 }
1372 
1373 bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffen(SDNode *Parent,
1374                                                  SDValue Addr, SDValue &Rsrc,
1375                                                  SDValue &VAddr, SDValue &SOffset,
1376                                                  SDValue &ImmOffset) const {
1377 
1378   SDLoc DL(Addr);
1379   MachineFunction &MF = CurDAG->getMachineFunction();
1380   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1381 
1382   Rsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
1383 
1384   if (ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
1385     int64_t Imm = CAddr->getSExtValue();
1386     const int64_t NullPtr =
1387         AMDGPUTargetMachine::getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS);
1388     // Don't fold null pointer.
1389     if (Imm != NullPtr) {
1390       SDValue HighBits = CurDAG->getTargetConstant(Imm & ~4095, DL, MVT::i32);
1391       MachineSDNode *MovHighBits = CurDAG->getMachineNode(
1392         AMDGPU::V_MOV_B32_e32, DL, MVT::i32, HighBits);
1393       VAddr = SDValue(MovHighBits, 0);
1394 
1395       SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
1396       ImmOffset = CurDAG->getTargetConstant(Imm & 4095, DL, MVT::i16);
1397       return true;
1398     }
1399   }
1400 
1401   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1402     // (add n0, c1)
1403 
1404     SDValue N0 = Addr.getOperand(0);
1405     SDValue N1 = Addr.getOperand(1);
1406 
1407     // Offsets in vaddr must be positive if range checking is enabled.
1408     //
1409     // The total computation of vaddr + soffset + offset must not overflow.  If
1410     // vaddr is negative, even if offset is 0 the sgpr offset add will end up
1411     // overflowing.
1412     //
1413     // Prior to gfx9, MUBUF instructions with the vaddr offset enabled would
1414     // always perform a range check. If a negative vaddr base index was used,
1415     // this would fail the range check. The overall address computation would
1416     // compute a valid address, but this doesn't happen due to the range
1417     // check. For out-of-bounds MUBUF loads, a 0 is returned.
1418     //
1419     // Therefore it should be safe to fold any VGPR offset on gfx9 into the
1420     // MUBUF vaddr, but not on older subtargets which can only do this if the
1421     // sign bit is known 0.
1422     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
1423     if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue()) &&
1424         (!Subtarget->privateMemoryResourceIsRangeChecked() ||
1425          CurDAG->SignBitIsZero(N0))) {
1426       std::tie(VAddr, SOffset) = foldFrameIndex(N0);
1427       ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
1428       return true;
1429     }
1430   }
1431 
1432   // (node)
1433   std::tie(VAddr, SOffset) = foldFrameIndex(Addr);
1434   ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1435   return true;
1436 }
1437 
1438 static bool IsCopyFromSGPR(const SIRegisterInfo &TRI, SDValue Val) {
1439   if (Val.getOpcode() != ISD::CopyFromReg)
1440     return false;
1441   auto RC =
1442       TRI.getPhysRegClass(cast<RegisterSDNode>(Val.getOperand(1))->getReg());
1443   return RC && TRI.isSGPRClass(RC);
1444 }
1445 
1446 bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffset(SDNode *Parent,
1447                                                   SDValue Addr,
1448                                                   SDValue &SRsrc,
1449                                                   SDValue &SOffset,
1450                                                   SDValue &Offset) const {
1451   const SIRegisterInfo *TRI =
1452       static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
1453   MachineFunction &MF = CurDAG->getMachineFunction();
1454   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1455   SDLoc DL(Addr);
1456 
1457   // CopyFromReg <sgpr>
1458   if (IsCopyFromSGPR(*TRI, Addr)) {
1459     SRsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
1460     SOffset = Addr;
1461     Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1462     return true;
1463   }
1464 
1465   ConstantSDNode *CAddr;
1466   if (Addr.getOpcode() == ISD::ADD) {
1467     // Add (CopyFromReg <sgpr>) <constant>
1468     CAddr = dyn_cast<ConstantSDNode>(Addr.getOperand(1));
1469     if (!CAddr || !SIInstrInfo::isLegalMUBUFImmOffset(CAddr->getZExtValue()))
1470       return false;
1471     if (!IsCopyFromSGPR(*TRI, Addr.getOperand(0)))
1472       return false;
1473 
1474     SOffset = Addr.getOperand(0);
1475   } else if ((CAddr = dyn_cast<ConstantSDNode>(Addr)) &&
1476              SIInstrInfo::isLegalMUBUFImmOffset(CAddr->getZExtValue())) {
1477     // <constant>
1478     SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
1479   } else {
1480     return false;
1481   }
1482 
1483   SRsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
1484 
1485   Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
1486   return true;
1487 }
1488 
1489 bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
1490                                            SDValue &SOffset, SDValue &Offset
1491                                            ) const {
1492   SDValue Ptr, VAddr, Offen, Idxen, Addr64;
1493   const SIInstrInfo *TII =
1494     static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
1495 
1496   if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64))
1497     return false;
1498 
1499   if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
1500       !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
1501       !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
1502     uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
1503                     APInt::getAllOnes(32).getZExtValue(); // Size
1504     SDLoc DL(Addr);
1505 
1506     const SITargetLowering& Lowering =
1507       *static_cast<const SITargetLowering*>(getTargetLowering());
1508 
1509     SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
1510     return true;
1511   }
1512   return false;
1513 }
1514 
1515 // Find a load or store from corresponding pattern root.
1516 // Roots may be build_vector, bitconvert or their combinations.
1517 static MemSDNode* findMemSDNode(SDNode *N) {
1518   N = AMDGPUTargetLowering::stripBitcast(SDValue(N,0)).getNode();
1519   if (MemSDNode *MN = dyn_cast<MemSDNode>(N))
1520     return MN;
1521   assert(isa<BuildVectorSDNode>(N));
1522   for (SDValue V : N->op_values())
1523     if (MemSDNode *MN =
1524           dyn_cast<MemSDNode>(AMDGPUTargetLowering::stripBitcast(V)))
1525       return MN;
1526   llvm_unreachable("cannot find MemSDNode in the pattern!");
1527 }
1528 
1529 bool AMDGPUDAGToDAGISel::SelectFlatOffsetImpl(SDNode *N, SDValue Addr,
1530                                               SDValue &VAddr, SDValue &Offset,
1531                                               uint64_t FlatVariant) const {
1532   int64_t OffsetVal = 0;
1533 
1534   unsigned AS = findMemSDNode(N)->getAddressSpace();
1535 
1536   bool CanHaveFlatSegmentOffsetBug =
1537       Subtarget->hasFlatSegmentOffsetBug() &&
1538       FlatVariant == SIInstrFlags::FLAT &&
1539       (AS == AMDGPUAS::FLAT_ADDRESS || AS == AMDGPUAS::GLOBAL_ADDRESS);
1540 
1541   if (Subtarget->hasFlatInstOffsets() && !CanHaveFlatSegmentOffsetBug) {
1542     SDValue N0, N1;
1543     if (isBaseWithConstantOffset64(Addr, N0, N1)) {
1544       int64_t COffsetVal = cast<ConstantSDNode>(N1)->getSExtValue();
1545 
1546       const SIInstrInfo *TII = Subtarget->getInstrInfo();
1547       if (TII->isLegalFLATOffset(COffsetVal, AS, FlatVariant)) {
1548         Addr = N0;
1549         OffsetVal = COffsetVal;
1550       } else {
1551         // If the offset doesn't fit, put the low bits into the offset field and
1552         // add the rest.
1553         //
1554         // For a FLAT instruction the hardware decides whether to access
1555         // global/scratch/shared memory based on the high bits of vaddr,
1556         // ignoring the offset field, so we have to ensure that when we add
1557         // remainder to vaddr it still points into the same underlying object.
1558         // The easiest way to do that is to make sure that we split the offset
1559         // into two pieces that are both >= 0 or both <= 0.
1560 
1561         SDLoc DL(N);
1562         uint64_t RemainderOffset;
1563 
1564         std::tie(OffsetVal, RemainderOffset) =
1565             TII->splitFlatOffset(COffsetVal, AS, FlatVariant);
1566 
1567         SDValue AddOffsetLo =
1568             getMaterializedScalarImm32(Lo_32(RemainderOffset), DL);
1569         SDValue Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
1570 
1571         if (Addr.getValueType().getSizeInBits() == 32) {
1572           SmallVector<SDValue, 3> Opnds;
1573           Opnds.push_back(N0);
1574           Opnds.push_back(AddOffsetLo);
1575           unsigned AddOp = AMDGPU::V_ADD_CO_U32_e32;
1576           if (Subtarget->hasAddNoCarry()) {
1577             AddOp = AMDGPU::V_ADD_U32_e64;
1578             Opnds.push_back(Clamp);
1579           }
1580           Addr = SDValue(CurDAG->getMachineNode(AddOp, DL, MVT::i32, Opnds), 0);
1581         } else {
1582           // TODO: Should this try to use a scalar add pseudo if the base address
1583           // is uniform and saddr is usable?
1584           SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
1585           SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
1586 
1587           SDNode *N0Lo = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
1588                                                 DL, MVT::i32, N0, Sub0);
1589           SDNode *N0Hi = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
1590                                                 DL, MVT::i32, N0, Sub1);
1591 
1592           SDValue AddOffsetHi =
1593               getMaterializedScalarImm32(Hi_32(RemainderOffset), DL);
1594 
1595           SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i1);
1596 
1597           SDNode *Add =
1598               CurDAG->getMachineNode(AMDGPU::V_ADD_CO_U32_e64, DL, VTs,
1599                                      {AddOffsetLo, SDValue(N0Lo, 0), Clamp});
1600 
1601           SDNode *Addc = CurDAG->getMachineNode(
1602               AMDGPU::V_ADDC_U32_e64, DL, VTs,
1603               {AddOffsetHi, SDValue(N0Hi, 0), SDValue(Add, 1), Clamp});
1604 
1605           SDValue RegSequenceArgs[] = {
1606               CurDAG->getTargetConstant(AMDGPU::VReg_64RegClassID, DL, MVT::i32),
1607               SDValue(Add, 0), Sub0, SDValue(Addc, 0), Sub1};
1608 
1609           Addr = SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
1610                                                 MVT::i64, RegSequenceArgs),
1611                          0);
1612         }
1613       }
1614     }
1615   }
1616 
1617   VAddr = Addr;
1618   Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i16);
1619   return true;
1620 }
1621 
1622 bool AMDGPUDAGToDAGISel::SelectFlatOffset(SDNode *N, SDValue Addr,
1623                                           SDValue &VAddr,
1624                                           SDValue &Offset) const {
1625   return SelectFlatOffsetImpl(N, Addr, VAddr, Offset, SIInstrFlags::FLAT);
1626 }
1627 
1628 bool AMDGPUDAGToDAGISel::SelectGlobalOffset(SDNode *N, SDValue Addr,
1629                                             SDValue &VAddr,
1630                                             SDValue &Offset) const {
1631   return SelectFlatOffsetImpl(N, Addr, VAddr, Offset, SIInstrFlags::FlatGlobal);
1632 }
1633 
1634 bool AMDGPUDAGToDAGISel::SelectScratchOffset(SDNode *N, SDValue Addr,
1635                                              SDValue &VAddr,
1636                                              SDValue &Offset) const {
1637   return SelectFlatOffsetImpl(N, Addr, VAddr, Offset,
1638                               SIInstrFlags::FlatScratch);
1639 }
1640 
1641 // If this matches zero_extend i32:x, return x
1642 static SDValue matchZExtFromI32(SDValue Op) {
1643   if (Op.getOpcode() != ISD::ZERO_EXTEND)
1644     return SDValue();
1645 
1646   SDValue ExtSrc = Op.getOperand(0);
1647   return (ExtSrc.getValueType() == MVT::i32) ? ExtSrc : SDValue();
1648 }
1649 
1650 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
1651 bool AMDGPUDAGToDAGISel::SelectGlobalSAddr(SDNode *N,
1652                                            SDValue Addr,
1653                                            SDValue &SAddr,
1654                                            SDValue &VOffset,
1655                                            SDValue &Offset) const {
1656   int64_t ImmOffset = 0;
1657 
1658   // Match the immediate offset first, which canonically is moved as low as
1659   // possible.
1660 
1661   SDValue LHS, RHS;
1662   if (isBaseWithConstantOffset64(Addr, LHS, RHS)) {
1663     int64_t COffsetVal = cast<ConstantSDNode>(RHS)->getSExtValue();
1664     const SIInstrInfo *TII = Subtarget->getInstrInfo();
1665 
1666     if (TII->isLegalFLATOffset(COffsetVal, AMDGPUAS::GLOBAL_ADDRESS,
1667                                SIInstrFlags::FlatGlobal)) {
1668       Addr = LHS;
1669       ImmOffset = COffsetVal;
1670     } else if (!LHS->isDivergent()) {
1671       if (COffsetVal > 0) {
1672         SDLoc SL(N);
1673         // saddr + large_offset -> saddr +
1674         //                         (voffset = large_offset & ~MaxOffset) +
1675         //                         (large_offset & MaxOffset);
1676         int64_t SplitImmOffset, RemainderOffset;
1677         std::tie(SplitImmOffset, RemainderOffset) = TII->splitFlatOffset(
1678             COffsetVal, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
1679 
1680         if (isUInt<32>(RemainderOffset)) {
1681           SDNode *VMov = CurDAG->getMachineNode(
1682               AMDGPU::V_MOV_B32_e32, SL, MVT::i32,
1683               CurDAG->getTargetConstant(RemainderOffset, SDLoc(), MVT::i32));
1684           VOffset = SDValue(VMov, 0);
1685           SAddr = LHS;
1686           Offset = CurDAG->getTargetConstant(SplitImmOffset, SDLoc(), MVT::i16);
1687           return true;
1688         }
1689       }
1690 
1691       // We are adding a 64 bit SGPR and a constant. If constant bus limit
1692       // is 1 we would need to perform 1 or 2 extra moves for each half of
1693       // the constant and it is better to do a scalar add and then issue a
1694       // single VALU instruction to materialize zero. Otherwise it is less
1695       // instructions to perform VALU adds with immediates or inline literals.
1696       unsigned NumLiterals =
1697           !TII->isInlineConstant(APInt(32, COffsetVal & 0xffffffff)) +
1698           !TII->isInlineConstant(APInt(32, COffsetVal >> 32));
1699       if (Subtarget->getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
1700         return false;
1701     }
1702   }
1703 
1704   // Match the variable offset.
1705   if (Addr.getOpcode() == ISD::ADD) {
1706     LHS = Addr.getOperand(0);
1707     RHS = Addr.getOperand(1);
1708 
1709     if (!LHS->isDivergent()) {
1710       // add (i64 sgpr), (zero_extend (i32 vgpr))
1711       if (SDValue ZextRHS = matchZExtFromI32(RHS)) {
1712         SAddr = LHS;
1713         VOffset = ZextRHS;
1714       }
1715     }
1716 
1717     if (!SAddr && !RHS->isDivergent()) {
1718       // add (zero_extend (i32 vgpr)), (i64 sgpr)
1719       if (SDValue ZextLHS = matchZExtFromI32(LHS)) {
1720         SAddr = RHS;
1721         VOffset = ZextLHS;
1722       }
1723     }
1724 
1725     if (SAddr) {
1726       Offset = CurDAG->getTargetConstant(ImmOffset, SDLoc(), MVT::i16);
1727       return true;
1728     }
1729   }
1730 
1731   if (Addr->isDivergent() || Addr.getOpcode() == ISD::UNDEF ||
1732       isa<ConstantSDNode>(Addr))
1733     return false;
1734 
1735   // It's cheaper to materialize a single 32-bit zero for vaddr than the two
1736   // moves required to copy a 64-bit SGPR to VGPR.
1737   SAddr = Addr;
1738   SDNode *VMov =
1739       CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, SDLoc(Addr), MVT::i32,
1740                              CurDAG->getTargetConstant(0, SDLoc(), MVT::i32));
1741   VOffset = SDValue(VMov, 0);
1742   Offset = CurDAG->getTargetConstant(ImmOffset, SDLoc(), MVT::i16);
1743   return true;
1744 }
1745 
1746 static SDValue SelectSAddrFI(SelectionDAG *CurDAG, SDValue SAddr) {
1747   if (auto FI = dyn_cast<FrameIndexSDNode>(SAddr)) {
1748     SAddr = CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0));
1749   } else if (SAddr.getOpcode() == ISD::ADD &&
1750              isa<FrameIndexSDNode>(SAddr.getOperand(0))) {
1751     // Materialize this into a scalar move for scalar address to avoid
1752     // readfirstlane.
1753     auto FI = cast<FrameIndexSDNode>(SAddr.getOperand(0));
1754     SDValue TFI = CurDAG->getTargetFrameIndex(FI->getIndex(),
1755                                               FI->getValueType(0));
1756     SAddr = SDValue(CurDAG->getMachineNode(AMDGPU::S_ADD_I32, SDLoc(SAddr),
1757                                            MVT::i32, TFI, SAddr.getOperand(1)),
1758                     0);
1759   }
1760 
1761   return SAddr;
1762 }
1763 
1764 // Match (32-bit SGPR base) + sext(imm offset)
1765 bool AMDGPUDAGToDAGISel::SelectScratchSAddr(SDNode *Parent, SDValue Addr,
1766                                             SDValue &SAddr,
1767                                             SDValue &Offset) const {
1768   if (Addr->isDivergent())
1769     return false;
1770 
1771   SDLoc DL(Addr);
1772 
1773   int64_t COffsetVal = 0;
1774 
1775   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1776     COffsetVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
1777     SAddr = Addr.getOperand(0);
1778   } else {
1779     SAddr = Addr;
1780   }
1781 
1782   SAddr = SelectSAddrFI(CurDAG, SAddr);
1783 
1784   const SIInstrInfo *TII = Subtarget->getInstrInfo();
1785 
1786   if (!TII->isLegalFLATOffset(COffsetVal, AMDGPUAS::PRIVATE_ADDRESS,
1787                               SIInstrFlags::FlatScratch)) {
1788     int64_t SplitImmOffset, RemainderOffset;
1789     std::tie(SplitImmOffset, RemainderOffset) = TII->splitFlatOffset(
1790         COffsetVal, AMDGPUAS::PRIVATE_ADDRESS, SIInstrFlags::FlatScratch);
1791 
1792     COffsetVal = SplitImmOffset;
1793 
1794     SDValue AddOffset =
1795         SAddr.getOpcode() == ISD::TargetFrameIndex
1796             ? getMaterializedScalarImm32(Lo_32(RemainderOffset), DL)
1797             : CurDAG->getTargetConstant(RemainderOffset, DL, MVT::i32);
1798     SAddr = SDValue(CurDAG->getMachineNode(AMDGPU::S_ADD_I32, DL, MVT::i32,
1799                                            SAddr, AddOffset),
1800                     0);
1801   }
1802 
1803   Offset = CurDAG->getTargetConstant(COffsetVal, DL, MVT::i16);
1804 
1805   return true;
1806 }
1807 
1808 // Check whether the flat scratch SVS swizzle bug affects this access.
1809 bool AMDGPUDAGToDAGISel::checkFlatScratchSVSSwizzleBug(
1810     SDValue VAddr, SDValue SAddr, uint64_t ImmOffset) const {
1811   if (!Subtarget->hasFlatScratchSVSSwizzleBug())
1812     return false;
1813 
1814   // The bug affects the swizzling of SVS accesses if there is any carry out
1815   // from the two low order bits (i.e. from bit 1 into bit 2) when adding
1816   // voffset to (soffset + inst_offset).
1817   KnownBits VKnown = CurDAG->computeKnownBits(VAddr);
1818   KnownBits SKnown = KnownBits::computeForAddSub(
1819       true, false, CurDAG->computeKnownBits(SAddr),
1820       KnownBits::makeConstant(APInt(32, ImmOffset)));
1821   uint64_t VMax = VKnown.getMaxValue().getZExtValue();
1822   uint64_t SMax = SKnown.getMaxValue().getZExtValue();
1823   return (VMax & 3) + (SMax & 3) >= 4;
1824 }
1825 
1826 bool AMDGPUDAGToDAGISel::SelectScratchSVAddr(SDNode *N, SDValue Addr,
1827                                              SDValue &VAddr, SDValue &SAddr,
1828                                              SDValue &Offset) const  {
1829   int64_t ImmOffset = 0;
1830 
1831   SDValue LHS, RHS;
1832   if (isBaseWithConstantOffset64(Addr, LHS, RHS)) {
1833     int64_t COffsetVal = cast<ConstantSDNode>(RHS)->getSExtValue();
1834     const SIInstrInfo *TII = Subtarget->getInstrInfo();
1835 
1836     if (TII->isLegalFLATOffset(COffsetVal, AMDGPUAS::PRIVATE_ADDRESS, true)) {
1837       Addr = LHS;
1838       ImmOffset = COffsetVal;
1839     } else if (!LHS->isDivergent() && COffsetVal > 0) {
1840       SDLoc SL(N);
1841       // saddr + large_offset -> saddr + (vaddr = large_offset & ~MaxOffset) +
1842       //                         (large_offset & MaxOffset);
1843       int64_t SplitImmOffset, RemainderOffset;
1844       std::tie(SplitImmOffset, RemainderOffset)
1845         = TII->splitFlatOffset(COffsetVal, AMDGPUAS::PRIVATE_ADDRESS, true);
1846 
1847       if (isUInt<32>(RemainderOffset)) {
1848         SDNode *VMov = CurDAG->getMachineNode(
1849           AMDGPU::V_MOV_B32_e32, SL, MVT::i32,
1850           CurDAG->getTargetConstant(RemainderOffset, SDLoc(), MVT::i32));
1851         VAddr = SDValue(VMov, 0);
1852         SAddr = LHS;
1853         if (checkFlatScratchSVSSwizzleBug(VAddr, SAddr, SplitImmOffset))
1854           return false;
1855         Offset = CurDAG->getTargetConstant(SplitImmOffset, SDLoc(), MVT::i16);
1856         return true;
1857       }
1858     }
1859   }
1860 
1861   if (Addr.getOpcode() != ISD::ADD)
1862     return false;
1863 
1864   LHS = Addr.getOperand(0);
1865   RHS = Addr.getOperand(1);
1866 
1867   if (!LHS->isDivergent() && RHS->isDivergent()) {
1868     SAddr = LHS;
1869     VAddr = RHS;
1870   } else if (!RHS->isDivergent() && LHS->isDivergent()) {
1871     SAddr = RHS;
1872     VAddr = LHS;
1873   } else {
1874     return false;
1875   }
1876 
1877   if (checkFlatScratchSVSSwizzleBug(VAddr, SAddr, ImmOffset))
1878     return false;
1879   SAddr = SelectSAddrFI(CurDAG, SAddr);
1880   Offset = CurDAG->getTargetConstant(ImmOffset, SDLoc(), MVT::i16);
1881   return true;
1882 }
1883 
1884 bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode,
1885                                           SDValue &Offset, bool &Imm) const {
1886   ConstantSDNode *C = dyn_cast<ConstantSDNode>(ByteOffsetNode);
1887   if (!C) {
1888     if (ByteOffsetNode.getValueType().isScalarInteger() &&
1889         ByteOffsetNode.getValueType().getSizeInBits() == 32) {
1890       Offset = ByteOffsetNode;
1891       Imm = false;
1892       return true;
1893     }
1894     if (ByteOffsetNode.getOpcode() == ISD::ZERO_EXTEND) {
1895       if (ByteOffsetNode.getOperand(0).getValueType().getSizeInBits() == 32) {
1896         Offset = ByteOffsetNode.getOperand(0);
1897         Imm = false;
1898         return true;
1899       }
1900     }
1901     return false;
1902   }
1903 
1904   SDLoc SL(ByteOffsetNode);
1905   // GFX9 and GFX10 have signed byte immediate offsets.
1906   int64_t ByteOffset = C->getSExtValue();
1907   Optional<int64_t> EncodedOffset =
1908       AMDGPU::getSMRDEncodedOffset(*Subtarget, ByteOffset, false);
1909   if (EncodedOffset) {
1910     Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
1911     Imm = true;
1912     return true;
1913   }
1914 
1915   // SGPR and literal offsets are unsigned.
1916   if (ByteOffset < 0)
1917     return false;
1918 
1919   EncodedOffset = AMDGPU::getSMRDEncodedLiteralOffset32(*Subtarget, ByteOffset);
1920   if (EncodedOffset) {
1921     Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
1922     return true;
1923   }
1924 
1925   if (!isUInt<32>(ByteOffset) && !isInt<32>(ByteOffset))
1926     return false;
1927 
1928   SDValue C32Bit = CurDAG->getTargetConstant(ByteOffset, SL, MVT::i32);
1929   Offset = SDValue(
1930       CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, C32Bit), 0);
1931 
1932   return true;
1933 }
1934 
1935 SDValue AMDGPUDAGToDAGISel::Expand32BitAddress(SDValue Addr) const {
1936   if (Addr.getValueType() != MVT::i32)
1937     return Addr;
1938 
1939   // Zero-extend a 32-bit address.
1940   SDLoc SL(Addr);
1941 
1942   const MachineFunction &MF = CurDAG->getMachineFunction();
1943   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1944   unsigned AddrHiVal = Info->get32BitAddressHighBits();
1945   SDValue AddrHi = CurDAG->getTargetConstant(AddrHiVal, SL, MVT::i32);
1946 
1947   const SDValue Ops[] = {
1948     CurDAG->getTargetConstant(AMDGPU::SReg_64_XEXECRegClassID, SL, MVT::i32),
1949     Addr,
1950     CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32),
1951     SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, AddrHi),
1952             0),
1953     CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32),
1954   };
1955 
1956   return SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, SL, MVT::i64,
1957                                         Ops), 0);
1958 }
1959 
1960 bool AMDGPUDAGToDAGISel::SelectSMRD(SDValue Addr, SDValue &SBase,
1961                                      SDValue &Offset, bool &Imm) const {
1962   SDLoc SL(Addr);
1963 
1964   // A 32-bit (address + offset) should not cause unsigned 32-bit integer
1965   // wraparound, because s_load instructions perform the addition in 64 bits.
1966   if ((Addr.getValueType() != MVT::i32 ||
1967        Addr->getFlags().hasNoUnsignedWrap())) {
1968     SDValue N0, N1;
1969     // Extract the base and offset if possible.
1970     if (CurDAG->isBaseWithConstantOffset(Addr) ||
1971         Addr.getOpcode() == ISD::ADD) {
1972       N0 = Addr.getOperand(0);
1973       N1 = Addr.getOperand(1);
1974     } else if (getBaseWithOffsetUsingSplitOR(*CurDAG, Addr, N0, N1)) {
1975       assert(N0 && N1 && isa<ConstantSDNode>(N1));
1976     }
1977     if (N0 && N1) {
1978       if (SelectSMRDOffset(N1, Offset, Imm)) {
1979         SBase = Expand32BitAddress(N0);
1980         return true;
1981       }
1982     }
1983   }
1984   SBase = Expand32BitAddress(Addr);
1985   Offset = CurDAG->getTargetConstant(0, SL, MVT::i32);
1986   Imm = true;
1987   return true;
1988 }
1989 
1990 bool AMDGPUDAGToDAGISel::SelectSMRDImm(SDValue Addr, SDValue &SBase,
1991                                        SDValue &Offset) const {
1992   bool Imm = false;
1993   return SelectSMRD(Addr, SBase, Offset, Imm) && Imm;
1994 }
1995 
1996 bool AMDGPUDAGToDAGISel::SelectSMRDImm32(SDValue Addr, SDValue &SBase,
1997                                          SDValue &Offset) const {
1998 
1999   assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
2000 
2001   bool Imm = false;
2002   if (!SelectSMRD(Addr, SBase, Offset, Imm))
2003     return false;
2004 
2005   return !Imm && isa<ConstantSDNode>(Offset);
2006 }
2007 
2008 bool AMDGPUDAGToDAGISel::SelectSMRDSgpr(SDValue Addr, SDValue &SBase,
2009                                         SDValue &Offset) const {
2010   bool Imm = false;
2011   return SelectSMRD(Addr, SBase, Offset, Imm) && !Imm &&
2012          !isa<ConstantSDNode>(Offset);
2013 }
2014 
2015 bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm(SDValue Addr,
2016                                              SDValue &Offset) const {
2017   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr)) {
2018     // The immediate offset for S_BUFFER instructions is unsigned.
2019     if (auto Imm =
2020             AMDGPU::getSMRDEncodedOffset(*Subtarget, C->getZExtValue(), true)) {
2021       Offset = CurDAG->getTargetConstant(*Imm, SDLoc(Addr), MVT::i32);
2022       return true;
2023     }
2024   }
2025 
2026   return false;
2027 }
2028 
2029 bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm32(SDValue Addr,
2030                                                SDValue &Offset) const {
2031   assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
2032 
2033   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr)) {
2034     if (auto Imm = AMDGPU::getSMRDEncodedLiteralOffset32(*Subtarget,
2035                                                          C->getZExtValue())) {
2036       Offset = CurDAG->getTargetConstant(*Imm, SDLoc(Addr), MVT::i32);
2037       return true;
2038     }
2039   }
2040 
2041   return false;
2042 }
2043 
2044 bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index,
2045                                             SDValue &Base,
2046                                             SDValue &Offset) const {
2047   SDLoc DL(Index);
2048 
2049   if (CurDAG->isBaseWithConstantOffset(Index)) {
2050     SDValue N0 = Index.getOperand(0);
2051     SDValue N1 = Index.getOperand(1);
2052     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
2053 
2054     // (add n0, c0)
2055     // Don't peel off the offset (c0) if doing so could possibly lead
2056     // the base (n0) to be negative.
2057     // (or n0, |c0|) can never change a sign given isBaseWithConstantOffset.
2058     if (C1->getSExtValue() <= 0 || CurDAG->SignBitIsZero(N0) ||
2059         (Index->getOpcode() == ISD::OR && C1->getSExtValue() >= 0)) {
2060       Base = N0;
2061       Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32);
2062       return true;
2063     }
2064   }
2065 
2066   if (isa<ConstantSDNode>(Index))
2067     return false;
2068 
2069   Base = Index;
2070   Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
2071   return true;
2072 }
2073 
2074 SDNode *AMDGPUDAGToDAGISel::getBFE32(bool IsSigned, const SDLoc &DL,
2075                                      SDValue Val, uint32_t Offset,
2076                                      uint32_t Width) {
2077   if (Val->isDivergent()) {
2078     unsigned Opcode = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2079     SDValue Off = CurDAG->getTargetConstant(Offset, DL, MVT::i32);
2080     SDValue W = CurDAG->getTargetConstant(Width, DL, MVT::i32);
2081 
2082     return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, Off, W);
2083   }
2084   unsigned Opcode = IsSigned ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2085   // Transformation function, pack the offset and width of a BFE into
2086   // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
2087   // source, bits [5:0] contain the offset and bits [22:16] the width.
2088   uint32_t PackedVal = Offset | (Width << 16);
2089   SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32);
2090 
2091   return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
2092 }
2093 
2094 void AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
2095   // "(a << b) srl c)" ---> "BFE_U32 a, (c-b), (32-c)
2096   // "(a << b) sra c)" ---> "BFE_I32 a, (c-b), (32-c)
2097   // Predicate: 0 < b <= c < 32
2098 
2099   const SDValue &Shl = N->getOperand(0);
2100   ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
2101   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
2102 
2103   if (B && C) {
2104     uint32_t BVal = B->getZExtValue();
2105     uint32_t CVal = C->getZExtValue();
2106 
2107     if (0 < BVal && BVal <= CVal && CVal < 32) {
2108       bool Signed = N->getOpcode() == ISD::SRA;
2109       ReplaceNode(N, getBFE32(Signed, SDLoc(N), Shl.getOperand(0), CVal - BVal,
2110                   32 - CVal));
2111       return;
2112     }
2113   }
2114   SelectCode(N);
2115 }
2116 
2117 void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
2118   switch (N->getOpcode()) {
2119   case ISD::AND:
2120     if (N->getOperand(0).getOpcode() == ISD::SRL) {
2121       // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
2122       // Predicate: isMask(mask)
2123       const SDValue &Srl = N->getOperand(0);
2124       ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
2125       ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));
2126 
2127       if (Shift && Mask) {
2128         uint32_t ShiftVal = Shift->getZExtValue();
2129         uint32_t MaskVal = Mask->getZExtValue();
2130 
2131         if (isMask_32(MaskVal)) {
2132           uint32_t WidthVal = countPopulation(MaskVal);
2133           ReplaceNode(N, getBFE32(false, SDLoc(N), Srl.getOperand(0), ShiftVal,
2134                                   WidthVal));
2135           return;
2136         }
2137       }
2138     }
2139     break;
2140   case ISD::SRL:
2141     if (N->getOperand(0).getOpcode() == ISD::AND) {
2142       // "(a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)"
2143       // Predicate: isMask(mask >> b)
2144       const SDValue &And = N->getOperand(0);
2145       ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
2146       ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));
2147 
2148       if (Shift && Mask) {
2149         uint32_t ShiftVal = Shift->getZExtValue();
2150         uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;
2151 
2152         if (isMask_32(MaskVal)) {
2153           uint32_t WidthVal = countPopulation(MaskVal);
2154           ReplaceNode(N, getBFE32(false, SDLoc(N), And.getOperand(0), ShiftVal,
2155                       WidthVal));
2156           return;
2157         }
2158       }
2159     } else if (N->getOperand(0).getOpcode() == ISD::SHL) {
2160       SelectS_BFEFromShifts(N);
2161       return;
2162     }
2163     break;
2164   case ISD::SRA:
2165     if (N->getOperand(0).getOpcode() == ISD::SHL) {
2166       SelectS_BFEFromShifts(N);
2167       return;
2168     }
2169     break;
2170 
2171   case ISD::SIGN_EXTEND_INREG: {
2172     // sext_inreg (srl x, 16), i8 -> bfe_i32 x, 16, 8
2173     SDValue Src = N->getOperand(0);
2174     if (Src.getOpcode() != ISD::SRL)
2175       break;
2176 
2177     const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
2178     if (!Amt)
2179       break;
2180 
2181     unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
2182     ReplaceNode(N, getBFE32(true, SDLoc(N), Src.getOperand(0),
2183                             Amt->getZExtValue(), Width));
2184     return;
2185   }
2186   }
2187 
2188   SelectCode(N);
2189 }
2190 
2191 bool AMDGPUDAGToDAGISel::isCBranchSCC(const SDNode *N) const {
2192   assert(N->getOpcode() == ISD::BRCOND);
2193   if (!N->hasOneUse())
2194     return false;
2195 
2196   SDValue Cond = N->getOperand(1);
2197   if (Cond.getOpcode() == ISD::CopyToReg)
2198     Cond = Cond.getOperand(2);
2199 
2200   if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
2201     return false;
2202 
2203   MVT VT = Cond.getOperand(0).getSimpleValueType();
2204   if (VT == MVT::i32)
2205     return true;
2206 
2207   if (VT == MVT::i64) {
2208     auto ST = static_cast<const GCNSubtarget *>(Subtarget);
2209 
2210     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
2211     return (CC == ISD::SETEQ || CC == ISD::SETNE) && ST->hasScalarCompareEq64();
2212   }
2213 
2214   return false;
2215 }
2216 
2217 void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
2218   SDValue Cond = N->getOperand(1);
2219 
2220   if (Cond.isUndef()) {
2221     CurDAG->SelectNodeTo(N, AMDGPU::SI_BR_UNDEF, MVT::Other,
2222                          N->getOperand(2), N->getOperand(0));
2223     return;
2224   }
2225 
2226   const GCNSubtarget *ST = static_cast<const GCNSubtarget *>(Subtarget);
2227   const SIRegisterInfo *TRI = ST->getRegisterInfo();
2228 
2229   bool UseSCCBr = isCBranchSCC(N) && isUniformBr(N);
2230   unsigned BrOp = UseSCCBr ? AMDGPU::S_CBRANCH_SCC1 : AMDGPU::S_CBRANCH_VCCNZ;
2231   Register CondReg = UseSCCBr ? AMDGPU::SCC : TRI->getVCC();
2232   SDLoc SL(N);
2233 
2234   if (!UseSCCBr) {
2235     // This is the case that we are selecting to S_CBRANCH_VCCNZ.  We have not
2236     // analyzed what generates the vcc value, so we do not know whether vcc
2237     // bits for disabled lanes are 0.  Thus we need to mask out bits for
2238     // disabled lanes.
2239     //
2240     // For the case that we select S_CBRANCH_SCC1 and it gets
2241     // changed to S_CBRANCH_VCCNZ in SIFixSGPRCopies, SIFixSGPRCopies calls
2242     // SIInstrInfo::moveToVALU which inserts the S_AND).
2243     //
2244     // We could add an analysis of what generates the vcc value here and omit
2245     // the S_AND when is unnecessary. But it would be better to add a separate
2246     // pass after SIFixSGPRCopies to do the unnecessary S_AND removal, so it
2247     // catches both cases.
2248     Cond = SDValue(CurDAG->getMachineNode(ST->isWave32() ? AMDGPU::S_AND_B32
2249                                                          : AMDGPU::S_AND_B64,
2250                      SL, MVT::i1,
2251                      CurDAG->getRegister(ST->isWave32() ? AMDGPU::EXEC_LO
2252                                                         : AMDGPU::EXEC,
2253                                          MVT::i1),
2254                     Cond),
2255                    0);
2256   }
2257 
2258   SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, CondReg, Cond);
2259   CurDAG->SelectNodeTo(N, BrOp, MVT::Other,
2260                        N->getOperand(2), // Basic Block
2261                        VCC.getValue(0));
2262 }
2263 
2264 void AMDGPUDAGToDAGISel::SelectFMAD_FMA(SDNode *N) {
2265   MVT VT = N->getSimpleValueType(0);
2266   bool IsFMA = N->getOpcode() == ISD::FMA;
2267   if (VT != MVT::f32 || (!Subtarget->hasMadMixInsts() &&
2268                          !Subtarget->hasFmaMixInsts()) ||
2269       ((IsFMA && Subtarget->hasMadMixInsts()) ||
2270        (!IsFMA && Subtarget->hasFmaMixInsts()))) {
2271     SelectCode(N);
2272     return;
2273   }
2274 
2275   SDValue Src0 = N->getOperand(0);
2276   SDValue Src1 = N->getOperand(1);
2277   SDValue Src2 = N->getOperand(2);
2278   unsigned Src0Mods, Src1Mods, Src2Mods;
2279 
2280   // Avoid using v_mad_mix_f32/v_fma_mix_f32 unless there is actually an operand
2281   // using the conversion from f16.
2282   bool Sel0 = SelectVOP3PMadMixModsImpl(Src0, Src0, Src0Mods);
2283   bool Sel1 = SelectVOP3PMadMixModsImpl(Src1, Src1, Src1Mods);
2284   bool Sel2 = SelectVOP3PMadMixModsImpl(Src2, Src2, Src2Mods);
2285 
2286   assert((IsFMA || !Mode.allFP32Denormals()) &&
2287          "fmad selected with denormals enabled");
2288   // TODO: We can select this with f32 denormals enabled if all the sources are
2289   // converted from f16 (in which case fmad isn't legal).
2290 
2291   if (Sel0 || Sel1 || Sel2) {
2292     // For dummy operands.
2293     SDValue Zero = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
2294     SDValue Ops[] = {
2295       CurDAG->getTargetConstant(Src0Mods, SDLoc(), MVT::i32), Src0,
2296       CurDAG->getTargetConstant(Src1Mods, SDLoc(), MVT::i32), Src1,
2297       CurDAG->getTargetConstant(Src2Mods, SDLoc(), MVT::i32), Src2,
2298       CurDAG->getTargetConstant(0, SDLoc(), MVT::i1),
2299       Zero, Zero
2300     };
2301 
2302     CurDAG->SelectNodeTo(N,
2303                          IsFMA ? AMDGPU::V_FMA_MIX_F32 : AMDGPU::V_MAD_MIX_F32,
2304                          MVT::f32, Ops);
2305   } else {
2306     SelectCode(N);
2307   }
2308 }
2309 
2310 void AMDGPUDAGToDAGISel::SelectDSAppendConsume(SDNode *N, unsigned IntrID) {
2311   // The address is assumed to be uniform, so if it ends up in a VGPR, it will
2312   // be copied to an SGPR with readfirstlane.
2313   unsigned Opc = IntrID == Intrinsic::amdgcn_ds_append ?
2314     AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
2315 
2316   SDValue Chain = N->getOperand(0);
2317   SDValue Ptr = N->getOperand(2);
2318   MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
2319   MachineMemOperand *MMO = M->getMemOperand();
2320   bool IsGDS = M->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
2321 
2322   SDValue Offset;
2323   if (CurDAG->isBaseWithConstantOffset(Ptr)) {
2324     SDValue PtrBase = Ptr.getOperand(0);
2325     SDValue PtrOffset = Ptr.getOperand(1);
2326 
2327     const APInt &OffsetVal = cast<ConstantSDNode>(PtrOffset)->getAPIntValue();
2328     if (isDSOffsetLegal(PtrBase, OffsetVal.getZExtValue())) {
2329       N = glueCopyToM0(N, PtrBase);
2330       Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i32);
2331     }
2332   }
2333 
2334   if (!Offset) {
2335     N = glueCopyToM0(N, Ptr);
2336     Offset = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
2337   }
2338 
2339   SDValue Ops[] = {
2340     Offset,
2341     CurDAG->getTargetConstant(IsGDS, SDLoc(), MVT::i32),
2342     Chain,
2343     N->getOperand(N->getNumOperands() - 1) // New glue
2344   };
2345 
2346   SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
2347   CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
2348 }
2349 
2350 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
2351   switch (IntrID) {
2352   case Intrinsic::amdgcn_ds_gws_init:
2353     return AMDGPU::DS_GWS_INIT;
2354   case Intrinsic::amdgcn_ds_gws_barrier:
2355     return AMDGPU::DS_GWS_BARRIER;
2356   case Intrinsic::amdgcn_ds_gws_sema_v:
2357     return AMDGPU::DS_GWS_SEMA_V;
2358   case Intrinsic::amdgcn_ds_gws_sema_br:
2359     return AMDGPU::DS_GWS_SEMA_BR;
2360   case Intrinsic::amdgcn_ds_gws_sema_p:
2361     return AMDGPU::DS_GWS_SEMA_P;
2362   case Intrinsic::amdgcn_ds_gws_sema_release_all:
2363     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
2364   default:
2365     llvm_unreachable("not a gws intrinsic");
2366   }
2367 }
2368 
2369 void AMDGPUDAGToDAGISel::SelectDS_GWS(SDNode *N, unsigned IntrID) {
2370   if (IntrID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
2371       !Subtarget->hasGWSSemaReleaseAll()) {
2372     // Let this error.
2373     SelectCode(N);
2374     return;
2375   }
2376 
2377   // Chain, intrinsic ID, vsrc, offset
2378   const bool HasVSrc = N->getNumOperands() == 4;
2379   assert(HasVSrc || N->getNumOperands() == 3);
2380 
2381   SDLoc SL(N);
2382   SDValue BaseOffset = N->getOperand(HasVSrc ? 3 : 2);
2383   int ImmOffset = 0;
2384   MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
2385   MachineMemOperand *MMO = M->getMemOperand();
2386 
2387   // Don't worry if the offset ends up in a VGPR. Only one lane will have
2388   // effect, so SIFixSGPRCopies will validly insert readfirstlane.
2389 
2390   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
2391   // offset field) % 64. Some versions of the programming guide omit the m0
2392   // part, or claim it's from offset 0.
2393   if (ConstantSDNode *ConstOffset = dyn_cast<ConstantSDNode>(BaseOffset)) {
2394     // If we have a constant offset, try to use the 0 in m0 as the base.
2395     // TODO: Look into changing the default m0 initialization value. If the
2396     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
2397     // the immediate offset.
2398     glueCopyToM0(N, CurDAG->getTargetConstant(0, SL, MVT::i32));
2399     ImmOffset = ConstOffset->getZExtValue();
2400   } else {
2401     if (CurDAG->isBaseWithConstantOffset(BaseOffset)) {
2402       ImmOffset = BaseOffset.getConstantOperandVal(1);
2403       BaseOffset = BaseOffset.getOperand(0);
2404     }
2405 
2406     // Prefer to do the shift in an SGPR since it should be possible to use m0
2407     // as the result directly. If it's already an SGPR, it will be eliminated
2408     // later.
2409     SDNode *SGPROffset
2410       = CurDAG->getMachineNode(AMDGPU::V_READFIRSTLANE_B32, SL, MVT::i32,
2411                                BaseOffset);
2412     // Shift to offset in m0
2413     SDNode *M0Base
2414       = CurDAG->getMachineNode(AMDGPU::S_LSHL_B32, SL, MVT::i32,
2415                                SDValue(SGPROffset, 0),
2416                                CurDAG->getTargetConstant(16, SL, MVT::i32));
2417     glueCopyToM0(N, SDValue(M0Base, 0));
2418   }
2419 
2420   SDValue Chain = N->getOperand(0);
2421   SDValue OffsetField = CurDAG->getTargetConstant(ImmOffset, SL, MVT::i32);
2422 
2423   const unsigned Opc = gwsIntrinToOpcode(IntrID);
2424   SmallVector<SDValue, 5> Ops;
2425   if (HasVSrc)
2426     Ops.push_back(N->getOperand(2));
2427   Ops.push_back(OffsetField);
2428   Ops.push_back(Chain);
2429 
2430   SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
2431   CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
2432 }
2433 
2434 void AMDGPUDAGToDAGISel::SelectInterpP1F16(SDNode *N) {
2435   if (Subtarget->getLDSBankCount() != 16) {
2436     // This is a single instruction with a pattern.
2437     SelectCode(N);
2438     return;
2439   }
2440 
2441   SDLoc DL(N);
2442 
2443   // This requires 2 instructions. It is possible to write a pattern to support
2444   // this, but the generated isel emitter doesn't correctly deal with multiple
2445   // output instructions using the same physical register input. The copy to m0
2446   // is incorrectly placed before the second instruction.
2447   //
2448   // TODO: Match source modifiers.
2449   //
2450   // def : Pat <
2451   //   (int_amdgcn_interp_p1_f16
2452   //    (VOP3Mods f32:$src0, i32:$src0_modifiers),
2453   //                             (i32 timm:$attrchan), (i32 timm:$attr),
2454   //                             (i1 timm:$high), M0),
2455   //   (V_INTERP_P1LV_F16 $src0_modifiers, VGPR_32:$src0, timm:$attr,
2456   //       timm:$attrchan, 0,
2457   //       (V_INTERP_MOV_F32 2, timm:$attr, timm:$attrchan), timm:$high)> {
2458   //   let Predicates = [has16BankLDS];
2459   // }
2460 
2461   // 16 bank LDS
2462   SDValue ToM0 = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, AMDGPU::M0,
2463                                       N->getOperand(5), SDValue());
2464 
2465   SDVTList VTs = CurDAG->getVTList(MVT::f32, MVT::Other);
2466 
2467   SDNode *InterpMov =
2468     CurDAG->getMachineNode(AMDGPU::V_INTERP_MOV_F32, DL, VTs, {
2469         CurDAG->getTargetConstant(2, DL, MVT::i32), // P0
2470         N->getOperand(3),  // Attr
2471         N->getOperand(2),  // Attrchan
2472         ToM0.getValue(1) // In glue
2473   });
2474 
2475   SDNode *InterpP1LV =
2476     CurDAG->getMachineNode(AMDGPU::V_INTERP_P1LV_F16, DL, MVT::f32, {
2477         CurDAG->getTargetConstant(0, DL, MVT::i32), // $src0_modifiers
2478         N->getOperand(1), // Src0
2479         N->getOperand(3), // Attr
2480         N->getOperand(2), // Attrchan
2481         CurDAG->getTargetConstant(0, DL, MVT::i32), // $src2_modifiers
2482         SDValue(InterpMov, 0), // Src2 - holds two f16 values selected by high
2483         N->getOperand(4), // high
2484         CurDAG->getTargetConstant(0, DL, MVT::i1), // $clamp
2485         CurDAG->getTargetConstant(0, DL, MVT::i32), // $omod
2486         SDValue(InterpMov, 1)
2487   });
2488 
2489   CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), SDValue(InterpP1LV, 0));
2490 }
2491 
2492 void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) {
2493   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2494   switch (IntrID) {
2495   case Intrinsic::amdgcn_ds_append:
2496   case Intrinsic::amdgcn_ds_consume: {
2497     if (N->getValueType(0) != MVT::i32)
2498       break;
2499     SelectDSAppendConsume(N, IntrID);
2500     return;
2501   }
2502   }
2503 
2504   SelectCode(N);
2505 }
2506 
2507 void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
2508   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
2509   unsigned Opcode;
2510   switch (IntrID) {
2511   case Intrinsic::amdgcn_wqm:
2512     Opcode = AMDGPU::WQM;
2513     break;
2514   case Intrinsic::amdgcn_softwqm:
2515     Opcode = AMDGPU::SOFT_WQM;
2516     break;
2517   case Intrinsic::amdgcn_wwm:
2518   case Intrinsic::amdgcn_strict_wwm:
2519     Opcode = AMDGPU::STRICT_WWM;
2520     break;
2521   case Intrinsic::amdgcn_strict_wqm:
2522     Opcode = AMDGPU::STRICT_WQM;
2523     break;
2524   case Intrinsic::amdgcn_interp_p1_f16:
2525     SelectInterpP1F16(N);
2526     return;
2527   default:
2528     SelectCode(N);
2529     return;
2530   }
2531 
2532   SDValue Src = N->getOperand(1);
2533   CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), {Src});
2534 }
2535 
2536 void AMDGPUDAGToDAGISel::SelectINTRINSIC_VOID(SDNode *N) {
2537   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2538   switch (IntrID) {
2539   case Intrinsic::amdgcn_ds_gws_init:
2540   case Intrinsic::amdgcn_ds_gws_barrier:
2541   case Intrinsic::amdgcn_ds_gws_sema_v:
2542   case Intrinsic::amdgcn_ds_gws_sema_br:
2543   case Intrinsic::amdgcn_ds_gws_sema_p:
2544   case Intrinsic::amdgcn_ds_gws_sema_release_all:
2545     SelectDS_GWS(N, IntrID);
2546     return;
2547   default:
2548     break;
2549   }
2550 
2551   SelectCode(N);
2552 }
2553 
2554 bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
2555                                             unsigned &Mods,
2556                                             bool AllowAbs) const {
2557   Mods = 0;
2558   Src = In;
2559 
2560   if (Src.getOpcode() == ISD::FNEG) {
2561     Mods |= SISrcMods::NEG;
2562     Src = Src.getOperand(0);
2563   }
2564 
2565   if (AllowAbs && Src.getOpcode() == ISD::FABS) {
2566     Mods |= SISrcMods::ABS;
2567     Src = Src.getOperand(0);
2568   }
2569 
2570   return true;
2571 }
2572 
2573 bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
2574                                         SDValue &SrcMods) const {
2575   unsigned Mods;
2576   if (SelectVOP3ModsImpl(In, Src, Mods)) {
2577     SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2578     return true;
2579   }
2580 
2581   return false;
2582 }
2583 
2584 bool AMDGPUDAGToDAGISel::SelectVOP3BMods(SDValue In, SDValue &Src,
2585                                          SDValue &SrcMods) const {
2586   unsigned Mods;
2587   if (SelectVOP3ModsImpl(In, Src, Mods, /* AllowAbs */ false)) {
2588     SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2589     return true;
2590   }
2591 
2592   return false;
2593 }
2594 
2595 bool AMDGPUDAGToDAGISel::SelectVOP3Mods_NNaN(SDValue In, SDValue &Src,
2596                                              SDValue &SrcMods) const {
2597   SelectVOP3Mods(In, Src, SrcMods);
2598   return isNoNanSrc(Src);
2599 }
2600 
2601 bool AMDGPUDAGToDAGISel::SelectVOP3NoMods(SDValue In, SDValue &Src) const {
2602   if (In.getOpcode() == ISD::FABS || In.getOpcode() == ISD::FNEG)
2603     return false;
2604 
2605   Src = In;
2606   return true;
2607 }
2608 
2609 bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
2610                                          SDValue &SrcMods, SDValue &Clamp,
2611                                          SDValue &Omod) const {
2612   SDLoc DL(In);
2613   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2614   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2615 
2616   return SelectVOP3Mods(In, Src, SrcMods);
2617 }
2618 
2619 bool AMDGPUDAGToDAGISel::SelectVOP3BMods0(SDValue In, SDValue &Src,
2620                                           SDValue &SrcMods, SDValue &Clamp,
2621                                           SDValue &Omod) const {
2622   SDLoc DL(In);
2623   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2624   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2625 
2626   return SelectVOP3BMods(In, Src, SrcMods);
2627 }
2628 
2629 bool AMDGPUDAGToDAGISel::SelectVOP3OMods(SDValue In, SDValue &Src,
2630                                          SDValue &Clamp, SDValue &Omod) const {
2631   Src = In;
2632 
2633   SDLoc DL(In);
2634   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2635   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2636 
2637   return true;
2638 }
2639 
2640 bool AMDGPUDAGToDAGISel::SelectVOP3PMods(SDValue In, SDValue &Src,
2641                                          SDValue &SrcMods, bool IsDOT) const {
2642   unsigned Mods = 0;
2643   Src = In;
2644 
2645   if (Src.getOpcode() == ISD::FNEG) {
2646     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
2647     Src = Src.getOperand(0);
2648   }
2649 
2650   if (Src.getOpcode() == ISD::BUILD_VECTOR &&
2651       (!IsDOT || !Subtarget->hasDOTOpSelHazard())) {
2652     unsigned VecMods = Mods;
2653 
2654     SDValue Lo = stripBitcast(Src.getOperand(0));
2655     SDValue Hi = stripBitcast(Src.getOperand(1));
2656 
2657     if (Lo.getOpcode() == ISD::FNEG) {
2658       Lo = stripBitcast(Lo.getOperand(0));
2659       Mods ^= SISrcMods::NEG;
2660     }
2661 
2662     if (Hi.getOpcode() == ISD::FNEG) {
2663       Hi = stripBitcast(Hi.getOperand(0));
2664       Mods ^= SISrcMods::NEG_HI;
2665     }
2666 
2667     if (isExtractHiElt(Lo, Lo))
2668       Mods |= SISrcMods::OP_SEL_0;
2669 
2670     if (isExtractHiElt(Hi, Hi))
2671       Mods |= SISrcMods::OP_SEL_1;
2672 
2673     unsigned VecSize = Src.getValueSizeInBits();
2674     Lo = stripExtractLoElt(Lo);
2675     Hi = stripExtractLoElt(Hi);
2676 
2677     if (Lo.getValueSizeInBits() > VecSize) {
2678       Lo = CurDAG->getTargetExtractSubreg(
2679         (VecSize > 32) ? AMDGPU::sub0_sub1 : AMDGPU::sub0, SDLoc(In),
2680         MVT::getIntegerVT(VecSize), Lo);
2681     }
2682 
2683     if (Hi.getValueSizeInBits() > VecSize) {
2684       Hi = CurDAG->getTargetExtractSubreg(
2685         (VecSize > 32) ? AMDGPU::sub0_sub1 : AMDGPU::sub0, SDLoc(In),
2686         MVT::getIntegerVT(VecSize), Hi);
2687     }
2688 
2689     assert(Lo.getValueSizeInBits() <= VecSize &&
2690            Hi.getValueSizeInBits() <= VecSize);
2691 
2692     if (Lo == Hi && !isInlineImmediate(Lo.getNode())) {
2693       // Really a scalar input. Just select from the low half of the register to
2694       // avoid packing.
2695 
2696       if (VecSize == 32 || VecSize == Lo.getValueSizeInBits()) {
2697         Src = Lo;
2698       } else {
2699         assert(Lo.getValueSizeInBits() == 32 && VecSize == 64);
2700 
2701         SDLoc SL(In);
2702         SDValue Undef = SDValue(
2703           CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, SL,
2704                                  Lo.getValueType()), 0);
2705         auto RC = Lo->isDivergent() ? AMDGPU::VReg_64RegClassID
2706                                     : AMDGPU::SReg_64RegClassID;
2707         const SDValue Ops[] = {
2708           CurDAG->getTargetConstant(RC, SL, MVT::i32),
2709           Lo, CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32),
2710           Undef, CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32) };
2711 
2712         Src = SDValue(CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SL,
2713                                              Src.getValueType(), Ops), 0);
2714       }
2715       SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2716       return true;
2717     }
2718 
2719     if (VecSize == 64 && Lo == Hi && isa<ConstantFPSDNode>(Lo)) {
2720       uint64_t Lit = cast<ConstantFPSDNode>(Lo)->getValueAPF()
2721                       .bitcastToAPInt().getZExtValue();
2722       if (AMDGPU::isInlinableLiteral32(Lit, Subtarget->hasInv2PiInlineImm())) {
2723         Src = CurDAG->getTargetConstant(Lit, SDLoc(In), MVT::i64);;
2724         SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2725         return true;
2726       }
2727     }
2728 
2729     Mods = VecMods;
2730   }
2731 
2732   // Packed instructions do not have abs modifiers.
2733   Mods |= SISrcMods::OP_SEL_1;
2734 
2735   SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2736   return true;
2737 }
2738 
2739 bool AMDGPUDAGToDAGISel::SelectVOP3PModsDOT(SDValue In, SDValue &Src,
2740                                             SDValue &SrcMods) const {
2741   return SelectVOP3PMods(In, Src, SrcMods, true);
2742 }
2743 
2744 bool AMDGPUDAGToDAGISel::SelectVOP3OpSel(SDValue In, SDValue &Src,
2745                                          SDValue &SrcMods) const {
2746   Src = In;
2747   // FIXME: Handle op_sel
2748   SrcMods = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
2749   return true;
2750 }
2751 
2752 bool AMDGPUDAGToDAGISel::SelectVOP3OpSelMods(SDValue In, SDValue &Src,
2753                                              SDValue &SrcMods) const {
2754   // FIXME: Handle op_sel
2755   return SelectVOP3Mods(In, Src, SrcMods);
2756 }
2757 
2758 // The return value is not whether the match is possible (which it always is),
2759 // but whether or not it a conversion is really used.
2760 bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src,
2761                                                    unsigned &Mods) const {
2762   Mods = 0;
2763   SelectVOP3ModsImpl(In, Src, Mods);
2764 
2765   if (Src.getOpcode() == ISD::FP_EXTEND) {
2766     Src = Src.getOperand(0);
2767     assert(Src.getValueType() == MVT::f16);
2768     Src = stripBitcast(Src);
2769 
2770     // Be careful about folding modifiers if we already have an abs. fneg is
2771     // applied last, so we don't want to apply an earlier fneg.
2772     if ((Mods & SISrcMods::ABS) == 0) {
2773       unsigned ModsTmp;
2774       SelectVOP3ModsImpl(Src, Src, ModsTmp);
2775 
2776       if ((ModsTmp & SISrcMods::NEG) != 0)
2777         Mods ^= SISrcMods::NEG;
2778 
2779       if ((ModsTmp & SISrcMods::ABS) != 0)
2780         Mods |= SISrcMods::ABS;
2781     }
2782 
2783     // op_sel/op_sel_hi decide the source type and source.
2784     // If the source's op_sel_hi is set, it indicates to do a conversion from fp16.
2785     // If the sources's op_sel is set, it picks the high half of the source
2786     // register.
2787 
2788     Mods |= SISrcMods::OP_SEL_1;
2789     if (isExtractHiElt(Src, Src)) {
2790       Mods |= SISrcMods::OP_SEL_0;
2791 
2792       // TODO: Should we try to look for neg/abs here?
2793     }
2794 
2795     return true;
2796   }
2797 
2798   return false;
2799 }
2800 
2801 bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixMods(SDValue In, SDValue &Src,
2802                                                SDValue &SrcMods) const {
2803   unsigned Mods = 0;
2804   SelectVOP3PMadMixModsImpl(In, Src, Mods);
2805   SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2806   return true;
2807 }
2808 
2809 SDValue AMDGPUDAGToDAGISel::getHi16Elt(SDValue In) const {
2810   if (In.isUndef())
2811     return CurDAG->getUNDEF(MVT::i32);
2812 
2813   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(In)) {
2814     SDLoc SL(In);
2815     return CurDAG->getConstant(C->getZExtValue() << 16, SL, MVT::i32);
2816   }
2817 
2818   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(In)) {
2819     SDLoc SL(In);
2820     return CurDAG->getConstant(
2821       C->getValueAPF().bitcastToAPInt().getZExtValue() << 16, SL, MVT::i32);
2822   }
2823 
2824   SDValue Src;
2825   if (isExtractHiElt(In, Src))
2826     return Src;
2827 
2828   return SDValue();
2829 }
2830 
2831 bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode * N) const {
2832   assert(CurDAG->getTarget().getTargetTriple().getArch() == Triple::amdgcn);
2833 
2834   const SIRegisterInfo *SIRI =
2835     static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
2836   const SIInstrInfo * SII =
2837     static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
2838 
2839   unsigned Limit = 0;
2840   bool AllUsesAcceptSReg = true;
2841   for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
2842     Limit < 10 && U != E; ++U, ++Limit) {
2843     const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
2844 
2845     // If the register class is unknown, it could be an unknown
2846     // register class that needs to be an SGPR, e.g. an inline asm
2847     // constraint
2848     if (!RC || SIRI->isSGPRClass(RC))
2849       return false;
2850 
2851     if (RC != &AMDGPU::VS_32RegClass) {
2852       AllUsesAcceptSReg = false;
2853       SDNode * User = *U;
2854       if (User->isMachineOpcode()) {
2855         unsigned Opc = User->getMachineOpcode();
2856         MCInstrDesc Desc = SII->get(Opc);
2857         if (Desc.isCommutable()) {
2858           unsigned OpIdx = Desc.getNumDefs() + U.getOperandNo();
2859           unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
2860           if (SII->findCommutedOpIndices(Desc, OpIdx, CommuteIdx1)) {
2861             unsigned CommutedOpNo = CommuteIdx1 - Desc.getNumDefs();
2862             const TargetRegisterClass *CommutedRC = getOperandRegClass(*U, CommutedOpNo);
2863             if (CommutedRC == &AMDGPU::VS_32RegClass)
2864               AllUsesAcceptSReg = true;
2865           }
2866         }
2867       }
2868       // If "AllUsesAcceptSReg == false" so far we haven't succeeded
2869       // commuting current user. This means have at least one use
2870       // that strictly require VGPR. Thus, we will not attempt to commute
2871       // other user instructions.
2872       if (!AllUsesAcceptSReg)
2873         break;
2874     }
2875   }
2876   return !AllUsesAcceptSReg && (Limit < 10);
2877 }
2878 
2879 bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode * N) const {
2880   auto Ld = cast<LoadSDNode>(N);
2881 
2882   return Ld->getAlign() >= Align(4) &&
2883          (((Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
2884             Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
2885            !N->isDivergent()) ||
2886           (Subtarget->getScalarizeGlobalBehavior() &&
2887            Ld->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
2888            Ld->isSimple() && !N->isDivergent() &&
2889            static_cast<const SITargetLowering *>(getTargetLowering())
2890                ->isMemOpHasNoClobberedMemOperand(N)));
2891 }
2892 
2893 void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
2894   const AMDGPUTargetLowering& Lowering =
2895     *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
2896   bool IsModified = false;
2897   do {
2898     IsModified = false;
2899 
2900     // Go over all selected nodes and try to fold them a bit more
2901     SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_begin();
2902     while (Position != CurDAG->allnodes_end()) {
2903       SDNode *Node = &*Position++;
2904       MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(Node);
2905       if (!MachineNode)
2906         continue;
2907 
2908       SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
2909       if (ResNode != Node) {
2910         if (ResNode)
2911           ReplaceUses(Node, ResNode);
2912         IsModified = true;
2913       }
2914     }
2915     CurDAG->RemoveDeadNodes();
2916   } while (IsModified);
2917 }
2918