1 //===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //==-----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Defines an instruction selector for the AMDGPU target.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUISelDAGToDAG.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUTargetMachine.h"
17 #include "MCTargetDesc/R600MCTargetDesc.h"
18 #include "R600RegisterInfo.h"
19 #include "SIMachineFunctionInfo.h"
20 #include "llvm/Analysis/LegacyDivergenceAnalysis.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/CodeGen/FunctionLoweringInfo.h"
23 #include "llvm/CodeGen/SelectionDAG.h"
24 #include "llvm/CodeGen/SelectionDAGISel.h"
25 #include "llvm/CodeGen/SelectionDAGNodes.h"
26 #include "llvm/IR/IntrinsicsAMDGPU.h"
27 #include "llvm/InitializePasses.h"
28 
29 #ifdef EXPENSIVE_CHECKS
30 #include "llvm/Analysis/LoopInfo.h"
31 #include "llvm/IR/Dominators.h"
32 #endif
33 
34 #define DEBUG_TYPE "isel"
35 
36 using namespace llvm;
37 
38 //===----------------------------------------------------------------------===//
39 // Instruction Selector Implementation
40 //===----------------------------------------------------------------------===//
41 
42 namespace {
43 
44 static SDValue stripBitcast(SDValue Val) {
45   return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
46 }
47 
48 // Figure out if this is really an extract of the high 16-bits of a dword.
49 static bool isExtractHiElt(SDValue In, SDValue &Out) {
50   In = stripBitcast(In);
51 
52   if (In.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
53     if (ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(In.getOperand(1))) {
54       if (!Idx->isOne())
55         return false;
56       Out = In.getOperand(0);
57       return true;
58     }
59   }
60 
61   if (In.getOpcode() != ISD::TRUNCATE)
62     return false;
63 
64   SDValue Srl = In.getOperand(0);
65   if (Srl.getOpcode() == ISD::SRL) {
66     if (ConstantSDNode *ShiftAmt = dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
67       if (ShiftAmt->getZExtValue() == 16) {
68         Out = stripBitcast(Srl.getOperand(0));
69         return true;
70       }
71     }
72   }
73 
74   return false;
75 }
76 
77 // Look through operations that obscure just looking at the low 16-bits of the
78 // same register.
79 static SDValue stripExtractLoElt(SDValue In) {
80   if (In.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
81     if (ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(In.getOperand(1))) {
82       if (Idx->isZero() && In.getValueSizeInBits() <= 32)
83         return In.getOperand(0);
84     }
85   }
86 
87   if (In.getOpcode() == ISD::TRUNCATE) {
88     SDValue Src = In.getOperand(0);
89     if (Src.getValueType().getSizeInBits() == 32)
90       return stripBitcast(Src);
91   }
92 
93   return In;
94 }
95 
96 }  // end anonymous namespace
97 
// Register the pass with the legacy pass manager, along with the analyses it
// requires. The EXPENSIVE_CHECKS-only dependencies mirror the extra LCSSA
// verification performed in runOnMachineFunction().
INITIALIZE_PASS_BEGIN(AMDGPUDAGToDAGISel, "amdgpu-isel",
                      "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)
INITIALIZE_PASS_DEPENDENCY(AMDGPUArgumentUsageInfo)
INITIALIZE_PASS_DEPENDENCY(AMDGPUPerfHintAnalysis)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
#ifdef EXPENSIVE_CHECKS
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
#endif
INITIALIZE_PASS_END(AMDGPUDAGToDAGISel, "amdgpu-isel",
                    "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)
109 
/// This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine *TM,
                                        CodeGenOpt::Level OptLevel) {
  return new AMDGPUDAGToDAGISel(TM, OptLevel);
}
116 
/// Construct the selector.
/// NOTE(review): \p TM documents a nullptr default but is unconditionally
/// dereferenced in the initializer list, so callers must pass a valid
/// TargetMachine — confirm whether the default is ever used.
AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(
    TargetMachine *TM /*= nullptr*/,
    CodeGenOpt::Level OptLevel /*= CodeGenOpt::Default*/)
    : SelectionDAGISel(*TM, OptLevel) {
  // Cache the target-machine-wide flag so selection code can query it
  // without going back through the TargetMachine.
  EnableLateStructurizeCFG = AMDGPUTargetMachine::EnableLateStructurizeCFG;
}
123 
/// Per-function entry point: cache function-specific state, then delegate to
/// the generic SelectionDAG driver.
bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
#ifdef EXPENSIVE_CHECKS
  // In expensive-checks builds, verify every loop is in LCSSA form before
  // selecting; the pass declared these analyses in getAnalysisUsage().
  DominatorTree & DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LoopInfo * LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  for (auto &L : LI->getLoopsInPreorder()) {
    assert(L->isLCSSAForm(DT));
  }
#endif
  // Cache the subtarget and the function's FP mode-register defaults for use
  // throughout selection.
  Subtarget = &MF.getSubtarget<GCNSubtarget>();
  Mode = AMDGPU::SIModeRegisterDefaults(MF.getFunction());
  return SelectionDAGISel::runOnMachineFunction(MF);
}
136 
/// \returns true when an f16 result produced by opcode \p Opc is known to
/// leave the high 16 bits of the 32-bit register zeroed on the current
/// subtarget, so consumers need not re-mask them. Conservatively returns
/// false for any unlisted opcode. (Presumed contract from the opcode lists
/// and generation checks below — confirm against callers.)
bool AMDGPUDAGToDAGISel::fp16SrcZerosHighBits(unsigned Opc) const {
  // XXX - only need to list legal operations.
  switch (Opc) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::FCANONICALIZE:
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
  case ISD::FABS:
    // Fabs is lowered to a bit operation, but it's an and which will clear the
    // high bits anyway.
  case ISD::FSQRT:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FPOWI:
  case ISD::FPOW:
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FFLOOR:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case AMDGPUISD::FRACT:
  case AMDGPUISD::CLAMP:
  case AMDGPUISD::COS_HW:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMIN3:
  case AMDGPUISD::FMAX3:
  case AMDGPUISD::FMED3:
  case AMDGPUISD::FMAD_FTZ:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::LDEXP:
    // On gfx10, all 16-bit instructions preserve the high bits.
    return Subtarget->getGeneration() <= AMDGPUSubtarget::GFX9;
  case ISD::FP_ROUND:
    // We may select fptrunc (fma/mad) to mad_mixlo, which does not zero the
    // high bits on gfx9.
    // TODO: If we had the source node we could see if the source was fma/mad
    return Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS;
  case ISD::FMA:
  case ISD::FMAD:
  case AMDGPUISD::DIV_FIXUP:
    return Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS;
  default:
    // fcopysign, select and others may be lowered to 32-bit bit operations
    // which don't zero the high bits.
    return false;
  }
}
198 
/// Declare the analyses this pass consumes; must stay in sync with the
/// INITIALIZE_PASS_DEPENDENCY list above.
void AMDGPUDAGToDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AMDGPUArgumentUsageInfo>();
  AU.addRequired<LegacyDivergenceAnalysis>();
#ifdef EXPENSIVE_CHECKS
  // Only needed for the LCSSA assertions in runOnMachineFunction().
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();
#endif
  SelectionDAGISel::getAnalysisUsage(AU);
}
208 
/// Try to fold a v2i16/v2f16 BUILD_VECTOR whose lo or hi element comes from a
/// 16-bit (or extending 8-bit) load into a single d16 load node that writes
/// only that half of the 32-bit register, tying the other half in as an
/// operand. Returns true and replaces all uses of \p N on success.
/// NOTE(review): despite the name and constness, this mutates the DAG via
/// ReplaceAllUsesOfValueWith.
bool AMDGPUDAGToDAGISel::matchLoadD16FromBuildVector(SDNode *N) const {
  assert(Subtarget->d16PreservesUnusedBits());
  MVT VT = N->getValueType(0).getSimpleVT();
  if (VT != MVT::v2i16 && VT != MVT::v2f16)
    return false;

  SDValue Lo = N->getOperand(0);
  SDValue Hi = N->getOperand(1);

  LoadSDNode *LdHi = dyn_cast<LoadSDNode>(stripBitcast(Hi));

  // build_vector lo, (load ptr) -> load_d16_hi ptr, lo
  // build_vector lo, (zextload ptr from i8) -> load_d16_hi_u8 ptr, lo
  // build_vector lo, (sextload ptr from i8) -> load_d16_hi_i8 ptr, lo

  // Need to check for possible indirect dependencies on the other half of the
  // vector to avoid introducing a cycle.
  if (LdHi && Hi.hasOneUse() && !LdHi->isPredecessorOf(Lo.getNode())) {
    SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);

    // The lo half becomes the tied-in operand of the d16-hi load.
    SDValue TiedIn = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Lo);
    SDValue Ops[] = {
      LdHi->getChain(), LdHi->getBasePtr(), TiedIn
    };

    // Pick the opcode by memory width and extension kind.
    unsigned LoadOp = AMDGPUISD::LOAD_D16_HI;
    if (LdHi->getMemoryVT() == MVT::i8) {
      LoadOp = LdHi->getExtensionType() == ISD::SEXTLOAD ?
        AMDGPUISD::LOAD_D16_HI_I8 : AMDGPUISD::LOAD_D16_HI_U8;
    } else {
      assert(LdHi->getMemoryVT() == MVT::i16);
    }

    SDValue NewLoadHi =
      CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdHi), VTList,
                                  Ops, LdHi->getMemoryVT(),
                                  LdHi->getMemOperand());

    // Replace both the value and the original load's chain.
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadHi);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdHi, 1), NewLoadHi.getValue(1));
    return true;
  }

  // build_vector (load ptr), hi -> load_d16_lo ptr, hi
  // build_vector (zextload ptr from i8), hi -> load_d16_lo_u8 ptr, hi
  // build_vector (sextload ptr from i8), hi -> load_d16_lo_i8 ptr, hi
  LoadSDNode *LdLo = dyn_cast<LoadSDNode>(stripBitcast(Lo));
  if (LdLo && Lo.hasOneUse()) {
    // The hi half must itself be a hi-element value we can tie in, and must
    // not (transitively) depend on the load, or we would create a cycle.
    SDValue TiedIn = getHi16Elt(Hi);
    if (!TiedIn || LdLo->isPredecessorOf(TiedIn.getNode()))
      return false;

    SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);
    unsigned LoadOp = AMDGPUISD::LOAD_D16_LO;
    if (LdLo->getMemoryVT() == MVT::i8) {
      LoadOp = LdLo->getExtensionType() == ISD::SEXTLOAD ?
        AMDGPUISD::LOAD_D16_LO_I8 : AMDGPUISD::LOAD_D16_LO_U8;
    } else {
      assert(LdLo->getMemoryVT() == MVT::i16);
    }

    TiedIn = CurDAG->getNode(ISD::BITCAST, SDLoc(N), VT, TiedIn);

    SDValue Ops[] = {
      LdLo->getChain(), LdLo->getBasePtr(), TiedIn
    };

    SDValue NewLoadLo =
      CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdLo), VTList,
                                  Ops, LdLo->getMemoryVT(),
                                  LdLo->getMemOperand());

    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadLo);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdLo, 1), NewLoadLo.getValue(1));
    return true;
  }

  return false;
}
288 
289 void AMDGPUDAGToDAGISel::PreprocessISelDAG() {
290   if (!Subtarget->d16PreservesUnusedBits())
291     return;
292 
293   SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
294 
295   bool MadeChange = false;
296   while (Position != CurDAG->allnodes_begin()) {
297     SDNode *N = &*--Position;
298     if (N->use_empty())
299       continue;
300 
301     switch (N->getOpcode()) {
302     case ISD::BUILD_VECTOR:
303       MadeChange |= matchLoadD16FromBuildVector(N);
304       break;
305     default:
306       break;
307     }
308   }
309 
310   if (MadeChange) {
311     CurDAG->RemoveDeadNodes();
312     LLVM_DEBUG(dbgs() << "After PreProcess:\n";
313                CurDAG->dump(););
314   }
315 }
316 
317 bool AMDGPUDAGToDAGISel::isNoNanSrc(SDValue N) const {
318   if (TM.Options.NoNaNsFPMath)
319     return true;
320 
321   // TODO: Move into isKnownNeverNaN
322   if (N->getFlags().hasNoNaNs())
323     return true;
324 
325   return CurDAG->isKnownNeverNaN(N);
326 }
327 
328 bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N,
329                                            bool Negated) const {
330   if (N->isUndef())
331     return true;
332 
333   const SIInstrInfo *TII = Subtarget->getInstrInfo();
334   if (Negated) {
335     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
336       return TII->isInlineConstant(-C->getAPIntValue());
337 
338     if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
339       return TII->isInlineConstant(-C->getValueAPF().bitcastToAPInt());
340 
341   } else {
342     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
343       return TII->isInlineConstant(C->getAPIntValue());
344 
345     if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
346       return TII->isInlineConstant(C->getValueAPF().bitcastToAPInt());
347   }
348 
349   return false;
350 }
351 
352 /// Determine the register class for \p OpNo
353 /// \returns The register class of the virtual register that will be used for
354 /// the given operand number \OpNo or NULL if the register class cannot be
355 /// determined.
/// Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode()) {
    // For an un-selected node, only CopyToReg exposes a register to query.
    if (N->getOpcode() == ISD::CopyToReg) {
      Register Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
      if (Reg.isVirtual()) {
        // Virtual registers carry their class in MRI.
        MachineRegisterInfo &MRI = CurDAG->getMachineFunction().getRegInfo();
        return MRI.getRegClass(Reg);
      }

      // Physical register: fall back to the largest class containing it.
      const SIRegisterInfo *TRI
        = static_cast<const GCNSubtarget *>(Subtarget)->getRegisterInfo();
      return TRI->getPhysRegClass(Reg);
    }

    return nullptr;
  }

  switch (N->getMachineOpcode()) {
  default: {
    // Generic machine node: look the operand's class up in the MCInstrDesc,
    // skipping past the defs to reach use operand \p OpNo.
    const MCInstrDesc &Desc =
        Subtarget->getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return Subtarget->getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    // REG_SEQUENCE operands alternate (value, subreg index) after the class
    // id, so derive the operand's class from the super-class + subreg index.
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
        Subtarget->getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
                                                              SubRegIdx);
  }
  }
}
399 
400 SDNode *AMDGPUDAGToDAGISel::glueCopyToOp(SDNode *N, SDValue NewChain,
401                                          SDValue Glue) const {
402   SmallVector <SDValue, 8> Ops;
403   Ops.push_back(NewChain); // Replace the chain.
404   for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
405     Ops.push_back(N->getOperand(i));
406 
407   Ops.push_back(Glue);
408   return CurDAG->MorphNodeTo(N, N->getOpcode(), N->getVTList(), Ops);
409 }
410 
411 SDNode *AMDGPUDAGToDAGISel::glueCopyToM0(SDNode *N, SDValue Val) const {
412   const SITargetLowering& Lowering =
413     *static_cast<const SITargetLowering*>(getTargetLowering());
414 
415   assert(N->getOperand(0).getValueType() == MVT::Other && "Expected chain");
416 
417   SDValue M0 = Lowering.copyToM0(*CurDAG, N->getOperand(0), SDLoc(N), Val);
418   return glueCopyToOp(N, M0, M0.getValue(1));
419 }
420 
421 SDNode *AMDGPUDAGToDAGISel::glueCopyToM0LDSInit(SDNode *N) const {
422   unsigned AS = cast<MemSDNode>(N)->getAddressSpace();
423   if (AS == AMDGPUAS::LOCAL_ADDRESS) {
424     if (Subtarget->ldsRequiresM0Init())
425       return glueCopyToM0(N, CurDAG->getTargetConstant(-1, SDLoc(N), MVT::i32));
426   } else if (AS == AMDGPUAS::REGION_ADDRESS) {
427     MachineFunction &MF = CurDAG->getMachineFunction();
428     unsigned Value = MF.getInfo<SIMachineFunctionInfo>()->getGDSSize();
429     return
430         glueCopyToM0(N, CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i32));
431   }
432   return N;
433 }
434 
435 MachineSDNode *AMDGPUDAGToDAGISel::buildSMovImm64(SDLoc &DL, uint64_t Imm,
436                                                   EVT VT) const {
437   SDNode *Lo = CurDAG->getMachineNode(
438       AMDGPU::S_MOV_B32, DL, MVT::i32,
439       CurDAG->getTargetConstant(Imm & 0xFFFFFFFF, DL, MVT::i32));
440   SDNode *Hi =
441       CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
442                              CurDAG->getTargetConstant(Imm >> 32, DL, MVT::i32));
443   const SDValue Ops[] = {
444       CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
445       SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
446       SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)};
447 
448   return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, VT, Ops);
449 }
450 
451 void AMDGPUDAGToDAGISel::SelectBuildVector(SDNode *N, unsigned RegClassID) {
452   EVT VT = N->getValueType(0);
453   unsigned NumVectorElts = VT.getVectorNumElements();
454   EVT EltVT = VT.getVectorElementType();
455   SDLoc DL(N);
456   SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
457 
458   if (NumVectorElts == 1) {
459     CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT, N->getOperand(0),
460                          RegClass);
461     return;
462   }
463 
464   assert(NumVectorElts <= 32 && "Vectors with more than 32 elements not "
465                                   "supported yet");
466   // 32 = Max Num Vector Elements
467   // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
468   // 1 = Vector Register Class
469   SmallVector<SDValue, 32 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);
470 
471   bool IsGCN = CurDAG->getSubtarget().getTargetTriple().getArch() ==
472                Triple::amdgcn;
473   RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
474   bool IsRegSeq = true;
475   unsigned NOps = N->getNumOperands();
476   for (unsigned i = 0; i < NOps; i++) {
477     // XXX: Why is this here?
478     if (isa<RegisterSDNode>(N->getOperand(i))) {
479       IsRegSeq = false;
480       break;
481     }
482     unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
483                          : R600RegisterInfo::getSubRegFromChannel(i);
484     RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
485     RegSeqArgs[1 + (2 * i) + 1] = CurDAG->getTargetConstant(Sub, DL, MVT::i32);
486   }
487   if (NOps != NumVectorElts) {
488     // Fill in the missing undef elements if this was a scalar_to_vector.
489     assert(N->getOpcode() == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
490     MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
491                                                    DL, EltVT);
492     for (unsigned i = NOps; i < NumVectorElts; ++i) {
493       unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
494                            : R600RegisterInfo::getSubRegFromChannel(i);
495       RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
496       RegSeqArgs[1 + (2 * i) + 1] =
497           CurDAG->getTargetConstant(Sub, DL, MVT::i32);
498     }
499   }
500 
501   if (!IsRegSeq)
502     SelectCode(N);
503   CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(), RegSeqArgs);
504 }
505 
/// Main selection dispatch: handle AMDGPU-specific node kinds directly and
/// defer everything else to the tablegen-generated SelectCode.
void AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return;   // Already selected.
  }

  // isa<MemSDNode> almost works but is slightly too permissive for some DS
  // intrinsics.
  if (Opc == ISD::LOAD || Opc == ISD::STORE || isa<AtomicSDNode>(N) ||
      (Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC ||
       Opc == ISD::ATOMIC_LOAD_FADD ||
       Opc == AMDGPUISD::ATOMIC_LOAD_FMIN ||
       Opc == AMDGPUISD::ATOMIC_LOAD_FMAX)) {
    // Memory nodes may need an M0 initialization glued in for LDS/GDS.
    N = glueCopyToM0LDSInit(N);
    SelectCode(N);
    return;
  }

  switch (Opc) {
  default:
    break;
  // We are selecting i64 ADD here instead of custom lower it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE: {
    if (N->getValueType(0) != MVT::i64)
      break;

    SelectADD_SUB_I64(N);
    return;
  }
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:
    if (N->getValueType(0) != MVT::i32)
      break;

    SelectAddcSubb(N);
    return;
  case ISD::UADDO:
  case ISD::USUBO: {
    SelectUADDO_USUBO(N);
    return;
  }
  case AMDGPUISD::FMUL_W_CHAIN: {
    SelectFMUL_W_CHAIN(N);
    return;
  }
  case AMDGPUISD::FMA_W_CHAIN: {
    SelectFMA_W_CHAIN(N);
    return;
  }

  case ISD::SCALAR_TO_VECTOR:
  case ISD::BUILD_VECTOR: {
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    if (VT.getScalarSizeInBits() == 16) {
      // v2i16/v2f16 constants can sometimes be packed into one 32-bit value.
      if (Opc == ISD::BUILD_VECTOR && NumVectorElts == 2) {
        if (SDNode *Packed = packConstantV2I16(N, *CurDAG)) {
          ReplaceNode(N, Packed);
          return;
        }
      }

      break;
    }

    assert(VT.getVectorElementType().bitsEq(MVT::i32));
    unsigned RegClassID =
        SIRegisterInfo::getSGPRClassForBitWidth(NumVectorElts * 32)->getID();
    SelectBuildVector(N, RegClassID);
    return;
  }
  case ISD::BUILD_PAIR: {
    // Lower a pair of 32/64-bit halves into a REG_SEQUENCE of the matching
    // SGPR class.
    SDValue RC, SubReg0, SubReg1;
    SDLoc DL(N);
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
                                          N->getValueType(0), Ops));
    return;
  }

  case ISD::Constant:
  case ISD::ConstantFP: {
    // 64-bit constants that are not inline immediates are split into two
    // S_MOV_B32s; inline immediates and narrower constants use patterns.
    if (N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    SDLoc DL(N);
    ReplaceNode(N, buildSMovImm64(DL, Imm, N->getValueType(0)));
    return;
  }
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    // There is a scalar version available, but unlike the vector version which
    // has a separate operand for the offset and width, the scalar version packs
    // the width and offset into a single operand. Try to move to the scalar
    // version if the offsets are constant, so that we can try to keep extended
    // loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    ReplaceNode(N, getBFE32(Signed, SDLoc(N), N->getOperand(0), OffsetVal,
                            WidthVal));
    return;
  }
  case AMDGPUISD::DIV_SCALE: {
    SelectDIV_SCALE(N);
    return;
  }
  case AMDGPUISD::MAD_I64_I32:
  case AMDGPUISD::MAD_U64_U32: {
    SelectMAD_64_32(N);
    return;
  }
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    return SelectMUL_LOHI(N);
  case ISD::CopyToReg: {
    // Legalize the node, then fall through to generated selection.
    const SITargetLowering& Lowering =
      *static_cast<const SITargetLowering*>(getTargetLowering());
    N = Lowering.legalizeTargetIndependentNode(N, *CurDAG);
    break;
  }
  case ISD::AND:
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SIGN_EXTEND_INREG:
    if (N->getValueType(0) != MVT::i32)
      break;

    SelectS_BFE(N);
    return;
  case ISD::BRCOND:
    SelectBRCOND(N);
    return;
  case ISD::FMAD:
  case ISD::FMA:
    SelectFMAD_FMA(N);
    return;
  case AMDGPUISD::CVT_PKRTZ_F16_F32:
  case AMDGPUISD::CVT_PKNORM_I16_F32:
  case AMDGPUISD::CVT_PKNORM_U16_F32:
  case AMDGPUISD::CVT_PK_U16_U32:
  case AMDGPUISD::CVT_PK_I16_I32: {
    // Hack around using a legal type if f16 is illegal.
    if (N->getValueType(0) == MVT::i32) {
      MVT NewVT = Opc == AMDGPUISD::CVT_PKRTZ_F16_F32 ? MVT::v2f16 : MVT::v2i16;
      N = CurDAG->MorphNodeTo(N, N->getOpcode(), CurDAG->getVTList(NewVT),
                              { N->getOperand(0), N->getOperand(1) });
      SelectCode(N);
      return;
    }

    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    SelectINTRINSIC_W_CHAIN(N);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    SelectINTRINSIC_WO_CHAIN(N);
    return;
  }
  case ISD::INTRINSIC_VOID: {
    SelectINTRINSIC_VOID(N);
    return;
  }
  }

  SelectCode(N);
}
714 
715 bool AMDGPUDAGToDAGISel::isUniformBr(const SDNode *N) const {
716   const BasicBlock *BB = FuncInfo->MBB->getBasicBlock();
717   const Instruction *Term = BB->getTerminator();
718   return Term->getMetadata("amdgpu.uniform") ||
719          Term->getMetadata("structurizecfg.uniform");
720 }
721 
722 bool AMDGPUDAGToDAGISel::isUnneededShiftMask(const SDNode *N,
723                                              unsigned ShAmtBits) const {
724   assert(N->getOpcode() == ISD::AND);
725 
726   const APInt &RHS = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
727   if (RHS.countTrailingOnes() >= ShAmtBits)
728     return true;
729 
730   const APInt &LHSKnownZeros = CurDAG->computeKnownBits(N->getOperand(0)).Zero;
731   return (LHSKnownZeros | RHS).countTrailingOnes() >= ShAmtBits;
732 }
733 
/// Recognize a base+offset address that was obscured by splitting a 64-bit
/// `or` into per-half vector operations. On success, \p N0 is the 64-bit base
/// and \p N1 the constant offset.
static bool getBaseWithOffsetUsingSplitOR(SelectionDAG &DAG, SDValue Addr,
                                          SDValue &N0, SDValue &N1) {
  if (Addr.getValueType() == MVT::i64 && Addr.getOpcode() == ISD::BITCAST &&
      Addr.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
    // As we split 64-bit `or` earlier, it's complicated pattern to match, i.e.
    // (i64 (bitcast (v2i32 (build_vector
    //                        (or (extract_vector_elt V, 0), OFFSET),
    //                        (extract_vector_elt V, 1)))))
    SDValue Lo = Addr.getOperand(0).getOperand(0);
    if (Lo.getOpcode() == ISD::OR && DAG.isBaseWithConstantOffset(Lo)) {
      SDValue BaseLo = Lo.getOperand(0);
      SDValue BaseHi = Addr.getOperand(0).getOperand(1);
      // Check that split base (Lo and Hi) are extracted from the same one.
      if (BaseLo.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          BaseHi.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          BaseLo.getOperand(0) == BaseHi.getOperand(0) &&
          // Lo is statically extracted from index 0.
          isa<ConstantSDNode>(BaseLo.getOperand(1)) &&
          BaseLo.getConstantOperandVal(1) == 0 &&
          // Hi is statically extracted from index 1.
          isa<ConstantSDNode>(BaseHi.getOperand(1)) &&
          BaseHi.getConstantOperandVal(1) == 1) {
        N0 = BaseLo.getOperand(0).getOperand(0);
        N1 = Lo.getOperand(1);
        return true;
      }
    }
  }
  return false;
}
764 
765 bool AMDGPUDAGToDAGISel::isBaseWithConstantOffset64(SDValue Addr, SDValue &LHS,
766                                                     SDValue &RHS) const {
767   if (CurDAG->isBaseWithConstantOffset(Addr)) {
768     LHS = Addr.getOperand(0);
769     RHS = Addr.getOperand(1);
770     return true;
771   }
772 
773   if (getBaseWithOffsetUsingSplitOR(*CurDAG, Addr, LHS, RHS)) {
774     assert(LHS && RHS && isa<ConstantSDNode>(RHS));
775     return true;
776   }
777 
778   return false;
779 }
780 
/// Human-readable pass name; must match the string used in the
/// INITIALIZE_PASS macros above.
StringRef AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}
784 
785 //===----------------------------------------------------------------------===//
786 // Complex Patterns
787 //===----------------------------------------------------------------------===//
788 
// Complex-pattern stub: this addressing mode never matches in this selector
// (always returns false; Base/Offset are left unset).
bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  return false;
}
793 
794 bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
795                                             SDValue &Offset) {
796   ConstantSDNode *C;
797   SDLoc DL(Addr);
798 
799   if ((C = dyn_cast<ConstantSDNode>(Addr))) {
800     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
801     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
802   } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
803              (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
804     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
805     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
806   } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
807             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
808     Base = Addr.getOperand(0);
809     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
810   } else {
811     Base = Addr;
812     Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
813   }
814 
815   return true;
816 }
817 
818 SDValue AMDGPUDAGToDAGISel::getMaterializedScalarImm32(int64_t Val,
819                                                        const SDLoc &DL) const {
820   SDNode *Mov = CurDAG->getMachineNode(
821     AMDGPU::S_MOV_B32, DL, MVT::i32,
822     CurDAG->getTargetConstant(Val, DL, MVT::i32));
823   return SDValue(Mov, 0);
824 }
825 
// FIXME: Should only handle addcarry/subcarry
/// Select a 64-bit add/sub (ADDC/ADDE/SUBC/SUBE) by splitting it into two
/// 32-bit halves chained through the carry, then recombining the results
/// with a REG_SEQUENCE. The scalar vs. vector opcode is chosen from the
/// node's divergence.
void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  unsigned Opcode = N->getOpcode();
  // ADDE/SUBE consume an incoming carry; ADDC/SUBC (and those) produce one.
  bool ConsumeCarry = (Opcode == ISD::ADDE || Opcode == ISD::SUBE);
  bool ProduceCarry =
      ConsumeCarry || Opcode == ISD::ADDC || Opcode == ISD::SUBC;
  bool IsAdd = Opcode == ISD::ADD || Opcode == ISD::ADDC || Opcode == ISD::ADDE;

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);

  // Split each 64-bit operand into its 32-bit halves.
  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);

  // Indexed as [produces-carry-in][divergent][is-add].
  static const unsigned OpcMap[2][2][2] = {
      {{AMDGPU::S_SUB_U32, AMDGPU::S_ADD_U32},
       {AMDGPU::V_SUB_CO_U32_e32, AMDGPU::V_ADD_CO_U32_e32}},
      {{AMDGPU::S_SUBB_U32, AMDGPU::S_ADDC_U32},
       {AMDGPU::V_SUBB_U32_e32, AMDGPU::V_ADDC_U32_e32}}};

  unsigned Opc = OpcMap[0][N->isDivergent()][IsAdd];
  unsigned CarryOpc = OpcMap[1][N->isDivergent()][IsAdd];

  // Low half: use the carry-consuming opcode only when an incoming carry
  // (operand 2) must be threaded in.
  SDNode *AddLo;
  if (!ConsumeCarry) {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };
    AddLo = CurDAG->getMachineNode(Opc, DL, VTList, Args);
  } else {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0), N->getOperand(2) };
    AddLo = CurDAG->getMachineNode(CarryOpc, DL, VTList, Args);
  }
  // High half always consumes the glue (carry) from the low half.
  SDValue AddHiArgs[] = {
    SDValue(Hi0, 0),
    SDValue(Hi1, 0),
    SDValue(AddLo, 1)
  };
  SDNode *AddHi = CurDAG->getMachineNode(CarryOpc, DL, VTList, AddHiArgs);

  SDValue RegSequenceArgs[] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
    SDValue(AddLo,0),
    Sub0,
    SDValue(AddHi,0),
    Sub1,
  };
  SDNode *RegSequence = CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                               MVT::i64, RegSequenceArgs);

  if (ProduceCarry) {
    // Replace the carry-use
    ReplaceUses(SDValue(N, 1), SDValue(AddHi, 1));
  }

  // Replace the remaining uses.
  ReplaceNode(N, RegSequence);
}
895 
896 void AMDGPUDAGToDAGISel::SelectAddcSubb(SDNode *N) {
897   SDLoc DL(N);
898   SDValue LHS = N->getOperand(0);
899   SDValue RHS = N->getOperand(1);
900   SDValue CI = N->getOperand(2);
901 
902   if (N->isDivergent()) {
903     unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::V_ADDC_U32_e64
904                                                    : AMDGPU::V_SUBB_U32_e64;
905     CurDAG->SelectNodeTo(
906         N, Opc, N->getVTList(),
907         {LHS, RHS, CI,
908          CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
909   } else {
910     unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::S_ADD_CO_PSEUDO
911                                                    : AMDGPU::S_SUB_CO_PSEUDO;
912     CurDAG->SelectNodeTo(N, Opc, N->getVTList(), {LHS, RHS, CI});
913   }
914 }
915 
916 void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) {
917   // The name of the opcodes are misleading. v_add_i32/v_sub_i32 have unsigned
918   // carry out despite the _i32 name. These were renamed in VI to _U32.
919   // FIXME: We should probably rename the opcodes here.
920   bool IsAdd = N->getOpcode() == ISD::UADDO;
921   bool IsVALU = N->isDivergent();
922 
923   for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end(); UI != E;
924        ++UI)
925     if (UI.getUse().getResNo() == 1) {
926       if ((IsAdd && (UI->getOpcode() != ISD::ADDCARRY)) ||
927           (!IsAdd && (UI->getOpcode() != ISD::SUBCARRY))) {
928         IsVALU = true;
929         break;
930       }
931     }
932 
933   if (IsVALU) {
934     unsigned Opc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
935 
936     CurDAG->SelectNodeTo(
937         N, Opc, N->getVTList(),
938         {N->getOperand(0), N->getOperand(1),
939          CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
940   } else {
941     unsigned Opc = N->getOpcode() == ISD::UADDO ? AMDGPU::S_UADDO_PSEUDO
942                                                 : AMDGPU::S_USUBO_PSEUDO;
943 
944     CurDAG->SelectNodeTo(N, Opc, N->getVTList(),
945                          {N->getOperand(0), N->getOperand(1)});
946   }
947 }
948 
949 void AMDGPUDAGToDAGISel::SelectFMA_W_CHAIN(SDNode *N) {
950   SDLoc SL(N);
951   //  src0_modifiers, src0,  src1_modifiers, src1, src2_modifiers, src2, clamp, omod
952   SDValue Ops[10];
953 
954   SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[6], Ops[7]);
955   SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
956   SelectVOP3Mods(N->getOperand(3), Ops[5], Ops[4]);
957   Ops[8] = N->getOperand(0);
958   Ops[9] = N->getOperand(4);
959 
960   // If there are no source modifiers, prefer fmac over fma because it can use
961   // the smaller VOP2 encoding.
962   bool UseFMAC = Subtarget->hasDLInsts() &&
963                  cast<ConstantSDNode>(Ops[0])->isZero() &&
964                  cast<ConstantSDNode>(Ops[2])->isZero() &&
965                  cast<ConstantSDNode>(Ops[4])->isZero();
966   unsigned Opcode = UseFMAC ? AMDGPU::V_FMAC_F32_e64 : AMDGPU::V_FMA_F32_e64;
967   CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), Ops);
968 }
969 
970 void AMDGPUDAGToDAGISel::SelectFMUL_W_CHAIN(SDNode *N) {
971   SDLoc SL(N);
972   //    src0_modifiers, src0,  src1_modifiers, src1, clamp, omod
973   SDValue Ops[8];
974 
975   SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[4], Ops[5]);
976   SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
977   Ops[6] = N->getOperand(0);
978   Ops[7] = N->getOperand(3);
979 
980   CurDAG->SelectNodeTo(N, AMDGPU::V_MUL_F32_e64, N->getVTList(), Ops);
981 }
982 
983 // We need to handle this here because tablegen doesn't support matching
984 // instructions with multiple outputs.
985 void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
986   SDLoc SL(N);
987   EVT VT = N->getValueType(0);
988 
989   assert(VT == MVT::f32 || VT == MVT::f64);
990 
991   unsigned Opc
992     = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64_e64 : AMDGPU::V_DIV_SCALE_F32_e64;
993 
994   // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp,
995   // omod
996   SDValue Ops[8];
997   SelectVOP3BMods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
998   SelectVOP3BMods(N->getOperand(1), Ops[3], Ops[2]);
999   SelectVOP3BMods(N->getOperand(2), Ops[5], Ops[4]);
1000   CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
1001 }
1002 
1003 // We need to handle this here because tablegen doesn't support matching
1004 // instructions with multiple outputs.
1005 void AMDGPUDAGToDAGISel::SelectMAD_64_32(SDNode *N) {
1006   SDLoc SL(N);
1007   bool Signed = N->getOpcode() == AMDGPUISD::MAD_I64_I32;
1008   unsigned Opc;
1009   if (Subtarget->getGeneration() == AMDGPUSubtarget::GFX11)
1010     Opc = Signed ? AMDGPU::V_MAD_I64_I32_gfx11_e64
1011                  : AMDGPU::V_MAD_U64_U32_gfx11_e64;
1012   else
1013     Opc = Signed ? AMDGPU::V_MAD_I64_I32_e64 : AMDGPU::V_MAD_U64_U32_e64;
1014 
1015   SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
1016   SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
1017                     Clamp };
1018   CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
1019 }
1020 
// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
// Select [SU]MUL_LOHI as a 64-bit mad with a zero addend, then split the
// 64-bit product into the two 32-bit results with EXTRACT_SUBREG.
void AMDGPUDAGToDAGISel::SelectMUL_LOHI(SDNode *N) {
  SDLoc SL(N);
  bool Signed = N->getOpcode() == ISD::SMUL_LOHI;
  unsigned Opc;
  // GFX11 has distinct opcode names for the 64-bit mad.
  if (Subtarget->getGeneration() == AMDGPUSubtarget::GFX11)
    Opc = Signed ? AMDGPU::V_MAD_I64_I32_gfx11_e64
                 : AMDGPU::V_MAD_U64_U32_gfx11_e64;
  else
    Opc = Signed ? AMDGPU::V_MAD_I64_I32_e64 : AMDGPU::V_MAD_U64_U32_e64;

  // src2 = 0 turns the mad into a pure 32x32->64 multiply.
  SDValue Zero = CurDAG->getTargetConstant(0, SL, MVT::i64);
  SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
  SDValue Ops[] = {N->getOperand(0), N->getOperand(1), Zero, Clamp};
  SDNode *Mad = CurDAG->getMachineNode(Opc, SL, N->getVTList(), Ops);
  if (!SDValue(N, 0).use_empty()) {
    // Low 32 bits of the product.
    SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32);
    SDNode *Lo = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SL,
                                        MVT::i32, SDValue(Mad, 0), Sub0);
    ReplaceUses(SDValue(N, 0), SDValue(Lo, 0));
  }
  if (!SDValue(N, 1).use_empty()) {
    // High 32 bits of the product.
    SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32);
    SDNode *Hi = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SL,
                                        MVT::i32, SDValue(Mad, 0), Sub1);
    ReplaceUses(SDValue(N, 1), SDValue(Hi, 0));
  }
  CurDAG->RemoveDeadNode(N);
}
1051 
1052 bool AMDGPUDAGToDAGISel::isDSOffsetLegal(SDValue Base, unsigned Offset) const {
1053   if (!isUInt<16>(Offset))
1054     return false;
1055 
1056   if (!Base || Subtarget->hasUsableDSOffset() ||
1057       Subtarget->unsafeDSOffsetFoldingEnabled())
1058     return true;
1059 
1060   // On Southern Islands instruction with a negative base value and an offset
1061   // don't seem to work.
1062   return CurDAG->SignBitIsZero(Base);
1063 }
1064 
// Match a DS address as (base, uimm16 byte offset). Always succeeds — the
// default case uses the whole address as the base with offset 0.
bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
                                              SDValue &Offset) const {
  SDLoc DL(Addr);
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isDSOffsetLegal(N0, C1->getSExtValue())) {
      // (add n0, c0)
      Base = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      int64_t ByteOffset = C->getSExtValue();
      if (isDSOffsetLegal(SDValue(), ByteOffset)) {
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffsetLegal(Sub, ByteOffset)) {
          SmallVector<SDValue, 3> Opnds;
          Opnds.push_back(Zero);
          Opnds.push_back(Addr.getOperand(1));

          // FIXME: Select to VOP3 version for with-carry.
          unsigned SubOp = AMDGPU::V_SUB_CO_U32_e32;
          if (Subtarget->hasAddNoCarry()) {
            // No-carry targets use the e64 form and need the clamp operand.
            SubOp = AMDGPU::V_SUB_U32_e64;
            Opnds.push_back(
                CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
          }

          // Emit the negated base (0 - x) directly as a machine node.
          MachineSDNode *MachineSub =
              CurDAG->getMachineNode(SubOp, DL, MVT::i32, Opnds);

          Base = SDValue(MachineSub, 0);
          Offset = CurDAG->getTargetConstant(ByteOffset, DL, MVT::i16);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    // If we have a constant address, prefer to put the constant into the
    // offset. This can save moves to load the constant address since multiple
    // operations can share the zero base address register, and enables merging
    // into read2 / write2 instructions.

    SDLoc DL(Addr);

    if (isDSOffsetLegal(SDValue(), CAddr->getZExtValue())) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                 DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // default case
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i16);
  return true;
}
1136 
1137 bool AMDGPUDAGToDAGISel::isDSOffset2Legal(SDValue Base, unsigned Offset0,
1138                                           unsigned Offset1,
1139                                           unsigned Size) const {
1140   if (Offset0 % Size != 0 || Offset1 % Size != 0)
1141     return false;
1142   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
1143     return false;
1144 
1145   if (!Base || Subtarget->hasUsableDSOffset() ||
1146       Subtarget->unsafeDSOffsetFoldingEnabled())
1147     return true;
1148 
1149   // On Southern Islands instruction with a negative base value and an offset
1150   // don't seem to work.
1151   return CurDAG->SignBitIsZero(Base);
1152 }
1153 
1154 // TODO: If offset is too big, put low 16-bit into offset.
1155 bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
1156                                                    SDValue &Offset0,
1157                                                    SDValue &Offset1) const {
1158   return SelectDSReadWrite2(Addr, Base, Offset0, Offset1, 4);
1159 }
1160 
1161 bool AMDGPUDAGToDAGISel::SelectDS128Bit8ByteAligned(SDValue Addr, SDValue &Base,
1162                                                     SDValue &Offset0,
1163                                                     SDValue &Offset1) const {
1164   return SelectDSReadWrite2(Addr, Base, Offset0, Offset1, 8);
1165 }
1166 
// Match a DS read2/write2 address as (base, offset0, offset1), where both
// offsets are expressed in units of \p Size bytes (4 or 8). Always succeeds;
// the default case uses offsets {0, 1} with the whole address as base.
bool AMDGPUDAGToDAGISel::SelectDSReadWrite2(SDValue Addr, SDValue &Base,
                                            SDValue &Offset0, SDValue &Offset1,
                                            unsigned Size) const {
  SDLoc DL(Addr);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    // The pair covers two consecutive elements starting at the constant.
    unsigned OffsetValue0 = C1->getZExtValue();
    unsigned OffsetValue1 = OffsetValue0 + Size;

    // (add n0, c0)
    if (isDSOffset2Legal(N0, OffsetValue0, OffsetValue1, Size)) {
      Base = N0;
      Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C =
            dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      unsigned OffsetValue0 = C->getZExtValue();
      unsigned OffsetValue1 = OffsetValue0 + Size;

      if (isDSOffset2Legal(SDValue(), OffsetValue0, OffsetValue1, Size)) {
        SDLoc DL(Addr);
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub =
            CurDAG->getNode(ISD::SUB, DL, MVT::i32, Zero, Addr.getOperand(1));

        if (isDSOffset2Legal(Sub, OffsetValue0, OffsetValue1, Size)) {
          SmallVector<SDValue, 3> Opnds;
          Opnds.push_back(Zero);
          Opnds.push_back(Addr.getOperand(1));
          unsigned SubOp = AMDGPU::V_SUB_CO_U32_e32;
          if (Subtarget->hasAddNoCarry()) {
            // No-carry targets use the e64 form and need the clamp operand.
            SubOp = AMDGPU::V_SUB_U32_e64;
            Opnds.push_back(
                CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
          }

          // NOTE(review): the result type is MVT::getIntegerVT(Size * 8),
          // i.e. i64 when Size == 8, for a 32-bit address subtract — looks
          // like it should be MVT::i32; confirm against upstream before
          // changing.
          MachineSDNode *MachineSub = CurDAG->getMachineNode(
              SubOp, DL, MVT::getIntegerVT(Size * 8), Opnds);

          Base = SDValue(MachineSub, 0);
          Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
          Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    // Constant address: share a zero base register and fold the constant
    // into the offsets.
    unsigned OffsetValue0 = CAddr->getZExtValue();
    unsigned OffsetValue1 = OffsetValue0 + Size;

    if (isDSOffset2Legal(SDValue(), OffsetValue0, OffsetValue1, Size)) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero =
          CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
      return true;
    }
  }

  // default case

  Base = Addr;
  Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
  Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
  return true;
}
1246 
// Decompose a MUBUF address into (Ptr, VAddr, SOffset, Offset) plus the
// offen/idxen/addr64 mode bits. Ptr becomes the base used to build the
// resource descriptor; a divergent address component is routed to VAddr
// (addr64 mode). Returns false when flat instructions should be used
// instead.
bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr, SDValue &VAddr,
                                     SDValue &SOffset, SDValue &Offset,
                                     SDValue &Offen, SDValue &Idxen,
                                     SDValue &Addr64) const {
  // Subtarget prefers to use flat instruction
  // FIXME: This should be a pattern predicate and not reach here
  if (Subtarget->useFlatForGlobal())
    return false;

  SDLoc DL(Addr);

  // Defaults: no indexing, no addr64, zero scalar offset.
  Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Offen = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Addr64 = CurDAG->getTargetConstant(0, DL, MVT::i1);
  SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);

  // Peel off a constant offset if it fits in 32 bits.
  ConstantSDNode *C1 = nullptr;
  SDValue N0 = Addr;
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    C1 = cast<ConstantSDNode>(Addr.getOperand(1));
    if (isUInt<32>(C1->getZExtValue()))
      N0 = Addr.getOperand(0);
    else
      C1 = nullptr;
  }

  if (N0.getOpcode() == ISD::ADD) {
    // (add N2, N3) -> addr64, or
    // (add (add N2, N3), C1) -> addr64
    SDValue N2 = N0.getOperand(0);
    SDValue N3 = N0.getOperand(1);
    Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);

    if (N2->isDivergent()) {
      if (N3->isDivergent()) {
        // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
        // addr64, and construct the resource from a 0 address.
        Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
        VAddr = N0;
      } else {
        // N2 is divergent, N3 is not.
        Ptr = N3;
        VAddr = N2;
      }
    } else {
      // N2 is not divergent.
      Ptr = N2;
      VAddr = N3;
    }
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  } else if (N0->isDivergent()) {
    // N0 is divergent. Use it as the addr64, and construct the resource from a
    // 0 address.
    Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
    VAddr = N0;
    Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
  } else {
    // N0 -> offset, or
    // (N0 + C1) -> offset
    VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
    Ptr = N0;
  }

  if (!C1) {
    // No offset.
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
    return true;
  }

  if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue())) {
    // Legal offset for instruction.
    Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
    return true;
  }

  // Illegal offset, store it in soffset.
  Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  SOffset =
      SDValue(CurDAG->getMachineNode(
                  AMDGPU::S_MOV_B32, DL, MVT::i32,
                  CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32)),
              0);
  return true;
}
1331 
1332 bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
1333                                            SDValue &VAddr, SDValue &SOffset,
1334                                            SDValue &Offset) const {
1335   SDValue Ptr, Offen, Idxen, Addr64;
1336 
1337   // addr64 bit was removed for volcanic islands.
1338   // FIXME: This should be a pattern predicate and not reach here
1339   if (!Subtarget->hasAddr64())
1340     return false;
1341 
1342   if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64))
1343     return false;
1344 
1345   ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
1346   if (C->getSExtValue()) {
1347     SDLoc DL(Addr);
1348 
1349     const SITargetLowering& Lowering =
1350       *static_cast<const SITargetLowering*>(getTargetLowering());
1351 
1352     SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
1353     return true;
1354   }
1355 
1356   return false;
1357 }
1358 
1359 std::pair<SDValue, SDValue> AMDGPUDAGToDAGISel::foldFrameIndex(SDValue N) const {
1360   SDLoc DL(N);
1361 
1362   auto *FI = dyn_cast<FrameIndexSDNode>(N);
1363   SDValue TFI =
1364       FI ? CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0)) : N;
1365 
1366   // We rebase the base address into an absolute stack address and hence
1367   // use constant 0 for soffset. This value must be retained until
1368   // frame elimination and eliminateFrameIndex will choose the appropriate
1369   // frame register if need be.
1370   return std::make_pair(TFI, CurDAG->getTargetConstant(0, DL, MVT::i32));
1371 }
1372 
// Match a private (scratch) address for the MUBUF offen (VGPR-offset)
// addressing mode: Rsrc is the scratch descriptor, VAddr the VGPR component,
// SOffset the SGPR component and ImmOffset the immediate. Always succeeds
// except implicitly via the fallthrough paths below.
bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffen(SDNode *Parent,
                                                 SDValue Addr, SDValue &Rsrc,
                                                 SDValue &VAddr, SDValue &SOffset,
                                                 SDValue &ImmOffset) const {

  SDLoc DL(Addr);
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  Rsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);

  if (ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    int64_t Imm = CAddr->getSExtValue();
    const int64_t NullPtr =
        AMDGPUTargetMachine::getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS);
    // Don't fold null pointer.
    if (Imm != NullPtr) {
      // Split the constant: bits above 4095 go into a VGPR, the low 12 bits
      // into the immediate offset field.
      SDValue HighBits = CurDAG->getTargetConstant(Imm & ~4095, DL, MVT::i32);
      MachineSDNode *MovHighBits = CurDAG->getMachineNode(
        AMDGPU::V_MOV_B32_e32, DL, MVT::i32, HighBits);
      VAddr = SDValue(MovHighBits, 0);

      SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
      ImmOffset = CurDAG->getTargetConstant(Imm & 4095, DL, MVT::i16);
      return true;
    }
  }

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    // (add n0, c1)

    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);

    // Offsets in vaddr must be positive if range checking is enabled.
    //
    // The total computation of vaddr + soffset + offset must not overflow.  If
    // vaddr is negative, even if offset is 0 the sgpr offset add will end up
    // overflowing.
    //
    // Prior to gfx9, MUBUF instructions with the vaddr offset enabled would
    // always perform a range check. If a negative vaddr base index was used,
    // this would fail the range check. The overall address computation would
    // compute a valid address, but this doesn't happen due to the range
    // check. For out-of-bounds MUBUF loads, a 0 is returned.
    //
    // Therefore it should be safe to fold any VGPR offset on gfx9 into the
    // MUBUF vaddr, but not on older subtargets which can only do this if the
    // sign bit is known 0.
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue()) &&
        (!Subtarget->privateMemoryResourceIsRangeChecked() ||
         CurDAG->SignBitIsZero(N0))) {
      std::tie(VAddr, SOffset) = foldFrameIndex(N0);
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // (node)
  std::tie(VAddr, SOffset) = foldFrameIndex(Addr);
  ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  return true;
}
1437 
1438 static bool IsCopyFromSGPR(const SIRegisterInfo &TRI, SDValue Val) {
1439   if (Val.getOpcode() != ISD::CopyFromReg)
1440     return false;
1441   auto RC =
1442       TRI.getPhysRegClass(cast<RegisterSDNode>(Val.getOperand(1))->getReg());
1443   return RC && TRI.isSGPRClass(RC);
1444 }
1445 
// Match a scratch address with no VGPR component: a plain SGPR copy
// (soffset only), an SGPR copy plus a legal immediate, or a bare legal
// immediate. Fails otherwise so the offen variant can be tried instead.
bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffset(SDNode *Parent,
                                                  SDValue Addr,
                                                  SDValue &SRsrc,
                                                  SDValue &SOffset,
                                                  SDValue &Offset) const {
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  SDLoc DL(Addr);

  // CopyFromReg <sgpr>
  if (IsCopyFromSGPR(*TRI, Addr)) {
    SRsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
    SOffset = Addr;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
    return true;
  }

  ConstantSDNode *CAddr;
  if (Addr.getOpcode() == ISD::ADD) {
    // Add (CopyFromReg <sgpr>) <constant>
    CAddr = dyn_cast<ConstantSDNode>(Addr.getOperand(1));
    if (!CAddr || !SIInstrInfo::isLegalMUBUFImmOffset(CAddr->getZExtValue()))
      return false;
    if (!IsCopyFromSGPR(*TRI, Addr.getOperand(0)))
      return false;

    SOffset = Addr.getOperand(0);
  } else if ((CAddr = dyn_cast<ConstantSDNode>(Addr)) &&
             SIInstrInfo::isLegalMUBUFImmOffset(CAddr->getZExtValue())) {
    // <constant>
    SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  } else {
    return false;
  }

  SRsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);

  // CAddr was set by whichever branch above succeeded.
  Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
  return true;
}
1488 
1489 bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
1490                                            SDValue &SOffset, SDValue &Offset
1491                                            ) const {
1492   SDValue Ptr, VAddr, Offen, Idxen, Addr64;
1493   const SIInstrInfo *TII =
1494     static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
1495 
1496   if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64))
1497     return false;
1498 
1499   if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
1500       !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
1501       !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
1502     uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
1503                     APInt::getAllOnes(32).getZExtValue(); // Size
1504     SDLoc DL(Addr);
1505 
1506     const SITargetLowering& Lowering =
1507       *static_cast<const SITargetLowering*>(getTargetLowering());
1508 
1509     SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
1510     return true;
1511   }
1512   return false;
1513 }
1514 
1515 // Find a load or store from corresponding pattern root.
1516 // Roots may be build_vector, bitconvert or their combinations.
1517 static MemSDNode* findMemSDNode(SDNode *N) {
1518   N = AMDGPUTargetLowering::stripBitcast(SDValue(N,0)).getNode();
1519   if (MemSDNode *MN = dyn_cast<MemSDNode>(N))
1520     return MN;
1521   assert(isa<BuildVectorSDNode>(N));
1522   for (SDValue V : N->op_values())
1523     if (MemSDNode *MN =
1524           dyn_cast<MemSDNode>(AMDGPUTargetLowering::stripBitcast(V)))
1525       return MN;
1526   llvm_unreachable("cannot find MemSDNode in the pattern!");
1527 }
1528 
// Match (vaddr + imm) addressing for flat/global/scratch instructions of
// kind \p FlatVariant. If a constant offset exists but is out of range, the
// legal portion goes into the offset field and the remainder is added back
// to the base with 32- or 64-bit VALU adds. Always succeeds.
bool AMDGPUDAGToDAGISel::SelectFlatOffsetImpl(SDNode *N, SDValue Addr,
                                              SDValue &VAddr, SDValue &Offset,
                                              uint64_t FlatVariant) const {
  int64_t OffsetVal = 0;

  unsigned AS = findMemSDNode(N)->getAddressSpace();

  // Affected subtargets cannot use the offset field with flat/global
  // addresses, so no folding is attempted there.
  bool CanHaveFlatSegmentOffsetBug =
      Subtarget->hasFlatSegmentOffsetBug() &&
      FlatVariant == SIInstrFlags::FLAT &&
      (AS == AMDGPUAS::FLAT_ADDRESS || AS == AMDGPUAS::GLOBAL_ADDRESS);

  if (Subtarget->hasFlatInstOffsets() && !CanHaveFlatSegmentOffsetBug) {
    SDValue N0, N1;
    if (isBaseWithConstantOffset64(Addr, N0, N1)) {
      int64_t COffsetVal = cast<ConstantSDNode>(N1)->getSExtValue();

      const SIInstrInfo *TII = Subtarget->getInstrInfo();
      if (TII->isLegalFLATOffset(COffsetVal, AS, FlatVariant)) {
        // Whole constant fits in the offset field.
        Addr = N0;
        OffsetVal = COffsetVal;
      } else {
        // If the offset doesn't fit, put the low bits into the offset field and
        // add the rest.
        //
        // For a FLAT instruction the hardware decides whether to access
        // global/scratch/shared memory based on the high bits of vaddr,
        // ignoring the offset field, so we have to ensure that when we add
        // remainder to vaddr it still points into the same underlying object.
        // The easiest way to do that is to make sure that we split the offset
        // into two pieces that are both >= 0 or both <= 0.

        SDLoc DL(N);
        uint64_t RemainderOffset;

        std::tie(OffsetVal, RemainderOffset) =
            TII->splitFlatOffset(COffsetVal, AS, FlatVariant);

        SDValue AddOffsetLo =
            getMaterializedScalarImm32(Lo_32(RemainderOffset), DL);
        SDValue Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);

        if (Addr.getValueType().getSizeInBits() == 32) {
          // 32-bit address (scratch): a single VALU add.
          SmallVector<SDValue, 3> Opnds;
          Opnds.push_back(N0);
          Opnds.push_back(AddOffsetLo);
          unsigned AddOp = AMDGPU::V_ADD_CO_U32_e32;
          if (Subtarget->hasAddNoCarry()) {
            AddOp = AMDGPU::V_ADD_U32_e64;
            Opnds.push_back(Clamp);
          }
          Addr = SDValue(CurDAG->getMachineNode(AddOp, DL, MVT::i32, Opnds), 0);
        } else {
          // TODO: Should this try to use a scalar add pseudo if the base address
          // is uniform and saddr is usable?
          // 64-bit address: add with carry across the two 32-bit halves,
          // then reassemble with a REG_SEQUENCE.
          SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
          SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);

          SDNode *N0Lo = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                                DL, MVT::i32, N0, Sub0);
          SDNode *N0Hi = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                                DL, MVT::i32, N0, Sub1);

          SDValue AddOffsetHi =
              getMaterializedScalarImm32(Hi_32(RemainderOffset), DL);

          SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i1);

          SDNode *Add =
              CurDAG->getMachineNode(AMDGPU::V_ADD_CO_U32_e64, DL, VTs,
                                     {AddOffsetLo, SDValue(N0Lo, 0), Clamp});

          SDNode *Addc = CurDAG->getMachineNode(
              AMDGPU::V_ADDC_U32_e64, DL, VTs,
              {AddOffsetHi, SDValue(N0Hi, 0), SDValue(Add, 1), Clamp});

          SDValue RegSequenceArgs[] = {
              CurDAG->getTargetConstant(AMDGPU::VReg_64RegClassID, DL, MVT::i32),
              SDValue(Add, 0), Sub0, SDValue(Addc, 0), Sub1};

          Addr = SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                                MVT::i64, RegSequenceArgs),
                         0);
        }
      }
    }
  }

  VAddr = Addr;
  Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i16);
  return true;
}
1621 
// Select Addr as VAddr plus an immediate Offset using the offset rules for
// plain FLAT instructions (thin wrapper over SelectFlatOffsetImpl).
bool AMDGPUDAGToDAGISel::SelectFlatOffset(SDNode *N, SDValue Addr,
                                          SDValue &VAddr,
                                          SDValue &Offset) const {
  return SelectFlatOffsetImpl(N, Addr, VAddr, Offset, SIInstrFlags::FLAT);
}
1627 
// Select Addr as VAddr plus an immediate Offset using the offset rules for
// global (FlatGlobal) instructions (thin wrapper over SelectFlatOffsetImpl).
bool AMDGPUDAGToDAGISel::SelectGlobalOffset(SDNode *N, SDValue Addr,
                                            SDValue &VAddr,
                                            SDValue &Offset) const {
  return SelectFlatOffsetImpl(N, Addr, VAddr, Offset, SIInstrFlags::FlatGlobal);
}
1633 
// Select Addr as VAddr plus an immediate Offset using the offset rules for
// scratch (FlatScratch) instructions (thin wrapper over SelectFlatOffsetImpl).
bool AMDGPUDAGToDAGISel::SelectScratchOffset(SDNode *N, SDValue Addr,
                                             SDValue &VAddr,
                                             SDValue &Offset) const {
  return SelectFlatOffsetImpl(N, Addr, VAddr, Offset,
                              SIInstrFlags::FlatScratch);
}
1640 
1641 // If this matches zero_extend i32:x, return x
1642 static SDValue matchZExtFromI32(SDValue Op) {
1643   if (Op.getOpcode() != ISD::ZERO_EXTEND)
1644     return SDValue();
1645 
1646   SDValue ExtSrc = Op.getOperand(0);
1647   return (ExtSrc.getValueType() == MVT::i32) ? ExtSrc : SDValue();
1648 }
1649 
// Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
// On success fills SAddr (uniform 64-bit base), VOffset (32-bit VGPR part)
// and Offset (i16 target-constant immediate).
bool AMDGPUDAGToDAGISel::SelectGlobalSAddr(SDNode *N,
                                           SDValue Addr,
                                           SDValue &SAddr,
                                           SDValue &VOffset,
                                           SDValue &Offset) const {
  int64_t ImmOffset = 0;

  // Match the immediate offset first, which canonically is moved as low as
  // possible.

  SDValue LHS, RHS;
  if (isBaseWithConstantOffset64(Addr, LHS, RHS)) {
    int64_t COffsetVal = cast<ConstantSDNode>(RHS)->getSExtValue();
    const SIInstrInfo *TII = Subtarget->getInstrInfo();

    if (TII->isLegalFLATOffset(COffsetVal, AMDGPUAS::GLOBAL_ADDRESS,
                               SIInstrFlags::FlatGlobal)) {
      // The constant is directly encodable; peel it off and keep matching the
      // remaining base address.
      Addr = LHS;
      ImmOffset = COffsetVal;
    } else if (!LHS->isDivergent()) {
      if (COffsetVal > 0) {
        SDLoc SL(N);
        // saddr + large_offset -> saddr +
        //                         (voffset = large_offset & ~MaxOffset) +
        //                         (large_offset & MaxOffset);
        int64_t SplitImmOffset, RemainderOffset;
        std::tie(SplitImmOffset, RemainderOffset) = TII->splitFlatOffset(
            COffsetVal, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);

        if (isUInt<32>(RemainderOffset)) {
          // Materialize the non-encodable remainder in a VGPR and use it as
          // the variable offset.
          SDNode *VMov = CurDAG->getMachineNode(
              AMDGPU::V_MOV_B32_e32, SL, MVT::i32,
              CurDAG->getTargetConstant(RemainderOffset, SDLoc(), MVT::i32));
          VOffset = SDValue(VMov, 0);
          SAddr = LHS;
          Offset = CurDAG->getTargetConstant(SplitImmOffset, SDLoc(), MVT::i16);
          return true;
        }
      }

      // We are adding a 64 bit SGPR and a constant. If constant bus limit
      // is 1 we would need to perform 1 or 2 extra moves for each half of
      // the constant and it is better to do a scalar add and then issue a
      // single VALU instruction to materialize zero. Otherwise it is less
      // instructions to perform VALU adds with immediates or inline literals.
      unsigned NumLiterals =
          !TII->isInlineConstant(APInt(32, COffsetVal & 0xffffffff)) +
          !TII->isInlineConstant(APInt(32, COffsetVal >> 32));
      if (Subtarget->getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
        return false;
    }
  }

  // Match the variable offset.
  if (Addr.getOpcode() == ISD::ADD) {
    LHS = Addr.getOperand(0);
    RHS = Addr.getOperand(1);

    if (!LHS->isDivergent()) {
      // add (i64 sgpr), (zero_extend (i32 vgpr))
      if (SDValue ZextRHS = matchZExtFromI32(RHS)) {
        SAddr = LHS;
        VOffset = ZextRHS;
      }
    }

    // Try the commuted form if the first one did not match (SAddr is still
    // null here in that case).
    if (!SAddr && !RHS->isDivergent()) {
      // add (zero_extend (i32 vgpr)), (i64 sgpr)
      if (SDValue ZextLHS = matchZExtFromI32(LHS)) {
        SAddr = RHS;
        VOffset = ZextLHS;
      }
    }

    if (SAddr) {
      Offset = CurDAG->getTargetConstant(ImmOffset, SDLoc(), MVT::i16);
      return true;
    }
  }

  // A divergent, undef, or pure-constant address cannot supply a uniform
  // SADDR base.
  if (Addr->isDivergent() || Addr.getOpcode() == ISD::UNDEF ||
      isa<ConstantSDNode>(Addr))
    return false;

  // It's cheaper to materialize a single 32-bit zero for vaddr than the two
  // moves required to copy a 64-bit SGPR to VGPR.
  SAddr = Addr;
  SDNode *VMov =
      CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, SDLoc(Addr), MVT::i32,
                             CurDAG->getTargetConstant(0, SDLoc(), MVT::i32));
  VOffset = SDValue(VMov, 0);
  Offset = CurDAG->getTargetConstant(ImmOffset, SDLoc(), MVT::i16);
  return true;
}
1745 
1746 static SDValue SelectSAddrFI(SelectionDAG *CurDAG, SDValue SAddr) {
1747   if (auto FI = dyn_cast<FrameIndexSDNode>(SAddr)) {
1748     SAddr = CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0));
1749   } else if (SAddr.getOpcode() == ISD::ADD &&
1750              isa<FrameIndexSDNode>(SAddr.getOperand(0))) {
1751     // Materialize this into a scalar move for scalar address to avoid
1752     // readfirstlane.
1753     auto FI = cast<FrameIndexSDNode>(SAddr.getOperand(0));
1754     SDValue TFI = CurDAG->getTargetFrameIndex(FI->getIndex(),
1755                                               FI->getValueType(0));
1756     SAddr = SDValue(CurDAG->getMachineNode(AMDGPU::S_ADD_I32, SDLoc(SAddr),
1757                                            MVT::i32, TFI, SAddr.getOperand(1)),
1758                     0);
1759   }
1760 
1761   return SAddr;
1762 }
1763 
// Match (32-bit SGPR base) + sext(imm offset)
// On success fills SAddr (uniform base, possibly rewritten through
// SelectSAddrFI / S_ADD_I32) and Offset (i16 target-constant immediate).
bool AMDGPUDAGToDAGISel::SelectScratchSAddr(SDNode *Parent, SDValue Addr,
                                            SDValue &SAddr,
                                            SDValue &Offset) const {
  // The scalar base must be uniform.
  if (Addr->isDivergent())
    return false;

  SDLoc DL(Addr);

  int64_t COffsetVal = 0;

  // Split off a constant offset from the base if present.
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    COffsetVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    SAddr = Addr.getOperand(0);
  } else {
    SAddr = Addr;
  }

  SAddr = SelectSAddrFI(CurDAG, SAddr);

  const SIInstrInfo *TII = Subtarget->getInstrInfo();

  // If the constant does not fit the scratch offset field, keep only the
  // encodable part and fold the remainder into the base with an S_ADD_I32.
  if (!TII->isLegalFLATOffset(COffsetVal, AMDGPUAS::PRIVATE_ADDRESS,
                              SIInstrFlags::FlatScratch)) {
    int64_t SplitImmOffset, RemainderOffset;
    std::tie(SplitImmOffset, RemainderOffset) = TII->splitFlatOffset(
        COffsetVal, AMDGPUAS::PRIVATE_ADDRESS, SIInstrFlags::FlatScratch);

    COffsetVal = SplitImmOffset;

    // A frame-index base gets the remainder via a materialized scalar move;
    // otherwise the constant is used directly as the add's second operand.
    SDValue AddOffset =
        SAddr.getOpcode() == ISD::TargetFrameIndex
            ? getMaterializedScalarImm32(Lo_32(RemainderOffset), DL)
            : CurDAG->getTargetConstant(RemainderOffset, DL, MVT::i32);
    SAddr = SDValue(CurDAG->getMachineNode(AMDGPU::S_ADD_I32, DL, MVT::i32,
                                           SAddr, AddOffset),
                    0);
  }

  Offset = CurDAG->getTargetConstant(COffsetVal, DL, MVT::i16);

  return true;
}
1807 
// Match (divergent VGPR part) + (uniform SGPR part) + sext(imm offset) for
// scratch instructions that take both a VADDR and a SADDR operand.
bool AMDGPUDAGToDAGISel::SelectScratchSVAddr(SDNode *N, SDValue Addr,
                                             SDValue &VAddr, SDValue &SAddr,
                                             SDValue &Offset) const  {
  int64_t ImmOffset = 0;

  SDValue LHS, RHS;
  // First, peel off a constant offset if it is legal for scratch, or split a
  // large positive offset into an encodable part plus a VGPR-held remainder.
  if (isBaseWithConstantOffset64(Addr, LHS, RHS)) {
    int64_t COffsetVal = cast<ConstantSDNode>(RHS)->getSExtValue();
    const SIInstrInfo *TII = Subtarget->getInstrInfo();

    if (TII->isLegalFLATOffset(COffsetVal, AMDGPUAS::PRIVATE_ADDRESS, true)) {
      Addr = LHS;
      ImmOffset = COffsetVal;
    } else if (!LHS->isDivergent() && COffsetVal > 0) {
      SDLoc SL(N);
      // saddr + large_offset -> saddr + (vaddr = large_offset & ~MaxOffset) +
      //                         (large_offset & MaxOffset);
      int64_t SplitImmOffset, RemainderOffset;
      std::tie(SplitImmOffset, RemainderOffset)
        = TII->splitFlatOffset(COffsetVal, AMDGPUAS::PRIVATE_ADDRESS, true);

      if (isUInt<32>(RemainderOffset)) {
        // Materialize the remainder in a VGPR and use it as the VADDR.
        SDNode *VMov = CurDAG->getMachineNode(
          AMDGPU::V_MOV_B32_e32, SL, MVT::i32,
          CurDAG->getTargetConstant(RemainderOffset, SDLoc(), MVT::i32));
        VAddr = SDValue(VMov, 0);
        SAddr = LHS;
        Offset = CurDAG->getTargetConstant(SplitImmOffset, SDLoc(), MVT::i16);
        return true;
      }
    }
  }

  // Otherwise the (remaining) address must be an add whose operands can be
  // assigned to SADDR/VADDR by divergence.
  if (Addr.getOpcode() != ISD::ADD)
    return false;

  LHS = Addr.getOperand(0);
  RHS = Addr.getOperand(1);

  // Exactly one side must be uniform (-> SADDR) and the other divergent
  // (-> VADDR).
  if (!LHS->isDivergent() && RHS->isDivergent()) {
    SAddr = LHS;
    VAddr = RHS;
  } else if (!RHS->isDivergent() && LHS->isDivergent()) {
    SAddr = RHS;
    VAddr = LHS;
  } else {
    return false;
  }

  SAddr = SelectSAddrFI(CurDAG, SAddr);
  Offset = CurDAG->getTargetConstant(ImmOffset, SDLoc(), MVT::i16);
  return true;
}
1861 
// Select an SMRD byte offset. On success Offset is either:
//  - a target constant with Imm set to true (fits the immediate field), or
//  - a 32-bit SGPR / S_MOV'd literal with Imm left untouched (callers in
//    this file initialize it to false).
bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode,
                                          SDValue &Offset, bool &Imm) const {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ByteOffsetNode);
  if (!C) {
    // Non-constant: accept a 32-bit scalar integer value directly...
    if (ByteOffsetNode.getValueType().isScalarInteger() &&
        ByteOffsetNode.getValueType().getSizeInBits() == 32) {
      Offset = ByteOffsetNode;
      Imm = false;
      return true;
    }
    // ...or look through a zero_extend from a 32-bit value.
    if (ByteOffsetNode.getOpcode() == ISD::ZERO_EXTEND) {
      if (ByteOffsetNode.getOperand(0).getValueType().getSizeInBits() == 32) {
        Offset = ByteOffsetNode.getOperand(0);
        Imm = false;
        return true;
      }
    }
    return false;
  }

  SDLoc SL(ByteOffsetNode);
  // GFX9 and GFX10 have signed byte immediate offsets.
  int64_t ByteOffset = C->getSExtValue();
  Optional<int64_t> EncodedOffset =
      AMDGPU::getSMRDEncodedOffset(*Subtarget, ByteOffset, false);
  if (EncodedOffset) {
    Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
    Imm = true;
    return true;
  }

  // SGPR and literal offsets are unsigned.
  if (ByteOffset < 0)
    return false;

  // Try the 32-bit literal encoding. Note Imm is deliberately not set here:
  // this is a literal offset, not an immediate-field offset.
  EncodedOffset = AMDGPU::getSMRDEncodedLiteralOffset32(*Subtarget, ByteOffset);
  if (EncodedOffset) {
    Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
    return true;
  }

  if (!isUInt<32>(ByteOffset) && !isInt<32>(ByteOffset))
    return false;

  // Fall back to materializing the offset into an SGPR with S_MOV_B32.
  SDValue C32Bit = CurDAG->getTargetConstant(ByteOffset, SL, MVT::i32);
  Offset = SDValue(
      CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, C32Bit), 0);

  return true;
}
1912 
1913 SDValue AMDGPUDAGToDAGISel::Expand32BitAddress(SDValue Addr) const {
1914   if (Addr.getValueType() != MVT::i32)
1915     return Addr;
1916 
1917   // Zero-extend a 32-bit address.
1918   SDLoc SL(Addr);
1919 
1920   const MachineFunction &MF = CurDAG->getMachineFunction();
1921   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1922   unsigned AddrHiVal = Info->get32BitAddressHighBits();
1923   SDValue AddrHi = CurDAG->getTargetConstant(AddrHiVal, SL, MVT::i32);
1924 
1925   const SDValue Ops[] = {
1926     CurDAG->getTargetConstant(AMDGPU::SReg_64_XEXECRegClassID, SL, MVT::i32),
1927     Addr,
1928     CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32),
1929     SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, AddrHi),
1930             0),
1931     CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32),
1932   };
1933 
1934   return SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, SL, MVT::i64,
1935                                         Ops), 0);
1936 }
1937 
// Select an SMRD base + offset. Always succeeds: if no base/offset split is
// possible, the whole address becomes the base with a zero immediate offset.
bool AMDGPUDAGToDAGISel::SelectSMRD(SDValue Addr, SDValue &SBase,
                                     SDValue &Offset, bool &Imm) const {
  SDLoc SL(Addr);

  // A 32-bit (address + offset) should not cause unsigned 32-bit integer
  // wraparound, because s_load instructions perform the addition in 64 bits.
  if ((Addr.getValueType() != MVT::i32 ||
       Addr->getFlags().hasNoUnsignedWrap())) {
    SDValue N0, N1;
    // Extract the base and offset if possible.
    if (CurDAG->isBaseWithConstantOffset(Addr) ||
        Addr.getOpcode() == ISD::ADD) {
      N0 = Addr.getOperand(0);
      N1 = Addr.getOperand(1);
    } else if (getBaseWithOffsetUsingSplitOR(*CurDAG, Addr, N0, N1)) {
      assert(N0 && N1 && isa<ConstantSDNode>(N1));
    }
    if (N0 && N1) {
      if (SelectSMRDOffset(N1, Offset, Imm)) {
        SBase = Expand32BitAddress(N0);
        return true;
      }
    }
  }
  // Fallback: whole address as the base, zero immediate offset.
  SBase = Expand32BitAddress(Addr);
  Offset = CurDAG->getTargetConstant(0, SL, MVT::i32);
  Imm = true;
  return true;
}
1967 
// Select an SMRD access whose offset fits the instruction's immediate field.
bool AMDGPUDAGToDAGISel::SelectSMRDImm(SDValue Addr, SDValue &SBase,
                                       SDValue &Offset) const {
  bool Imm = false;
  return SelectSMRD(Addr, SBase, Offset, Imm) && Imm;
}
1973 
1974 bool AMDGPUDAGToDAGISel::SelectSMRDImm32(SDValue Addr, SDValue &SBase,
1975                                          SDValue &Offset) const {
1976 
1977   assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
1978 
1979   bool Imm = false;
1980   if (!SelectSMRD(Addr, SBase, Offset, Imm))
1981     return false;
1982 
1983   return !Imm && isa<ConstantSDNode>(Offset);
1984 }
1985 
// Select an SMRD access whose offset lives in an SGPR (neither an
// immediate-field value nor a constant literal).
bool AMDGPUDAGToDAGISel::SelectSMRDSgpr(SDValue Addr, SDValue &SBase,
                                        SDValue &Offset) const {
  bool Imm = false;
  return SelectSMRD(Addr, SBase, Offset, Imm) && !Imm &&
         !isa<ConstantSDNode>(Offset);
}
1992 
1993 bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm(SDValue Addr,
1994                                              SDValue &Offset) const {
1995   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr)) {
1996     // The immediate offset for S_BUFFER instructions is unsigned.
1997     if (auto Imm =
1998             AMDGPU::getSMRDEncodedOffset(*Subtarget, C->getZExtValue(), true)) {
1999       Offset = CurDAG->getTargetConstant(*Imm, SDLoc(Addr), MVT::i32);
2000       return true;
2001     }
2002   }
2003 
2004   return false;
2005 }
2006 
2007 bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm32(SDValue Addr,
2008                                                SDValue &Offset) const {
2009   assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
2010 
2011   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr)) {
2012     if (auto Imm = AMDGPU::getSMRDEncodedLiteralOffset32(*Subtarget,
2013                                                          C->getZExtValue())) {
2014       Offset = CurDAG->getTargetConstant(*Imm, SDLoc(Addr), MVT::i32);
2015       return true;
2016     }
2017   }
2018 
2019   return false;
2020 }
2021 
// Split an index into base register + constant offset for MOVREL indexing,
// refusing splits that could make the base negative.
bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index,
                                            SDValue &Base,
                                            SDValue &Offset) const {
  SDLoc DL(Index);

  if (CurDAG->isBaseWithConstantOffset(Index)) {
    SDValue N0 = Index.getOperand(0);
    SDValue N1 = Index.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    // (add n0, c0)
    // Don't peel off the offset (c0) if doing so could possibly lead
    // the base (n0) to be negative.
    // (or n0, |c0|) can never change a sign given isBaseWithConstantOffset.
    if (C1->getSExtValue() <= 0 || CurDAG->SignBitIsZero(N0) ||
        (Index->getOpcode() == ISD::OR && C1->getSExtValue() >= 0)) {
      Base = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32);
      return true;
    }
  }

  // A bare constant index is not selectable here.
  if (isa<ConstantSDNode>(Index))
    return false;

  // Fallback: whole index as the base, zero offset.
  Base = Index;
  Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  return true;
}
2051 
2052 SDNode *AMDGPUDAGToDAGISel::getBFE32(bool IsSigned, const SDLoc &DL,
2053                                      SDValue Val, uint32_t Offset,
2054                                      uint32_t Width) {
2055   if (Val->isDivergent()) {
2056     unsigned Opcode = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2057     SDValue Off = CurDAG->getTargetConstant(Offset, DL, MVT::i32);
2058     SDValue W = CurDAG->getTargetConstant(Width, DL, MVT::i32);
2059 
2060     return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, Off, W);
2061   }
2062   unsigned Opcode = IsSigned ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2063   // Transformation function, pack the offset and width of a BFE into
2064   // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
2065   // source, bits [5:0] contain the offset and bits [22:16] the width.
2066   uint32_t PackedVal = Offset | (Width << 16);
2067   SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32);
2068 
2069   return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
2070 }
2071 
// Fold a shift-left followed by a right shift into a single bitfield
// extract; falls back to normal table-driven selection when the predicate
// does not hold.
void AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
  // "(a << b) srl c)" ---> "BFE_U32 a, (c-b), (32-c)
  // "(a << b) sra c)" ---> "BFE_I32 a, (c-b), (32-c)
  // Predicate: 0 < b <= c < 32

  const SDValue &Shl = N->getOperand(0);
  ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));

  if (B && C) {
    uint32_t BVal = B->getZExtValue();
    uint32_t CVal = C->getZExtValue();

    if (0 < BVal && BVal <= CVal && CVal < 32) {
      // SRA -> signed extract, SRL -> unsigned extract.
      bool Signed = N->getOpcode() == ISD::SRA;
      ReplaceNode(N, getBFE32(Signed, SDLoc(N), Shl.getOperand(0), CVal - BVal,
                  32 - CVal));
      return;
    }
  }
  SelectCode(N);
}
2094 
// Try to select AND/SRL/SRA/SIGN_EXTEND_INREG patterns as bitfield extracts
// (via getBFE32); anything that doesn't match falls through to SelectCode.
void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::AND:
    if (N->getOperand(0).getOpcode() == ISD::SRL) {
      // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
      // Predicate: isMask(mask)
      const SDValue &Srl = N->getOperand(0);
      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));

      if (Shift && Mask) {
        uint32_t ShiftVal = Shift->getZExtValue();
        uint32_t MaskVal = Mask->getZExtValue();

        if (isMask_32(MaskVal)) {
          // Width of the extract is the number of set bits in the mask.
          uint32_t WidthVal = countPopulation(MaskVal);
          ReplaceNode(N, getBFE32(false, SDLoc(N), Srl.getOperand(0), ShiftVal,
                                  WidthVal));
          return;
        }
      }
    }
    break;
  case ISD::SRL:
    if (N->getOperand(0).getOpcode() == ISD::AND) {
      // "(a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)"
      // Predicate: isMask(mask >> b)
      const SDValue &And = N->getOperand(0);
      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));

      if (Shift && Mask) {
        uint32_t ShiftVal = Shift->getZExtValue();
        // The mask is applied before the shift, so shift it down first.
        uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;

        if (isMask_32(MaskVal)) {
          uint32_t WidthVal = countPopulation(MaskVal);
          ReplaceNode(N, getBFE32(false, SDLoc(N), And.getOperand(0), ShiftVal,
                      WidthVal));
          return;
        }
      }
    } else if (N->getOperand(0).getOpcode() == ISD::SHL) {
      // (shl) followed by (srl) is handled by the shift-pair matcher.
      SelectS_BFEFromShifts(N);
      return;
    }
    break;
  case ISD::SRA:
    if (N->getOperand(0).getOpcode() == ISD::SHL) {
      SelectS_BFEFromShifts(N);
      return;
    }
    break;

  case ISD::SIGN_EXTEND_INREG: {
    // sext_inreg (srl x, 16), i8 -> bfe_i32 x, 16, 8
    SDValue Src = N->getOperand(0);
    if (Src.getOpcode() != ISD::SRL)
      break;

    const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
    if (!Amt)
      break;

    // Width comes from the type being sign-extended from.
    unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
    ReplaceNode(N, getBFE32(true, SDLoc(N), Src.getOperand(0),
                            Amt->getZExtValue(), Width));
    return;
  }
  }

  SelectCode(N);
}
2168 
2169 bool AMDGPUDAGToDAGISel::isCBranchSCC(const SDNode *N) const {
2170   assert(N->getOpcode() == ISD::BRCOND);
2171   if (!N->hasOneUse())
2172     return false;
2173 
2174   SDValue Cond = N->getOperand(1);
2175   if (Cond.getOpcode() == ISD::CopyToReg)
2176     Cond = Cond.getOperand(2);
2177 
2178   if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
2179     return false;
2180 
2181   MVT VT = Cond.getOperand(0).getSimpleValueType();
2182   if (VT == MVT::i32)
2183     return true;
2184 
2185   if (VT == MVT::i64) {
2186     auto ST = static_cast<const GCNSubtarget *>(Subtarget);
2187 
2188     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
2189     return (CC == ISD::SETEQ || CC == ISD::SETNE) && ST->hasScalarCompareEq64();
2190   }
2191 
2192   return false;
2193 }
2194 
// Lower a BRCOND either to an SCC-based scalar branch or a VCC-based branch
// (masking the condition with EXEC in the VCC case).
void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
  SDValue Cond = N->getOperand(1);

  // An undef condition degenerates to SI_BR_UNDEF.
  if (Cond.isUndef()) {
    CurDAG->SelectNodeTo(N, AMDGPU::SI_BR_UNDEF, MVT::Other,
                         N->getOperand(2), N->getOperand(0));
    return;
  }

  const GCNSubtarget *ST = static_cast<const GCNSubtarget *>(Subtarget);
  const SIRegisterInfo *TRI = ST->getRegisterInfo();

  // SCC branches require both an SCC-compatible compare and a uniform branch.
  bool UseSCCBr = isCBranchSCC(N) && isUniformBr(N);
  unsigned BrOp = UseSCCBr ? AMDGPU::S_CBRANCH_SCC1 : AMDGPU::S_CBRANCH_VCCNZ;
  Register CondReg = UseSCCBr ? AMDGPU::SCC : TRI->getVCC();
  SDLoc SL(N);

  if (!UseSCCBr) {
    // This is the case that we are selecting to S_CBRANCH_VCCNZ.  We have not
    // analyzed what generates the vcc value, so we do not know whether vcc
    // bits for disabled lanes are 0.  Thus we need to mask out bits for
    // disabled lanes.
    //
    // For the case that we select S_CBRANCH_SCC1 and it gets
    // changed to S_CBRANCH_VCCNZ in SIFixSGPRCopies, SIFixSGPRCopies calls
    // SIInstrInfo::moveToVALU which inserts the S_AND).
    //
    // We could add an analysis of what generates the vcc value here and omit
    // the S_AND when is unnecessary. But it would be better to add a separate
    // pass after SIFixSGPRCopies to do the unnecessary S_AND removal, so it
    // catches both cases.
    Cond = SDValue(CurDAG->getMachineNode(ST->isWave32() ? AMDGPU::S_AND_B32
                                                         : AMDGPU::S_AND_B64,
                     SL, MVT::i1,
                     CurDAG->getRegister(ST->isWave32() ? AMDGPU::EXEC_LO
                                                        : AMDGPU::EXEC,
                                         MVT::i1),
                    Cond),
                   0);
  }

  // Copy the (possibly masked) condition into SCC/VCC and emit the branch.
  SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, CondReg, Cond);
  CurDAG->SelectNodeTo(N, BrOp, MVT::Other,
                       N->getOperand(2), // Basic Block
                       VCC.getValue(0));
}
2241 
// Try to select f32 FMAD/FMA as V_MAD_MIX_F32 / V_FMA_MIX_F32 when at least
// one operand uses the mixed-precision (f16 conversion) modifiers; otherwise
// fall through to normal selection.
void AMDGPUDAGToDAGISel::SelectFMAD_FMA(SDNode *N) {
  MVT VT = N->getSimpleValueType(0);
  bool IsFMA = N->getOpcode() == ISD::FMA;
  // Bail to table-driven selection unless the type is f32 and the subtarget
  // has the mix instruction matching this opcode (mad-mix for FMAD,
  // fma-mix for FMA).
  if (VT != MVT::f32 || (!Subtarget->hasMadMixInsts() &&
                         !Subtarget->hasFmaMixInsts()) ||
      ((IsFMA && Subtarget->hasMadMixInsts()) ||
       (!IsFMA && Subtarget->hasFmaMixInsts()))) {
    SelectCode(N);
    return;
  }

  SDValue Src0 = N->getOperand(0);
  SDValue Src1 = N->getOperand(1);
  SDValue Src2 = N->getOperand(2);
  unsigned Src0Mods, Src1Mods, Src2Mods;

  // Avoid using v_mad_mix_f32/v_fma_mix_f32 unless there is actually an operand
  // using the conversion from f16.
  bool Sel0 = SelectVOP3PMadMixModsImpl(Src0, Src0, Src0Mods);
  bool Sel1 = SelectVOP3PMadMixModsImpl(Src1, Src1, Src1Mods);
  bool Sel2 = SelectVOP3PMadMixModsImpl(Src2, Src2, Src2Mods);

  assert((IsFMA || !Mode.allFP32Denormals()) &&
         "fmad selected with denormals enabled");
  // TODO: We can select this with f32 denormals enabled if all the sources are
  // converted from f16 (in which case fmad isn't legal).

  if (Sel0 || Sel1 || Sel2) {
    // For dummy operands.
    SDValue Zero = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
    SDValue Ops[] = {
      CurDAG->getTargetConstant(Src0Mods, SDLoc(), MVT::i32), Src0,
      CurDAG->getTargetConstant(Src1Mods, SDLoc(), MVT::i32), Src1,
      CurDAG->getTargetConstant(Src2Mods, SDLoc(), MVT::i32), Src2,
      CurDAG->getTargetConstant(0, SDLoc(), MVT::i1),
      Zero, Zero
    };

    CurDAG->SelectNodeTo(N,
                         IsFMA ? AMDGPU::V_FMA_MIX_F32 : AMDGPU::V_MAD_MIX_F32,
                         MVT::f32, Ops);
  } else {
    // No operand needed the f16 conversion modifiers; use normal selection.
    SelectCode(N);
  }
}
2287 
// Select ds_append/ds_consume intrinsics: the pointer goes to M0 (via
// glueCopyToM0) with an optional folded constant offset.
void AMDGPUDAGToDAGISel::SelectDSAppendConsume(SDNode *N, unsigned IntrID) {
  // The address is assumed to be uniform, so if it ends up in a VGPR, it will
  // be copied to an SGPR with readfirstlane.
  unsigned Opc = IntrID == Intrinsic::amdgcn_ds_append ?
    AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;

  SDValue Chain = N->getOperand(0);
  SDValue Ptr = N->getOperand(2);
  MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
  MachineMemOperand *MMO = M->getMemOperand();
  bool IsGDS = M->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;

  // If the pointer splits into base + legal DS offset, put only the base in
  // M0 and encode the offset in the instruction.
  SDValue Offset;
  if (CurDAG->isBaseWithConstantOffset(Ptr)) {
    SDValue PtrBase = Ptr.getOperand(0);
    SDValue PtrOffset = Ptr.getOperand(1);

    const APInt &OffsetVal = cast<ConstantSDNode>(PtrOffset)->getAPIntValue();
    if (isDSOffsetLegal(PtrBase, OffsetVal.getZExtValue())) {
      N = glueCopyToM0(N, PtrBase);
      Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i32);
    }
  }

  // Otherwise the whole pointer goes in M0 with a zero offset.
  if (!Offset) {
    N = glueCopyToM0(N, Ptr);
    Offset = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
  }

  SDValue Ops[] = {
    Offset,
    CurDAG->getTargetConstant(IsGDS, SDLoc(), MVT::i32),
    Chain,
    N->getOperand(N->getNumOperands() - 1) // New glue
  };

  SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
}
2327 
2328 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
2329   switch (IntrID) {
2330   case Intrinsic::amdgcn_ds_gws_init:
2331     return AMDGPU::DS_GWS_INIT;
2332   case Intrinsic::amdgcn_ds_gws_barrier:
2333     return AMDGPU::DS_GWS_BARRIER;
2334   case Intrinsic::amdgcn_ds_gws_sema_v:
2335     return AMDGPU::DS_GWS_SEMA_V;
2336   case Intrinsic::amdgcn_ds_gws_sema_br:
2337     return AMDGPU::DS_GWS_SEMA_BR;
2338   case Intrinsic::amdgcn_ds_gws_sema_p:
2339     return AMDGPU::DS_GWS_SEMA_P;
2340   case Intrinsic::amdgcn_ds_gws_sema_release_all:
2341     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
2342   default:
2343     llvm_unreachable("not a gws intrinsic");
2344   }
2345 }
2346 
// Select a GWS intrinsic: move the resource-id base into M0 and encode any
// constant part in the instruction's offset field.
void AMDGPUDAGToDAGISel::SelectDS_GWS(SDNode *N, unsigned IntrID) {
  if (IntrID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
      !Subtarget->hasGWSSemaReleaseAll()) {
    // Let this error.
    SelectCode(N);
    return;
  }

  // Chain, intrinsic ID, vsrc, offset
  const bool HasVSrc = N->getNumOperands() == 4;
  assert(HasVSrc || N->getNumOperands() == 3);

  SDLoc SL(N);
  SDValue BaseOffset = N->getOperand(HasVSrc ? 3 : 2);
  int ImmOffset = 0;
  MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
  MachineMemOperand *MMO = M->getMemOperand();

  // Don't worry if the offset ends up in a VGPR. Only one lane will have
  // effect, so SIFixSGPRCopies will validly insert readfirstlane.

  // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
  // offset field) % 64. Some versions of the programming guide omit the m0
  // part, or claim it's from offset 0.
  if (ConstantSDNode *ConstOffset = dyn_cast<ConstantSDNode>(BaseOffset)) {
    // If we have a constant offset, try to use the 0 in m0 as the base.
    // TODO: Look into changing the default m0 initialization value. If the
    // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
    // the immediate offset.
    glueCopyToM0(N, CurDAG->getTargetConstant(0, SL, MVT::i32));
    ImmOffset = ConstOffset->getZExtValue();
  } else {
    // Peel a constant addend into the immediate offset field if present.
    if (CurDAG->isBaseWithConstantOffset(BaseOffset)) {
      ImmOffset = BaseOffset.getConstantOperandVal(1);
      BaseOffset = BaseOffset.getOperand(0);
    }

    // Prefer to do the shift in an SGPR since it should be possible to use m0
    // as the result directly. If it's already an SGPR, it will be eliminated
    // later.
    SDNode *SGPROffset
      = CurDAG->getMachineNode(AMDGPU::V_READFIRSTLANE_B32, SL, MVT::i32,
                               BaseOffset);
    // Shift to offset in m0
    SDNode *M0Base
      = CurDAG->getMachineNode(AMDGPU::S_LSHL_B32, SL, MVT::i32,
                               SDValue(SGPROffset, 0),
                               CurDAG->getTargetConstant(16, SL, MVT::i32));
    glueCopyToM0(N, SDValue(M0Base, 0));
  }

  SDValue Chain = N->getOperand(0);
  SDValue OffsetField = CurDAG->getTargetConstant(ImmOffset, SL, MVT::i32);

  // Operand order: [vsrc,] offset, chain.
  const unsigned Opc = gwsIntrinToOpcode(IntrID);
  SmallVector<SDValue, 5> Ops;
  if (HasVSrc)
    Ops.push_back(N->getOperand(2));
  Ops.push_back(OffsetField);
  Ops.push_back(Chain);

  SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
}
2411 
/// Custom selection for llvm.amdgcn.interp.p1.f16 on 16-bank-LDS subtargets.
/// Emits a V_INTERP_MOV_F32 + V_INTERP_P1LV_F16 pair glued to a single copy
/// into M0; on all other subtargets the generated matcher handles it.
void AMDGPUDAGToDAGISel::SelectInterpP1F16(SDNode *N) {
  if (Subtarget->getLDSBankCount() != 16) {
    // This is a single instruction with a pattern.
    SelectCode(N);
    return;
  }

  SDLoc DL(N);

  // This requires 2 instructions. It is possible to write a pattern to support
  // this, but the generated isel emitter doesn't correctly deal with multiple
  // output instructions using the same physical register input. The copy to m0
  // is incorrectly placed before the second instruction.
  //
  // TODO: Match source modifiers.
  //
  // def : Pat <
  //   (int_amdgcn_interp_p1_f16
  //    (VOP3Mods f32:$src0, i32:$src0_modifiers),
  //                             (i32 timm:$attrchan), (i32 timm:$attr),
  //                             (i1 timm:$high), M0),
  //   (V_INTERP_P1LV_F16 $src0_modifiers, VGPR_32:$src0, timm:$attr,
  //       timm:$attrchan, 0,
  //       (V_INTERP_MOV_F32 2, timm:$attr, timm:$attrchan), timm:$high)> {
  //   let Predicates = [has16BankLDS];
  // }

  // 16 bank LDS
  // Copy the M0 operand (operand 5) into the physical M0 register; the glue
  // result orders both interp instructions after this copy.
  SDValue ToM0 = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, AMDGPU::M0,
                                      N->getOperand(5), SDValue());

  SDVTList VTs = CurDAG->getVTList(MVT::f32, MVT::Other);

  SDNode *InterpMov =
    CurDAG->getMachineNode(AMDGPU::V_INTERP_MOV_F32, DL, VTs, {
        CurDAG->getTargetConstant(2, DL, MVT::i32), // P0
        N->getOperand(3),  // Attr
        N->getOperand(2),  // Attrchan
        ToM0.getValue(1) // In glue
  });

  // Second instruction consumes InterpMov's value (as Src2) and its chain as
  // glue so the M0 copy stays ahead of it as well.
  SDNode *InterpP1LV =
    CurDAG->getMachineNode(AMDGPU::V_INTERP_P1LV_F16, DL, MVT::f32, {
        CurDAG->getTargetConstant(0, DL, MVT::i32), // $src0_modifiers
        N->getOperand(1), // Src0
        N->getOperand(3), // Attr
        N->getOperand(2), // Attrchan
        CurDAG->getTargetConstant(0, DL, MVT::i32), // $src2_modifiers
        SDValue(InterpMov, 0), // Src2 - holds two f16 values selected by high
        N->getOperand(4), // high
        CurDAG->getTargetConstant(0, DL, MVT::i1), // $clamp
        CurDAG->getTargetConstant(0, DL, MVT::i32), // $omod
        SDValue(InterpMov, 1)
  });

  CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), SDValue(InterpP1LV, 0));
}
2469 
2470 void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) {
2471   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2472   switch (IntrID) {
2473   case Intrinsic::amdgcn_ds_append:
2474   case Intrinsic::amdgcn_ds_consume: {
2475     if (N->getValueType(0) != MVT::i32)
2476       break;
2477     SelectDSAppendConsume(N, IntrID);
2478     return;
2479   }
2480   }
2481 
2482   SelectCode(N);
2483 }
2484 
2485 void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
2486   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
2487   unsigned Opcode;
2488   switch (IntrID) {
2489   case Intrinsic::amdgcn_wqm:
2490     Opcode = AMDGPU::WQM;
2491     break;
2492   case Intrinsic::amdgcn_softwqm:
2493     Opcode = AMDGPU::SOFT_WQM;
2494     break;
2495   case Intrinsic::amdgcn_wwm:
2496   case Intrinsic::amdgcn_strict_wwm:
2497     Opcode = AMDGPU::STRICT_WWM;
2498     break;
2499   case Intrinsic::amdgcn_strict_wqm:
2500     Opcode = AMDGPU::STRICT_WQM;
2501     break;
2502   case Intrinsic::amdgcn_interp_p1_f16:
2503     SelectInterpP1F16(N);
2504     return;
2505   default:
2506     SelectCode(N);
2507     return;
2508   }
2509 
2510   SDValue Src = N->getOperand(1);
2511   CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), {Src});
2512 }
2513 
2514 void AMDGPUDAGToDAGISel::SelectINTRINSIC_VOID(SDNode *N) {
2515   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2516   switch (IntrID) {
2517   case Intrinsic::amdgcn_ds_gws_init:
2518   case Intrinsic::amdgcn_ds_gws_barrier:
2519   case Intrinsic::amdgcn_ds_gws_sema_v:
2520   case Intrinsic::amdgcn_ds_gws_sema_br:
2521   case Intrinsic::amdgcn_ds_gws_sema_p:
2522   case Intrinsic::amdgcn_ds_gws_sema_release_all:
2523     SelectDS_GWS(N, IntrID);
2524     return;
2525   default:
2526     break;
2527   }
2528 
2529   SelectCode(N);
2530 }
2531 
2532 bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
2533                                             unsigned &Mods,
2534                                             bool AllowAbs) const {
2535   Mods = 0;
2536   Src = In;
2537 
2538   if (Src.getOpcode() == ISD::FNEG) {
2539     Mods |= SISrcMods::NEG;
2540     Src = Src.getOperand(0);
2541   }
2542 
2543   if (AllowAbs && Src.getOpcode() == ISD::FABS) {
2544     Mods |= SISrcMods::ABS;
2545     Src = Src.getOperand(0);
2546   }
2547 
2548   return true;
2549 }
2550 
2551 bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
2552                                         SDValue &SrcMods) const {
2553   unsigned Mods;
2554   if (SelectVOP3ModsImpl(In, Src, Mods)) {
2555     SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2556     return true;
2557   }
2558 
2559   return false;
2560 }
2561 
2562 bool AMDGPUDAGToDAGISel::SelectVOP3BMods(SDValue In, SDValue &Src,
2563                                          SDValue &SrcMods) const {
2564   unsigned Mods;
2565   if (SelectVOP3ModsImpl(In, Src, Mods, /* AllowAbs */ false)) {
2566     SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2567     return true;
2568   }
2569 
2570   return false;
2571 }
2572 
2573 bool AMDGPUDAGToDAGISel::SelectVOP3Mods_NNaN(SDValue In, SDValue &Src,
2574                                              SDValue &SrcMods) const {
2575   SelectVOP3Mods(In, Src, SrcMods);
2576   return isNoNanSrc(Src);
2577 }
2578 
2579 bool AMDGPUDAGToDAGISel::SelectVOP3NoMods(SDValue In, SDValue &Src) const {
2580   if (In.getOpcode() == ISD::FABS || In.getOpcode() == ISD::FNEG)
2581     return false;
2582 
2583   Src = In;
2584   return true;
2585 }
2586 
2587 bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
2588                                          SDValue &SrcMods, SDValue &Clamp,
2589                                          SDValue &Omod) const {
2590   SDLoc DL(In);
2591   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2592   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2593 
2594   return SelectVOP3Mods(In, Src, SrcMods);
2595 }
2596 
2597 bool AMDGPUDAGToDAGISel::SelectVOP3BMods0(SDValue In, SDValue &Src,
2598                                           SDValue &SrcMods, SDValue &Clamp,
2599                                           SDValue &Omod) const {
2600   SDLoc DL(In);
2601   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2602   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2603 
2604   return SelectVOP3BMods(In, Src, SrcMods);
2605 }
2606 
2607 bool AMDGPUDAGToDAGISel::SelectVOP3OMods(SDValue In, SDValue &Src,
2608                                          SDValue &Clamp, SDValue &Omod) const {
2609   Src = In;
2610 
2611   SDLoc DL(In);
2612   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2613   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2614 
2615   return true;
2616 }
2617 
2618 bool AMDGPUDAGToDAGISel::SelectVOP3PMods(SDValue In, SDValue &Src,
2619                                          SDValue &SrcMods, bool IsDOT) const {
2620   unsigned Mods = 0;
2621   Src = In;
2622 
2623   if (Src.getOpcode() == ISD::FNEG) {
2624     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
2625     Src = Src.getOperand(0);
2626   }
2627 
2628   if (Src.getOpcode() == ISD::BUILD_VECTOR &&
2629       (!IsDOT || !Subtarget->hasDOTOpSelHazard())) {
2630     unsigned VecMods = Mods;
2631 
2632     SDValue Lo = stripBitcast(Src.getOperand(0));
2633     SDValue Hi = stripBitcast(Src.getOperand(1));
2634 
2635     if (Lo.getOpcode() == ISD::FNEG) {
2636       Lo = stripBitcast(Lo.getOperand(0));
2637       Mods ^= SISrcMods::NEG;
2638     }
2639 
2640     if (Hi.getOpcode() == ISD::FNEG) {
2641       Hi = stripBitcast(Hi.getOperand(0));
2642       Mods ^= SISrcMods::NEG_HI;
2643     }
2644 
2645     if (isExtractHiElt(Lo, Lo))
2646       Mods |= SISrcMods::OP_SEL_0;
2647 
2648     if (isExtractHiElt(Hi, Hi))
2649       Mods |= SISrcMods::OP_SEL_1;
2650 
2651     unsigned VecSize = Src.getValueSizeInBits();
2652     Lo = stripExtractLoElt(Lo);
2653     Hi = stripExtractLoElt(Hi);
2654 
2655     if (Lo.getValueSizeInBits() > VecSize) {
2656       Lo = CurDAG->getTargetExtractSubreg(
2657         (VecSize > 32) ? AMDGPU::sub0_sub1 : AMDGPU::sub0, SDLoc(In),
2658         MVT::getIntegerVT(VecSize), Lo);
2659     }
2660 
2661     if (Hi.getValueSizeInBits() > VecSize) {
2662       Hi = CurDAG->getTargetExtractSubreg(
2663         (VecSize > 32) ? AMDGPU::sub0_sub1 : AMDGPU::sub0, SDLoc(In),
2664         MVT::getIntegerVT(VecSize), Hi);
2665     }
2666 
2667     assert(Lo.getValueSizeInBits() <= VecSize &&
2668            Hi.getValueSizeInBits() <= VecSize);
2669 
2670     if (Lo == Hi && !isInlineImmediate(Lo.getNode())) {
2671       // Really a scalar input. Just select from the low half of the register to
2672       // avoid packing.
2673 
2674       if (VecSize == 32 || VecSize == Lo.getValueSizeInBits()) {
2675         Src = Lo;
2676       } else {
2677         assert(Lo.getValueSizeInBits() == 32 && VecSize == 64);
2678 
2679         SDLoc SL(In);
2680         SDValue Undef = SDValue(
2681           CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, SL,
2682                                  Lo.getValueType()), 0);
2683         auto RC = Lo->isDivergent() ? AMDGPU::VReg_64RegClassID
2684                                     : AMDGPU::SReg_64RegClassID;
2685         const SDValue Ops[] = {
2686           CurDAG->getTargetConstant(RC, SL, MVT::i32),
2687           Lo, CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32),
2688           Undef, CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32) };
2689 
2690         Src = SDValue(CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SL,
2691                                              Src.getValueType(), Ops), 0);
2692       }
2693       SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2694       return true;
2695     }
2696 
2697     if (VecSize == 64 && Lo == Hi && isa<ConstantFPSDNode>(Lo)) {
2698       uint64_t Lit = cast<ConstantFPSDNode>(Lo)->getValueAPF()
2699                       .bitcastToAPInt().getZExtValue();
2700       if (AMDGPU::isInlinableLiteral32(Lit, Subtarget->hasInv2PiInlineImm())) {
2701         Src = CurDAG->getTargetConstant(Lit, SDLoc(In), MVT::i64);;
2702         SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2703         return true;
2704       }
2705     }
2706 
2707     Mods = VecMods;
2708   }
2709 
2710   // Packed instructions do not have abs modifiers.
2711   Mods |= SISrcMods::OP_SEL_1;
2712 
2713   SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2714   return true;
2715 }
2716 
2717 bool AMDGPUDAGToDAGISel::SelectVOP3PModsDOT(SDValue In, SDValue &Src,
2718                                             SDValue &SrcMods) const {
2719   return SelectVOP3PMods(In, Src, SrcMods, true);
2720 }
2721 
2722 bool AMDGPUDAGToDAGISel::SelectVOP3OpSel(SDValue In, SDValue &Src,
2723                                          SDValue &SrcMods) const {
2724   Src = In;
2725   // FIXME: Handle op_sel
2726   SrcMods = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
2727   return true;
2728 }
2729 
2730 bool AMDGPUDAGToDAGISel::SelectVOP3OpSelMods(SDValue In, SDValue &Src,
2731                                              SDValue &SrcMods) const {
2732   // FIXME: Handle op_sel
2733   return SelectVOP3Mods(In, Src, SrcMods);
2734 }
2735 
2736 // The return value is not whether the match is possible (which it always is),
2737 // but whether or not it a conversion is really used.
2738 bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src,
2739                                                    unsigned &Mods) const {
2740   Mods = 0;
2741   SelectVOP3ModsImpl(In, Src, Mods);
2742 
2743   if (Src.getOpcode() == ISD::FP_EXTEND) {
2744     Src = Src.getOperand(0);
2745     assert(Src.getValueType() == MVT::f16);
2746     Src = stripBitcast(Src);
2747 
2748     // Be careful about folding modifiers if we already have an abs. fneg is
2749     // applied last, so we don't want to apply an earlier fneg.
2750     if ((Mods & SISrcMods::ABS) == 0) {
2751       unsigned ModsTmp;
2752       SelectVOP3ModsImpl(Src, Src, ModsTmp);
2753 
2754       if ((ModsTmp & SISrcMods::NEG) != 0)
2755         Mods ^= SISrcMods::NEG;
2756 
2757       if ((ModsTmp & SISrcMods::ABS) != 0)
2758         Mods |= SISrcMods::ABS;
2759     }
2760 
2761     // op_sel/op_sel_hi decide the source type and source.
2762     // If the source's op_sel_hi is set, it indicates to do a conversion from fp16.
2763     // If the sources's op_sel is set, it picks the high half of the source
2764     // register.
2765 
2766     Mods |= SISrcMods::OP_SEL_1;
2767     if (isExtractHiElt(Src, Src)) {
2768       Mods |= SISrcMods::OP_SEL_0;
2769 
2770       // TODO: Should we try to look for neg/abs here?
2771     }
2772 
2773     return true;
2774   }
2775 
2776   return false;
2777 }
2778 
2779 bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixMods(SDValue In, SDValue &Src,
2780                                                SDValue &SrcMods) const {
2781   unsigned Mods = 0;
2782   SelectVOP3PMadMixModsImpl(In, Src, Mods);
2783   SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2784   return true;
2785 }
2786 
2787 SDValue AMDGPUDAGToDAGISel::getHi16Elt(SDValue In) const {
2788   if (In.isUndef())
2789     return CurDAG->getUNDEF(MVT::i32);
2790 
2791   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(In)) {
2792     SDLoc SL(In);
2793     return CurDAG->getConstant(C->getZExtValue() << 16, SL, MVT::i32);
2794   }
2795 
2796   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(In)) {
2797     SDLoc SL(In);
2798     return CurDAG->getConstant(
2799       C->getValueAPF().bitcastToAPInt().getZExtValue() << 16, SL, MVT::i32);
2800   }
2801 
2802   SDValue Src;
2803   if (isExtractHiElt(In, Src))
2804     return Src;
2805 
2806   return SDValue();
2807 }
2808 
2809 bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode * N) const {
2810   assert(CurDAG->getTarget().getTargetTriple().getArch() == Triple::amdgcn);
2811 
2812   const SIRegisterInfo *SIRI =
2813     static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
2814   const SIInstrInfo * SII =
2815     static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
2816 
2817   unsigned Limit = 0;
2818   bool AllUsesAcceptSReg = true;
2819   for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
2820     Limit < 10 && U != E; ++U, ++Limit) {
2821     const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
2822 
2823     // If the register class is unknown, it could be an unknown
2824     // register class that needs to be an SGPR, e.g. an inline asm
2825     // constraint
2826     if (!RC || SIRI->isSGPRClass(RC))
2827       return false;
2828 
2829     if (RC != &AMDGPU::VS_32RegClass) {
2830       AllUsesAcceptSReg = false;
2831       SDNode * User = *U;
2832       if (User->isMachineOpcode()) {
2833         unsigned Opc = User->getMachineOpcode();
2834         MCInstrDesc Desc = SII->get(Opc);
2835         if (Desc.isCommutable()) {
2836           unsigned OpIdx = Desc.getNumDefs() + U.getOperandNo();
2837           unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
2838           if (SII->findCommutedOpIndices(Desc, OpIdx, CommuteIdx1)) {
2839             unsigned CommutedOpNo = CommuteIdx1 - Desc.getNumDefs();
2840             const TargetRegisterClass *CommutedRC = getOperandRegClass(*U, CommutedOpNo);
2841             if (CommutedRC == &AMDGPU::VS_32RegClass)
2842               AllUsesAcceptSReg = true;
2843           }
2844         }
2845       }
2846       // If "AllUsesAcceptSReg == false" so far we haven't succeeded
2847       // commuting current user. This means have at least one use
2848       // that strictly require VGPR. Thus, we will not attempt to commute
2849       // other user instructions.
2850       if (!AllUsesAcceptSReg)
2851         break;
2852     }
2853   }
2854   return !AllUsesAcceptSReg && (Limit < 10);
2855 }
2856 
2857 bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode * N) const {
2858   auto Ld = cast<LoadSDNode>(N);
2859 
2860   return Ld->getAlign() >= Align(4) &&
2861          (((Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
2862             Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
2863            !N->isDivergent()) ||
2864           (Subtarget->getScalarizeGlobalBehavior() &&
2865            Ld->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
2866            Ld->isSimple() && !N->isDivergent() &&
2867            static_cast<const SITargetLowering *>(getTargetLowering())
2868                ->isMemOpHasNoClobberedMemOperand(N)));
2869 }
2870 
/// Run target post-isel folding (AMDGPUTargetLowering::PostISelFolding) over
/// every machine node, repeating until a full pass makes no changes.
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering& Lowering =
    *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;

    // Go over all selected nodes and try to fold them a bit more
    SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_begin();
    while (Position != CurDAG->allnodes_end()) {
      // Advance before folding: PostISelFolding/ReplaceUses may mutate the
      // node list, so Position must not still point at the node being folded.
      SDNode *Node = &*Position++;
      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(Node);
      if (!MachineNode)
        continue;

      // A returned node different from the input means something was folded;
      // null means the node was deleted outright.
      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        if (ResNode)
          ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    // Replacements can orphan nodes; drop them before deciding to re-run.
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}
2896