//===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/R600MCTargetDesc.h"
#include "R600.h"
#include "R600Subtarget.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/InitializePasses.h"

#ifdef EXPENSIVE_CHECKS
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Dominators.h"
#endif

#define DEBUG_TYPE "isel"

using namespace llvm;

namespace llvm {

class R600InstrInfo;

} // end namespace llvm

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {

static bool isNullConstantOrUndef(SDValue V) {
  if (V.isUndef())
    return true;

  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isZero();
}

static bool getConstantValue(SDValue N, uint32_t &Out) {
  // This is only used for packed vectors, where using 0 for undef should
  // always be good.
  if (N.isUndef()) {
    Out = 0;
    return true;
  }

  if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
    Out = C->getAPIntValue().getSExtValue();
    return true;
  }

  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N)) {
    Out = C->getValueAPF().bitcastToAPInt().getSExtValue();
    return true;
  }

  return false;
}

// TODO: Handle undef as zero
static SDNode *packConstantV2I16(const SDNode *N, SelectionDAG &DAG,
                                 bool Negate = false) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR && N->getNumOperands() == 2);
  uint32_t LHSVal, RHSVal;
  if (getConstantValue(N->getOperand(0), LHSVal) &&
      getConstantValue(N->getOperand(1), RHSVal)) {
    SDLoc SL(N);
    uint32_t K = Negate ?
      (-LHSVal & 0xffff) | (-RHSVal << 16) :
      (LHSVal & 0xffff) | (RHSVal << 16);
    return DAG.getMachineNode(AMDGPU::S_MOV_B32, SL, N->getValueType(0),
                              DAG.getTargetConstant(K, SL, MVT::i32));
  }

  return nullptr;
}

static SDNode *packNegConstantV2I16(const SDNode *N, SelectionDAG &DAG) {
  return packConstantV2I16(N, DAG, true);
}
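
// A worked example (illustrative only): packing the v2i16 constants <4, 2>
// above gives K = (4 & 0xffff) | (2 << 16) = 0x00020004, so the whole vector
// is materialized by a single "s_mov_b32 dst, 0x20004"; with Negate set, the
// same inputs give K = (-4 & 0xffff) | (-2 << 16) = 0xfffefffc.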

/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const GCNSubtarget *Subtarget;

  // Default FP mode for the current function.
  AMDGPU::SIModeRegisterDefaults Mode;

  bool EnableLateStructurizeCFG;

  // Instructions that will be lowered with a final instruction that zeros the
  // high result bits.
  bool fp16SrcZerosHighBits(unsigned Opc) const;

public:
  explicit AMDGPUDAGToDAGISel(TargetMachine *TM = nullptr,
                              CodeGenOpt::Level OptLevel = CodeGenOpt::Default)
    : SelectionDAGISel(*TM, OptLevel) {
    EnableLateStructurizeCFG = AMDGPUTargetMachine::EnableLateStructurizeCFG;
  }
  ~AMDGPUDAGToDAGISel() override = default;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AMDGPUArgumentUsageInfo>();
    AU.addRequired<LegacyDivergenceAnalysis>();
#ifdef EXPENSIVE_CHECKS
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
#endif
    SelectionDAGISel::getAnalysisUsage(AU);
  }

  bool matchLoadD16FromBuildVector(SDNode *N) const;

  bool runOnMachineFunction(MachineFunction &MF) override;
  void PreprocessISelDAG() override;
  void Select(SDNode *N) override;
  StringRef getPassName() const override;
  void PostprocessISelDAG() override;

protected:
  void SelectBuildVector(SDNode *N, unsigned RegClassID);

private:
  std::pair<SDValue, SDValue> foldFrameIndex(SDValue N) const;
  bool isNoNanSrc(SDValue N) const;
  bool isInlineImmediate(const SDNode *N, bool Negated = false) const;
  bool isNegInlineImmediate(const SDNode *N) const {
    return isInlineImmediate(N, true);
  }

  bool isInlineImmediate16(int64_t Imm) const {
    return AMDGPU::isInlinableLiteral16(Imm, Subtarget->hasInv2PiInlineImm());
  }

  bool isInlineImmediate32(int64_t Imm) const {
    return AMDGPU::isInlinableLiteral32(Imm, Subtarget->hasInv2PiInlineImm());
  }

  bool isInlineImmediate64(int64_t Imm) const {
    return AMDGPU::isInlinableLiteral64(Imm, Subtarget->hasInv2PiInlineImm());
  }

  bool isInlineImmediate(const APFloat &Imm) const {
    return Subtarget->getInstrInfo()->isInlineConstant(Imm);
  }

  bool isVGPRImm(const SDNode *N) const;
  bool isUniformLoad(const SDNode *N) const;
  bool isUniformBr(const SDNode *N) const;

  bool isBaseWithConstantOffset64(SDValue Addr, SDValue &LHS,
                                  SDValue &RHS) const;

  MachineSDNode *buildSMovImm64(SDLoc &DL, uint64_t Val, EVT VT) const;

  SDNode *glueCopyToOp(SDNode *N, SDValue NewChain, SDValue Glue) const;
  SDNode *glueCopyToM0(SDNode *N, SDValue Val) const;
  SDNode *glueCopyToM0LDSInit(SDNode *N) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  virtual bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  virtual bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool isDSOffsetLegal(SDValue Base, unsigned Offset) const;
  bool isDSOffset2Legal(SDValue Base, unsigned Offset0, unsigned Offset1,
                        unsigned Size) const;
  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                 SDValue &Offset1) const;
  bool SelectDS128Bit8ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                  SDValue &Offset1) const;
  bool SelectDSReadWrite2(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                          SDValue &Offset1, unsigned Size) const;
  bool SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                   SDValue &Idxen, SDValue &Addr64) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &SOffset, SDValue &Offset) const;
  bool SelectMUBUFScratchOffen(SDNode *Parent,
                               SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                               SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFScratchOffset(SDNode *Parent,
                                SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                                SDValue &Offset) const;

  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset) const;

  bool SelectFlatOffsetImpl(SDNode *N, SDValue Addr, SDValue &VAddr,
                            SDValue &Offset, uint64_t FlatVariant) const;
  bool SelectFlatOffset(SDNode *N, SDValue Addr, SDValue &VAddr,
                        SDValue &Offset) const;
  bool SelectGlobalOffset(SDNode *N, SDValue Addr, SDValue &VAddr,
                          SDValue &Offset) const;
  bool SelectScratchOffset(SDNode *N, SDValue Addr, SDValue &VAddr,
                           SDValue &Offset) const;
  bool SelectGlobalSAddr(SDNode *N, SDValue Addr, SDValue &SAddr,
                         SDValue &VOffset, SDValue &Offset) const;
  bool SelectScratchSAddr(SDNode *N, SDValue Addr, SDValue &SAddr,
                          SDValue &Offset) const;

  bool SelectSMRDOffset(SDValue ByteOffsetNode, SDValue &Offset,
                        bool &Imm) const;
  SDValue Expand32BitAddress(SDValue Addr) const;
  bool SelectSMRD(SDValue Addr, SDValue &SBase, SDValue &Offset,
                  bool &Imm) const;
  bool SelectSMRDImm(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDImm32(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDSgpr(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDBufferImm(SDValue Addr, SDValue &Offset) const;
  bool SelectSMRDBufferImm32(SDValue Addr, SDValue &Offset) const;
  bool SelectMOVRELOffset(SDValue Index, SDValue &Base, SDValue &Offset) const;

  bool SelectVOP3Mods_NNaN(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3ModsImpl(SDValue In, SDValue &Src, unsigned &SrcMods,
                          bool AllowAbs = true) const;
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3BMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3NoMods(SDValue In, SDValue &Src) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3BMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                        SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3NoMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                         SDValue &Clamp, SDValue &Omod) const;

  bool SelectVOP3OMods(SDValue In, SDValue &Src,
                       SDValue &Clamp, SDValue &Omod) const;

  bool SelectVOP3PMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  bool SelectVOP3OpSel(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  bool SelectVOP3OpSelMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src, unsigned &Mods) const;
  bool SelectVOP3PMadMixMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  SDValue getHi16Elt(SDValue In) const;

  SDValue getMaterializedScalarImm32(int64_t Val, const SDLoc &DL) const;

  void SelectADD_SUB_I64(SDNode *N);
  void SelectAddcSubb(SDNode *N);
  void SelectUADDO_USUBO(SDNode *N);
  void SelectDIV_SCALE(SDNode *N);
  void SelectMAD_64_32(SDNode *N);
  void SelectFMA_W_CHAIN(SDNode *N);
  void SelectFMUL_W_CHAIN(SDNode *N);

  SDNode *getS_BFE(unsigned Opcode, const SDLoc &DL, SDValue Val,
                   uint32_t Offset, uint32_t Width);
  void SelectS_BFEFromShifts(SDNode *N);
  void SelectS_BFE(SDNode *N);
  bool isCBranchSCC(const SDNode *N) const;
  void SelectBRCOND(SDNode *N);
  void SelectFMAD_FMA(SDNode *N);
  void SelectATOMIC_CMP_SWAP(SDNode *N);
  void SelectDSAppendConsume(SDNode *N, unsigned IntrID);
  void SelectDS_GWS(SDNode *N, unsigned IntrID);
  void SelectInterpP1F16(SDNode *N);
  void SelectINTRINSIC_W_CHAIN(SDNode *N);
  void SelectINTRINSIC_WO_CHAIN(SDNode *N);
  void SelectINTRINSIC_VOID(SDNode *N);

protected:
  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};

class R600DAGToDAGISel : public AMDGPUDAGToDAGISel {
  const R600Subtarget *Subtarget;

  bool isConstantLoad(const MemSDNode *N, int cbID) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue &IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue &Offset);
public:
  explicit R600DAGToDAGISel(TargetMachine *TM, CodeGenOpt::Level OptLevel) :
      AMDGPUDAGToDAGISel(TM, OptLevel) {}

  void Select(SDNode *N) override;

  bool SelectADDRIndirect(SDValue Addr, SDValue &Base,
                          SDValue &Offset) override;
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                          SDValue &Offset) override;

  bool runOnMachineFunction(MachineFunction &MF) override;

  void PreprocessISelDAG() override {}

protected:
  // Include the pieces autogenerated from the target description.
#include "R600GenDAGISel.inc"
};

static SDValue stripBitcast(SDValue Val) {
  return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
}

// Figure out if this is really an extract of the high 16-bits of a dword.
static bool isExtractHiElt(SDValue In, SDValue &Out) {
  In = stripBitcast(In);

  if (In.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    if (ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(In.getOperand(1))) {
      if (!Idx->isOne())
        return false;
      Out = In.getOperand(0);
      return true;
    }
  }

  if (In.getOpcode() != ISD::TRUNCATE)
    return false;

  SDValue Srl = In.getOperand(0);
  if (Srl.getOpcode() == ISD::SRL) {
    if (ConstantSDNode *ShiftAmt = dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
      if (ShiftAmt->getZExtValue() == 16) {
        Out = stripBitcast(Srl.getOperand(0));
        return true;
      }
    }
  }

  return false;
}
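
// Illustrative shapes accepted by isExtractHiElt, with V a 32-bit value
// viewed as v2i16:
//   (extract_vector_elt V, 1)   --> Out = V
//   (trunc (srl V, 16))         --> Out = V (bitcasts stripped)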

// Look through operations that obscure just looking at the low 16-bits of the
// same register.
static SDValue stripExtractLoElt(SDValue In) {
  if (In.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    if (ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(In.getOperand(1))) {
      if (Idx->isZero() && In.getValueSizeInBits() <= 32)
        return In.getOperand(0);
    }
  }

  if (In.getOpcode() == ISD::TRUNCATE) {
    SDValue Src = In.getOperand(0);
    if (Src.getValueType().getSizeInBits() == 32)
      return stripBitcast(Src);
  }

  return In;
}
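
// Illustrative shapes looked through by stripExtractLoElt:
//   (extract_vector_elt V, 0)    --> V, when the element fits in 32 bits
//   (trunc Src), Src 32 bits     --> Src, with any bitcast stripped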

}  // end anonymous namespace

INITIALIZE_PASS_BEGIN(AMDGPUDAGToDAGISel, "amdgpu-isel",
                      "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)
INITIALIZE_PASS_DEPENDENCY(AMDGPUArgumentUsageInfo)
INITIALIZE_PASS_DEPENDENCY(AMDGPUPerfHintAnalysis)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
#ifdef EXPENSIVE_CHECKS
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
#endif
INITIALIZE_PASS_END(AMDGPUDAGToDAGISel, "amdgpu-isel",
                    "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)

/// This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine *TM,
                                        CodeGenOpt::Level OptLevel) {
  return new AMDGPUDAGToDAGISel(TM, OptLevel);
}

/// This pass converts a legalized DAG into an R600-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createR600ISelDag(TargetMachine *TM,
                                      CodeGenOpt::Level OptLevel) {
  return new R600DAGToDAGISel(TM, OptLevel);
}

bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
#ifdef EXPENSIVE_CHECKS
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  for (auto &L : LI->getLoopsInPreorder()) {
    assert(L->isLCSSAForm(DT));
  }
#endif
  Subtarget = &MF.getSubtarget<GCNSubtarget>();
  Mode = AMDGPU::SIModeRegisterDefaults(MF.getFunction());
  return SelectionDAGISel::runOnMachineFunction(MF);
}

bool AMDGPUDAGToDAGISel::fp16SrcZerosHighBits(unsigned Opc) const {
  // XXX - only need to list legal operations.
  switch (Opc) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::FCANONICALIZE:
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
  case ISD::FABS:
    // Fabs is lowered to a bit operation, but it's an and which will clear the
    // high bits anyway.
  case ISD::FSQRT:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FPOWI:
  case ISD::FPOW:
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FFLOOR:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case AMDGPUISD::FRACT:
  case AMDGPUISD::CLAMP:
  case AMDGPUISD::COS_HW:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMIN3:
  case AMDGPUISD::FMAX3:
  case AMDGPUISD::FMED3:
  case AMDGPUISD::FMAD_FTZ:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::LDEXP:
    // On gfx10, all 16-bit instructions preserve the high bits.
    return Subtarget->getGeneration() <= AMDGPUSubtarget::GFX9;
  case ISD::FP_ROUND:
    // We may select fptrunc (fma/mad) to mad_mixlo, which does not zero the
    // high bits on gfx9.
    // TODO: If we had the source node we could see if the source was fma/mad
    return Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS;
  case ISD::FMA:
  case ISD::FMAD:
  case AMDGPUISD::DIV_FIXUP:
    return Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS;
  default:
    // fcopysign, select and others may be lowered to 32-bit bit operations
    // which don't zero the high bits.
    return false;
  }
}
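
// For example, per the switch above: an f16 fadd selected on gfx9 or earlier
// is modeled here as writing bits [15:0] of its destination and zeroing bits
// [31:16], so a packed user need not re-clear them; on gfx10 the same
// instruction preserves the high bits instead, and this query returns false.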

bool AMDGPUDAGToDAGISel::matchLoadD16FromBuildVector(SDNode *N) const {
  assert(Subtarget->d16PreservesUnusedBits());
  MVT VT = N->getValueType(0).getSimpleVT();
  if (VT != MVT::v2i16 && VT != MVT::v2f16)
    return false;

  SDValue Lo = N->getOperand(0);
  SDValue Hi = N->getOperand(1);

  LoadSDNode *LdHi = dyn_cast<LoadSDNode>(stripBitcast(Hi));

  // build_vector lo, (load ptr) -> load_d16_hi ptr, lo
  // build_vector lo, (zextload ptr from i8) -> load_d16_hi_u8 ptr, lo
  // build_vector lo, (sextload ptr from i8) -> load_d16_hi_i8 ptr, lo

  // Need to check for possible indirect dependencies on the other half of the
  // vector to avoid introducing a cycle.
  if (LdHi && Hi.hasOneUse() && !LdHi->isPredecessorOf(Lo.getNode())) {
    SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);

    SDValue TiedIn = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Lo);
    SDValue Ops[] = {
      LdHi->getChain(), LdHi->getBasePtr(), TiedIn
    };

    unsigned LoadOp = AMDGPUISD::LOAD_D16_HI;
    if (LdHi->getMemoryVT() == MVT::i8) {
      LoadOp = LdHi->getExtensionType() == ISD::SEXTLOAD ?
        AMDGPUISD::LOAD_D16_HI_I8 : AMDGPUISD::LOAD_D16_HI_U8;
    } else {
      assert(LdHi->getMemoryVT() == MVT::i16);
    }

    SDValue NewLoadHi =
      CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdHi), VTList,
                                  Ops, LdHi->getMemoryVT(),
                                  LdHi->getMemOperand());

    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadHi);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdHi, 1), NewLoadHi.getValue(1));
    return true;
  }

  // build_vector (load ptr), hi -> load_d16_lo ptr, hi
  // build_vector (zextload ptr from i8), hi -> load_d16_lo_u8 ptr, hi
  // build_vector (sextload ptr from i8), hi -> load_d16_lo_i8 ptr, hi
  LoadSDNode *LdLo = dyn_cast<LoadSDNode>(stripBitcast(Lo));
  if (LdLo && Lo.hasOneUse()) {
    SDValue TiedIn = getHi16Elt(Hi);
    if (!TiedIn || LdLo->isPredecessorOf(TiedIn.getNode()))
      return false;

    SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);
    unsigned LoadOp = AMDGPUISD::LOAD_D16_LO;
    if (LdLo->getMemoryVT() == MVT::i8) {
      LoadOp = LdLo->getExtensionType() == ISD::SEXTLOAD ?
        AMDGPUISD::LOAD_D16_LO_I8 : AMDGPUISD::LOAD_D16_LO_U8;
    } else {
      assert(LdLo->getMemoryVT() == MVT::i16);
    }

    TiedIn = CurDAG->getNode(ISD::BITCAST, SDLoc(N), VT, TiedIn);

    SDValue Ops[] = {
      LdLo->getChain(), LdLo->getBasePtr(), TiedIn
    };

    SDValue NewLoadLo =
      CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdLo), VTList,
                                  Ops, LdLo->getMemoryVT(),
                                  LdLo->getMemOperand());

    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadLo);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdLo, 1), NewLoadLo.getValue(1));
    return true;
  }

  return false;
}

void AMDGPUDAGToDAGISel::PreprocessISelDAG() {
  if (!Subtarget->d16PreservesUnusedBits())
    return;

  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    if (N->use_empty())
      continue;

    switch (N->getOpcode()) {
    case ISD::BUILD_VECTOR:
      MadeChange |= matchLoadD16FromBuildVector(N);
      break;
    default:
      break;
    }
  }

  if (MadeChange) {
    CurDAG->RemoveDeadNodes();
    LLVM_DEBUG(dbgs() << "After PreProcess:\n";
               CurDAG->dump(););
  }
}

bool AMDGPUDAGToDAGISel::isNoNanSrc(SDValue N) const {
  if (TM.Options.NoNaNsFPMath)
    return true;

  // TODO: Move into isKnownNeverNaN
  if (N->getFlags().hasNoNaNs())
    return true;

  return CurDAG->isKnownNeverNaN(N);
}

bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N,
                                           bool Negated) const {
  if (N->isUndef())
    return true;

  const SIInstrInfo *TII = Subtarget->getInstrInfo();
  if (Negated) {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
      return TII->isInlineConstant(-C->getAPIntValue());

    if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
      return TII->isInlineConstant(-C->getValueAPF().bitcastToAPInt());

  } else {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
      return TII->isInlineConstant(C->getAPIntValue());

    if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
      return TII->isInlineConstant(C->getValueAPF().bitcastToAPInt());
  }

  return false;
}

/// Determine the register class for \p OpNo.
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode()) {
    if (N->getOpcode() == ISD::CopyToReg) {
      Register Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
      if (Reg.isVirtual()) {
        MachineRegisterInfo &MRI = CurDAG->getMachineFunction().getRegInfo();
        return MRI.getRegClass(Reg);
      }

      const SIRegisterInfo *TRI
        = static_cast<const GCNSubtarget *>(Subtarget)->getRegisterInfo();
      return TRI->getPhysRegClass(Reg);
    }

    return nullptr;
  }

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc =
        Subtarget->getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return Subtarget->getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
        Subtarget->getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
                                                              SubRegIdx);
  }
  }
}

SDNode *AMDGPUDAGToDAGISel::glueCopyToOp(SDNode *N, SDValue NewChain,
                                         SDValue Glue) const {
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(NewChain); // Replace the chain.
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    Ops.push_back(N->getOperand(i));

  Ops.push_back(Glue);
  return CurDAG->MorphNodeTo(N, N->getOpcode(), N->getVTList(), Ops);
}

SDNode *AMDGPUDAGToDAGISel::glueCopyToM0(SDNode *N, SDValue Val) const {
  const SITargetLowering& Lowering =
    *static_cast<const SITargetLowering*>(getTargetLowering());

  assert(N->getOperand(0).getValueType() == MVT::Other && "Expected chain");

  SDValue M0 = Lowering.copyToM0(*CurDAG, N->getOperand(0), SDLoc(N), Val);
  return glueCopyToOp(N, M0, M0.getValue(1));
}

SDNode *AMDGPUDAGToDAGISel::glueCopyToM0LDSInit(SDNode *N) const {
  unsigned AS = cast<MemSDNode>(N)->getAddressSpace();
  if (AS == AMDGPUAS::LOCAL_ADDRESS) {
    if (Subtarget->ldsRequiresM0Init())
      return glueCopyToM0(N, CurDAG->getTargetConstant(-1, SDLoc(N), MVT::i32));
  } else if (AS == AMDGPUAS::REGION_ADDRESS) {
    MachineFunction &MF = CurDAG->getMachineFunction();
    unsigned Value = MF.getInfo<SIMachineFunctionInfo>()->getGDSSize();
    return
        glueCopyToM0(N, CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i32));
  }
  return N;
}
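
// For example (a sketch): a DS operation in the LOCAL address space on a
// subtarget where ldsRequiresM0Init() is true gets a glued "M0 = -1" copy
// ahead of it, making the full LDS range addressable before the DS
// instruction itself is selected.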

MachineSDNode *AMDGPUDAGToDAGISel::buildSMovImm64(SDLoc &DL, uint64_t Imm,
                                                  EVT VT) const {
  SDNode *Lo = CurDAG->getMachineNode(
      AMDGPU::S_MOV_B32, DL, MVT::i32,
      CurDAG->getTargetConstant(Imm & 0xFFFFFFFF, DL, MVT::i32));
  SDNode *Hi =
      CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                             CurDAG->getTargetConstant(Imm >> 32, DL, MVT::i32));
  const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)};

  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, VT, Ops);
}
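
// Sketch of the nodes built for Imm = 0x100000002:
//   %lo = S_MOV_B32 2                  ; Imm & 0xFFFFFFFF
//   %hi = S_MOV_B32 1                  ; Imm >> 32
//   REG_SEQUENCE SReg_64, %lo, sub0, %hi, sub1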

void AMDGPUDAGToDAGISel::SelectBuildVector(SDNode *N, unsigned RegClassID) {
  EVT VT = N->getValueType(0);
  unsigned NumVectorElts = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc DL(N);
  SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);

  if (NumVectorElts == 1) {
    CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT, N->getOperand(0),
                         RegClass);
    return;
  }

  assert(NumVectorElts <= 32 && "Vectors with more than 32 elements not "
                                  "supported yet");
  // 32 = Max Num Vector Elements
  // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
  // 1 = Vector Register Class
  SmallVector<SDValue, 32 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

  bool IsGCN = CurDAG->getSubtarget().getTargetTriple().getArch() ==
               Triple::amdgcn;
  RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
  bool IsRegSeq = true;
  unsigned NOps = N->getNumOperands();
  for (unsigned i = 0; i < NOps; i++) {
    // XXX: Why is this here?
    if (isa<RegisterSDNode>(N->getOperand(i))) {
      IsRegSeq = false;
      break;
    }
    unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
                         : R600RegisterInfo::getSubRegFromChannel(i);
    RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
    RegSeqArgs[1 + (2 * i) + 1] = CurDAG->getTargetConstant(Sub, DL, MVT::i32);
  }
  if (NOps != NumVectorElts) {
    // Fill in the missing undef elements if this was a scalar_to_vector.
    assert(N->getOpcode() == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
    MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                   DL, EltVT);
    for (unsigned i = NOps; i < NumVectorElts; ++i) {
      unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
                           : R600RegisterInfo::getSubRegFromChannel(i);
      RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(Sub, DL, MVT::i32);
    }
  }

  if (!IsRegSeq)
    SelectCode(N);
  CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(), RegSeqArgs);
}
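
// For example, a v2i32 (build_vector b0, b1) becomes a REG_SEQUENCE with the
// operand array { RegClassID, b0, sub0, b1, sub1 }: the register class ID
// first, then one (value, subreg index) pair per element.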

void AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return;   // Already selected.
  }

  // isa<MemSDNode> almost works but is slightly too permissive for some DS
  // intrinsics.
  if (Opc == ISD::LOAD || Opc == ISD::STORE || isa<AtomicSDNode>(N) ||
      (Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC ||
       Opc == ISD::ATOMIC_LOAD_FADD ||
       Opc == AMDGPUISD::ATOMIC_LOAD_FMIN ||
       Opc == AMDGPUISD::ATOMIC_LOAD_FMAX)) {
    N = glueCopyToM0LDSInit(N);
    SelectCode(N);
    return;
  }

  switch (Opc) {
  default:
    break;
  // We are selecting i64 ADD here instead of custom lowering it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE: {
    if (N->getValueType(0) != MVT::i64)
      break;

    SelectADD_SUB_I64(N);
    return;
  }
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:
    if (N->getValueType(0) != MVT::i32)
      break;

    SelectAddcSubb(N);
    return;
  case ISD::UADDO:
  case ISD::USUBO: {
    SelectUADDO_USUBO(N);
    return;
  }
  case AMDGPUISD::FMUL_W_CHAIN: {
    SelectFMUL_W_CHAIN(N);
    return;
  }
  case AMDGPUISD::FMA_W_CHAIN: {
    SelectFMA_W_CHAIN(N);
    return;
  }

  case ISD::SCALAR_TO_VECTOR:
  case ISD::BUILD_VECTOR: {
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    if (VT.getScalarSizeInBits() == 16) {
      if (Opc == ISD::BUILD_VECTOR && NumVectorElts == 2) {
        if (SDNode *Packed = packConstantV2I16(N, *CurDAG)) {
          ReplaceNode(N, Packed);
          return;
        }
      }

      break;
    }

    assert(VT.getVectorElementType().bitsEq(MVT::i32));
    unsigned RegClassID =
        SIRegisterInfo::getSGPRClassForBitWidth(NumVectorElts * 32)->getID();
    SelectBuildVector(N, RegClassID);
    return;
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    SDLoc DL(N);
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
                                          N->getValueType(0), Ops));
    return;
  }

  case ISD::Constant:
  case ISD::ConstantFP: {
    if (N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    SDLoc DL(N);
    ReplaceNode(N, buildSMovImm64(DL, Imm, N->getValueType(0)));
    return;
  }
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    // There is a scalar version available, but unlike the vector version which
    // has a separate operand for the offset and width, the scalar version packs
    // the width and offset into a single operand. Try to move to the scalar
    // version if the offsets are constant, so that we can try to keep extended
    // loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    ReplaceNode(N, getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
                            SDLoc(N), N->getOperand(0), OffsetVal, WidthVal));
    return;
  }
  case AMDGPUISD::DIV_SCALE: {
    SelectDIV_SCALE(N);
    return;
  }
  case AMDGPUISD::MAD_I64_I32:
  case AMDGPUISD::MAD_U64_U32: {
    SelectMAD_64_32(N);
    return;
  }
  case ISD::CopyToReg: {
    const SITargetLowering& Lowering =
      *static_cast<const SITargetLowering*>(getTargetLowering());
    N = Lowering.legalizeTargetIndependentNode(N, *CurDAG);
    break;
  }
  case ISD::AND:
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SIGN_EXTEND_INREG:
    if (N->getValueType(0) != MVT::i32)
      break;

    SelectS_BFE(N);
    return;
  case ISD::BRCOND:
    SelectBRCOND(N);
    return;
  case ISD::FMAD:
  case ISD::FMA:
    SelectFMAD_FMA(N);
    return;
  case AMDGPUISD::ATOMIC_CMP_SWAP:
    SelectATOMIC_CMP_SWAP(N);
    return;
  case AMDGPUISD::CVT_PKRTZ_F16_F32:
  case AMDGPUISD::CVT_PKNORM_I16_F32:
  case AMDGPUISD::CVT_PKNORM_U16_F32:
  case AMDGPUISD::CVT_PK_U16_U32:
  case AMDGPUISD::CVT_PK_I16_I32: {
    // Hack around using a legal type if f16 is illegal.
    if (N->getValueType(0) == MVT::i32) {
      MVT NewVT = Opc == AMDGPUISD::CVT_PKRTZ_F16_F32 ? MVT::v2f16 : MVT::v2i16;
      N = CurDAG->MorphNodeTo(N, N->getOpcode(), CurDAG->getVTList(NewVT),
                              { N->getOperand(0), N->getOperand(1) });
      SelectCode(N);
      return;
    }

    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    SelectINTRINSIC_W_CHAIN(N);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    SelectINTRINSIC_WO_CHAIN(N);
    return;
  }
  case ISD::INTRINSIC_VOID: {
    SelectINTRINSIC_VOID(N);
    return;
  }
  }

  SelectCode(N);
}

bool AMDGPUDAGToDAGISel::isUniformBr(const SDNode *N) const {
  const BasicBlock *BB = FuncInfo->MBB->getBasicBlock();
  const Instruction *Term = BB->getTerminator();
  return Term->getMetadata("amdgpu.uniform") ||
         Term->getMetadata("structurizecfg.uniform");
}

static bool getBaseWithOffsetUsingSplitOR(SelectionDAG &DAG, SDValue Addr,
                                          SDValue &N0, SDValue &N1) {
  if (Addr.getValueType() == MVT::i64 && Addr.getOpcode() == ISD::BITCAST &&
      Addr.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
    // As we split 64-bit `or` earlier, this is a complicated pattern to
    // match, i.e.
    // (i64 (bitcast (v2i32 (build_vector
    //                        (or (extract_vector_elt V, 0), OFFSET),
    //                        (extract_vector_elt V, 1)))))
    SDValue Lo = Addr.getOperand(0).getOperand(0);
    if (Lo.getOpcode() == ISD::OR && DAG.isBaseWithConstantOffset(Lo)) {
      SDValue BaseLo = Lo.getOperand(0);
      SDValue BaseHi = Addr.getOperand(0).getOperand(1);
      // Check that the split base (Lo and Hi) is extracted from the same
      // vector.
      if (BaseLo.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          BaseHi.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          BaseLo.getOperand(0) == BaseHi.getOperand(0) &&
          // Lo is statically extracted from index 0.
          isa<ConstantSDNode>(BaseLo.getOperand(1)) &&
          BaseLo.getConstantOperandVal(1) == 0 &&
          // Hi is statically extracted from index 1.
          isa<ConstantSDNode>(BaseHi.getOperand(1)) &&
          BaseHi.getConstantOperandVal(1) == 1) {
        N0 = BaseLo.getOperand(0).getOperand(0);
        N1 = Lo.getOperand(1);
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isBaseWithConstantOffset64(SDValue Addr, SDValue &LHS,
                                                    SDValue &RHS) const {
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    LHS = Addr.getOperand(0);
    RHS = Addr.getOperand(1);
    return true;
  }

  if (getBaseWithOffsetUsingSplitOR(*CurDAG, Addr, LHS, RHS)) {
    assert(LHS && RHS && isa<ConstantSDNode>(RHS));
    return true;
  }

  return false;
}

StringRef AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;
  SDLoc DL(Addr);

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  }

  return true;
}

SDValue AMDGPUDAGToDAGISel::getMaterializedScalarImm32(int64_t Val,
                                                       const SDLoc &DL) const {
  SDNode *Mov = CurDAG->getMachineNode(
    AMDGPU::S_MOV_B32, DL, MVT::i32,
    CurDAG->getTargetConstant(Val, DL, MVT::i32));
  return SDValue(Mov, 0);
}

// FIXME: Should only handle addcarry/subcarry
void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  unsigned Opcode = N->getOpcode();
  bool ConsumeCarry = (Opcode == ISD::ADDE || Opcode == ISD::SUBE);
  bool ProduceCarry =
      ConsumeCarry || Opcode == ISD::ADDC || Opcode == ISD::SUBC;
  bool IsAdd = Opcode == ISD::ADD || Opcode == ISD::ADDC || Opcode == ISD::ADDE;

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);

  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);

  static const unsigned OpcMap[2][2][2] = {
      {{AMDGPU::S_SUB_U32, AMDGPU::S_ADD_U32},
       {AMDGPU::V_SUB_CO_U32_e32, AMDGPU::V_ADD_CO_U32_e32}},
      {{AMDGPU::S_SUBB_U32, AMDGPU::S_ADDC_U32},
       {AMDGPU::V_SUBB_U32_e32, AMDGPU::V_ADDC_U32_e32}}};

  unsigned Opc = OpcMap[0][N->isDivergent()][IsAdd];
  unsigned CarryOpc = OpcMap[1][N->isDivergent()][IsAdd];

  SDNode *AddLo;
  if (!ConsumeCarry) {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };
    AddLo = CurDAG->getMachineNode(Opc, DL, VTList, Args);
  } else {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0), N->getOperand(2) };
    AddLo = CurDAG->getMachineNode(CarryOpc, DL, VTList, Args);
  }
  SDValue AddHiArgs[] = {
    SDValue(Hi0, 0),
    SDValue(Hi1, 0),
    SDValue(AddLo, 1)
  };
  SDNode *AddHi = CurDAG->getMachineNode(CarryOpc, DL, VTList, AddHiArgs);

  SDValue RegSequenceArgs[] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
    SDValue(AddLo, 0),
    Sub0,
    SDValue(AddHi, 0),
    Sub1,
  };
  SDNode *RegSequence = CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                               MVT::i64, RegSequenceArgs);

  if (ProduceCarry) {
    // Replace the carry-use
    ReplaceUses(SDValue(N, 1), SDValue(AddHi, 1));
  }

  // Replace the remaining uses.
  ReplaceNode(N, RegSequence);
}
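
// Illustrative expansion for a uniform (non-divergent) 64-bit add a + b:
//   %lo  = S_ADD_U32  a.sub0, b.sub0   ; carry-out in SCC
//   %hi  = S_ADDC_U32 a.sub1, b.sub1   ; consumes SCC
//   %res = REG_SEQUENCE SReg_64, %lo, sub0, %hi, sub1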

void AMDGPUDAGToDAGISel::SelectAddcSubb(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue CI = N->getOperand(2);

  if (N->isDivergent()) {
    unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::V_ADDC_U32_e64
                                                   : AMDGPU::V_SUBB_U32_e64;
    CurDAG->SelectNodeTo(
        N, Opc, N->getVTList(),
        {LHS, RHS, CI,
         CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
  } else {
    unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::S_ADD_CO_PSEUDO
                                                   : AMDGPU::S_SUB_CO_PSEUDO;
    CurDAG->SelectNodeTo(N, Opc, N->getVTList(), {LHS, RHS, CI});
  }
}

void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) {
  // The names of the opcodes are misleading: v_add_i32/v_sub_i32 have an
  // unsigned carry out despite the _i32 name. These were renamed in VI to
  // _U32.
  // FIXME: We should probably rename the opcodes here.
  bool IsAdd = N->getOpcode() == ISD::UADDO;
  bool IsVALU = N->isDivergent();

  for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end(); UI != E;
       ++UI)
    if (UI.getUse().getResNo() == 1) {
      if ((IsAdd && (UI->getOpcode() != ISD::ADDCARRY)) ||
          (!IsAdd && (UI->getOpcode() != ISD::SUBCARRY))) {
        IsVALU = true;
        break;
      }
    }

  if (IsVALU) {
    unsigned Opc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;

    CurDAG->SelectNodeTo(
        N, Opc, N->getVTList(),
        {N->getOperand(0), N->getOperand(1),
         CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
  } else {
    unsigned Opc = N->getOpcode() == ISD::UADDO ? AMDGPU::S_UADDO_PSEUDO
                                                : AMDGPU::S_USUBO_PSEUDO;

    CurDAG->SelectNodeTo(N, Opc, N->getVTList(),
                         {N->getOperand(0), N->getOperand(1)});
  }
}

void AMDGPUDAGToDAGISel::SelectFMA_W_CHAIN(SDNode *N) {
  SDLoc SL(N);
  //  src0_modifiers, src0,  src1_modifiers, src1, src2_modifiers, src2, clamp, omod
  SDValue Ops[10];

  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[6], Ops[7]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  SelectVOP3Mods(N->getOperand(3), Ops[5], Ops[4]);
  Ops[8] = N->getOperand(0);
  Ops[9] = N->getOperand(4);

  CurDAG->SelectNodeTo(N, AMDGPU::V_FMA_F32_e64, N->getVTList(), Ops);
}

void AMDGPUDAGToDAGISel::SelectFMUL_W_CHAIN(SDNode *N) {
  SDLoc SL(N);
  //    src0_modifiers, src0,  src1_modifiers, src1, clamp, omod
  SDValue Ops[8];

  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[4], Ops[5]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  Ops[6] = N->getOperand(0);
  Ops[7] = N->getOperand(3);

  CurDAG->SelectNodeTo(N, AMDGPU::V_MUL_F32_e64, N->getVTList(), Ops);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  assert(VT == MVT::f32 || VT == MVT::f64);

  unsigned Opc
    = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64_e64 : AMDGPU::V_DIV_SCALE_F32_e64;

  // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp,
  // omod
  SDValue Ops[8];
  SelectVOP3BMods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
  SelectVOP3BMods(N->getOperand(1), Ops[3], Ops[2]);
  SelectVOP3BMods(N->getOperand(2), Ops[5], Ops[4]);
  CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
void AMDGPUDAGToDAGISel::SelectMAD_64_32(SDNode *N) {
  SDLoc SL(N);
  bool Signed = N->getOpcode() == AMDGPUISD::MAD_I64_I32;
  unsigned Opc = Signed ? AMDGPU::V_MAD_I64_I32_e64 : AMDGPU::V_MAD_U64_U32_e64;

  SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
  SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
                    Clamp };
  CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
}

bool AMDGPUDAGToDAGISel::isDSOffsetLegal(SDValue Base, unsigned Offset) const {
  if (!isUInt<16>(Offset))
    return false;

  if (!Base || Subtarget->hasUsableDSOffset() ||
      Subtarget->unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return CurDAG->SignBitIsZero(Base);
}
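
// For example: a DS offset of 65535 fits the unsigned 16-bit immediate field
// and is accepted, while 65536 is rejected and must remain part of the base
// address computation.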

bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
                                              SDValue &Offset) const {
  SDLoc DL(Addr);
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isDSOffsetLegal(N0, C1->getSExtValue())) {
      // (add n0, c0)
      Base = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      int64_t ByteOffset = C->getSExtValue();
      if (isDSOffsetLegal(SDValue(), ByteOffset)) {
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffsetLegal(Sub, ByteOffset)) {
          SmallVector<SDValue, 3> Opnds;
          Opnds.push_back(Zero);
          Opnds.push_back(Addr.getOperand(1));

          // FIXME: Select to VOP3 version for with-carry.
          unsigned SubOp = AMDGPU::V_SUB_CO_U32_e32;
          if (Subtarget->hasAddNoCarry()) {
            SubOp = AMDGPU::V_SUB_U32_e64;
            Opnds.push_back(
                CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
          }

          MachineSDNode *MachineSub =
              CurDAG->getMachineNode(SubOp, DL, MVT::i32, Opnds);

          Base = SDValue(MachineSub, 0);
          Offset = CurDAG->getTargetConstant(ByteOffset, DL, MVT::i16);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    // If we have a constant address, prefer to put the constant into the
    // offset. This can save moves to load the constant address since multiple
    // operations can share the zero base address register, and enables merging
    // into read2 / write2 instructions.

    SDLoc DL(Addr);

    if (isDSOffsetLegal(SDValue(), CAddr->getZExtValue())) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                 DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // default case
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i16);
  return true;
}

bool AMDGPUDAGToDAGISel::isDSOffset2Legal(SDValue Base, unsigned Offset0,
                                          unsigned Offset1,
                                          unsigned Size) const {
  if (Offset0 % Size != 0 || Offset1 % Size != 0)
    return false;
  if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
    return false;

  if (!Base || Subtarget->hasUsableDSOffset() ||
      Subtarget->unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return CurDAG->SignBitIsZero(Base);
}
1363 
1364 // TODO: If offset is too big, put low 16-bit into offset.
1365 bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
1366                                                    SDValue &Offset0,
1367                                                    SDValue &Offset1) const {
1368   return SelectDSReadWrite2(Addr, Base, Offset0, Offset1, 4);
1369 }
1370 
1371 bool AMDGPUDAGToDAGISel::SelectDS128Bit8ByteAligned(SDValue Addr, SDValue &Base,
1372                                                     SDValue &Offset0,
1373                                                     SDValue &Offset1) const {
1374   return SelectDSReadWrite2(Addr, Base, Offset0, Offset1, 8);
1375 }
1376 
1377 bool AMDGPUDAGToDAGISel::SelectDSReadWrite2(SDValue Addr, SDValue &Base,
1378                                             SDValue &Offset0, SDValue &Offset1,
1379                                             unsigned Size) const {
1380   SDLoc DL(Addr);
1381 
1382   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1383     SDValue N0 = Addr.getOperand(0);
1384     SDValue N1 = Addr.getOperand(1);
1385     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
1386     unsigned OffsetValue0 = C1->getZExtValue();
1387     unsigned OffsetValue1 = OffsetValue0 + Size;
1388 
1389     // (add n0, c0)
1390     if (isDSOffset2Legal(N0, OffsetValue0, OffsetValue1, Size)) {
1391       Base = N0;
1392       Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
1393       Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
1394       return true;
1395     }
1396   } else if (Addr.getOpcode() == ISD::SUB) {
1397     // sub C, x -> add (sub 0, x), C
1398     if (const ConstantSDNode *C =
1399             dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
1400       unsigned OffsetValue0 = C->getZExtValue();
1401       unsigned OffsetValue1 = OffsetValue0 + Size;
1402 
1403       if (isDSOffset2Legal(SDValue(), OffsetValue0, OffsetValue1, Size)) {
1404         SDLoc DL(Addr);
1405         SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
1406 
1407         // XXX - This is kind of hacky. Create a dummy sub node so we can check
1408         // the known bits in isDSOffsetLegal. We need to emit the selected node
1409         // here, so this is thrown away.
1410         SDValue Sub =
1411             CurDAG->getNode(ISD::SUB, DL, MVT::i32, Zero, Addr.getOperand(1));
1412 
1413         if (isDSOffset2Legal(Sub, OffsetValue0, OffsetValue1, Size)) {
1414           SmallVector<SDValue, 3> Opnds;
1415           Opnds.push_back(Zero);
1416           Opnds.push_back(Addr.getOperand(1));
1417           unsigned SubOp = AMDGPU::V_SUB_CO_U32_e32;
1418           if (Subtarget->hasAddNoCarry()) {
1419             SubOp = AMDGPU::V_SUB_U32_e64;
1420             Opnds.push_back(
1421                 CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
1422           }
1423 
          // The subtraction computes a 32-bit DS address regardless of the
          // access size, so the result type is i32.
          MachineSDNode *MachineSub =
              CurDAG->getMachineNode(SubOp, DL, MVT::i32, Opnds);
1426 
1427           Base = SDValue(MachineSub, 0);
1428           Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
1429           Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
1430           return true;
1431         }
1432       }
1433     }
1434   } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
1435     unsigned OffsetValue0 = CAddr->getZExtValue();
1436     unsigned OffsetValue1 = OffsetValue0 + Size;
1437 
1438     if (isDSOffset2Legal(SDValue(), OffsetValue0, OffsetValue1, Size)) {
1439       SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
1440       MachineSDNode *MovZero =
1441           CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32, Zero);
1442       Base = SDValue(MovZero, 0);
1443       Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
1444       Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
1445       return true;
1446     }
1447   }
1448 
  // Default case: use the address as-is with two consecutive element offsets.
  Base = Addr;
1452   Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
1453   Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
1454   return true;
1455 }
1456 
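// Decompose a MUBUF address into its operand pieces: the 64-bit resource
// pointer (Ptr), a VGPR address (VAddr), a scalar offset (SOffset), an
// immediate offset, and the offen/idxen/addr64 flag operands.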
1457 bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr, SDValue &VAddr,
1458                                      SDValue &SOffset, SDValue &Offset,
1459                                      SDValue &Offen, SDValue &Idxen,
1460                                      SDValue &Addr64) const {
  // Subtarget prefers to use flat instructions.
1462   // FIXME: This should be a pattern predicate and not reach here
1463   if (Subtarget->useFlatForGlobal())
1464     return false;
1465 
1466   SDLoc DL(Addr);
1467 
1468   Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
1469   Offen = CurDAG->getTargetConstant(0, DL, MVT::i1);
1470   Addr64 = CurDAG->getTargetConstant(0, DL, MVT::i1);
1471   SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
1472 
1473   ConstantSDNode *C1 = nullptr;
1474   SDValue N0 = Addr;
1475   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1476     C1 = cast<ConstantSDNode>(Addr.getOperand(1));
1477     if (isUInt<32>(C1->getZExtValue()))
1478       N0 = Addr.getOperand(0);
1479     else
1480       C1 = nullptr;
1481   }
1482 
1483   if (N0.getOpcode() == ISD::ADD) {
1484     // (add N2, N3) -> addr64, or
1485     // (add (add N2, N3), C1) -> addr64
1486     SDValue N2 = N0.getOperand(0);
1487     SDValue N3 = N0.getOperand(1);
1488     Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
1489 
1490     if (N2->isDivergent()) {
1491       if (N3->isDivergent()) {
1492         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
1493         // addr64, and construct the resource from a 0 address.
1494         Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
1495         VAddr = N0;
1496       } else {
1497         // N2 is divergent, N3 is not.
1498         Ptr = N3;
1499         VAddr = N2;
1500       }
1501     } else {
1502       // N2 is not divergent.
1503       Ptr = N2;
1504       VAddr = N3;
1505     }
1506     Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1507   } else if (N0->isDivergent()) {
1508     // N0 is divergent. Use it as the addr64, and construct the resource from a
1509     // 0 address.
1510     Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
1511     VAddr = N0;
1512     Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
1513   } else {
1514     // N0 -> offset, or
1515     // (N0 + C1) -> offset
1516     VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
1517     Ptr = N0;
1518   }
1519 
1520   if (!C1) {
1521     // No offset.
1522     Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1523     return true;
1524   }
1525 
1526   if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue())) {
1527     // Legal offset for instruction.
1528     Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
1529     return true;
1530   }
1531 
1532   // Illegal offset, store it in soffset.
1533   Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1534   SOffset =
1535       SDValue(CurDAG->getMachineNode(
1536                   AMDGPU::S_MOV_B32, DL, MVT::i32,
1537                   CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32)),
1538               0);
1539   return true;
1540 }
1541 
1542 bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
1543                                            SDValue &VAddr, SDValue &SOffset,
1544                                            SDValue &Offset) const {
1545   SDValue Ptr, Offen, Idxen, Addr64;
1546 
  // The addr64 bit was removed for Volcanic Islands.
1548   // FIXME: This should be a pattern predicate and not reach here
1549   if (!Subtarget->hasAddr64())
1550     return false;
1551 
1552   if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64))
1553     return false;
1554 
1555   ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
1556   if (C->getSExtValue()) {
1557     SDLoc DL(Addr);
1558 
1559     const SITargetLowering& Lowering =
1560       *static_cast<const SITargetLowering*>(getTargetLowering());
1561 
1562     SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
1563     return true;
1564   }
1565 
1566   return false;
1567 }
1568 
std::pair<SDValue, SDValue>
AMDGPUDAGToDAGISel::foldFrameIndex(SDValue N) const {
1570   SDLoc DL(N);
1571 
1572   auto *FI = dyn_cast<FrameIndexSDNode>(N);
1573   SDValue TFI =
1574       FI ? CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0)) : N;
1575 
  // We rebase the base address into an absolute stack address and hence
  // use constant 0 for soffset. This value must be retained until frame
  // elimination; eliminateFrameIndex will choose the appropriate frame
  // register if needed.
1580   return std::make_pair(TFI, CurDAG->getTargetConstant(0, DL, MVT::i32));
1581 }
1582 
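// Match a private (scratch) address for an offen MUBUF access: the address
// goes in vaddr alongside the scratch resource descriptor, with any legal
// immediate folded into the 12-bit offset field.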
1583 bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffen(SDNode *Parent,
1584                                                  SDValue Addr, SDValue &Rsrc,
1585                                                  SDValue &VAddr, SDValue &SOffset,
                                                 SDValue &ImmOffset) const {
  SDLoc DL(Addr);
1589   MachineFunction &MF = CurDAG->getMachineFunction();
1590   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1591 
1592   Rsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
1593 
1594   if (ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
1595     int64_t Imm = CAddr->getSExtValue();
1596     const int64_t NullPtr =
1597         AMDGPUTargetMachine::getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS);
1598     // Don't fold null pointer.
1599     if (Imm != NullPtr) {
1600       SDValue HighBits = CurDAG->getTargetConstant(Imm & ~4095, DL, MVT::i32);
1601       MachineSDNode *MovHighBits = CurDAG->getMachineNode(
1602         AMDGPU::V_MOV_B32_e32, DL, MVT::i32, HighBits);
1603       VAddr = SDValue(MovHighBits, 0);
1604 
1605       SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
1606       ImmOffset = CurDAG->getTargetConstant(Imm & 4095, DL, MVT::i16);
1607       return true;
1608     }
1609   }
1610 
1611   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1612     // (add n0, c1)
1613 
1614     SDValue N0 = Addr.getOperand(0);
1615     SDValue N1 = Addr.getOperand(1);
1616 
1617     // Offsets in vaddr must be positive if range checking is enabled.
1618     //
1619     // The total computation of vaddr + soffset + offset must not overflow.  If
1620     // vaddr is negative, even if offset is 0 the sgpr offset add will end up
1621     // overflowing.
1622     //
    // Prior to gfx9, MUBUF instructions with the vaddr offset enabled would
    // always perform a range check. A negative vaddr base index would fail
    // that check even though the overall computation yields a valid address,
    // and out-of-bounds MUBUF loads return 0.
1628     //
1629     // Therefore it should be safe to fold any VGPR offset on gfx9 into the
1630     // MUBUF vaddr, but not on older subtargets which can only do this if the
1631     // sign bit is known 0.
1632     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
1633     if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue()) &&
1634         (!Subtarget->privateMemoryResourceIsRangeChecked() ||
1635          CurDAG->SignBitIsZero(N0))) {
1636       std::tie(VAddr, SOffset) = foldFrameIndex(N0);
1637       ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
1638       return true;
1639     }
1640   }
1641 
1642   // (node)
1643   std::tie(VAddr, SOffset) = foldFrameIndex(Addr);
1644   ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1645   return true;
1646 }
1647 
1648 static bool IsCopyFromSGPR(const SIRegisterInfo &TRI, SDValue Val) {
1649   if (Val.getOpcode() != ISD::CopyFromReg)
1650     return false;
1651   auto RC =
1652       TRI.getPhysRegClass(cast<RegisterSDNode>(Val.getOperand(1))->getReg());
1653   return RC && TRI.isSGPRClass(RC);
1654 }
1655 
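// Match a uniform scratch address that needs no vaddr: an SGPR copy, an SGPR
// copy plus a legal immediate offset, or a bare constant offset.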
1656 bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffset(SDNode *Parent,
1657                                                   SDValue Addr,
1658                                                   SDValue &SRsrc,
1659                                                   SDValue &SOffset,
1660                                                   SDValue &Offset) const {
1661   const SIRegisterInfo *TRI =
1662       static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
1663   MachineFunction &MF = CurDAG->getMachineFunction();
1664   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1665   SDLoc DL(Addr);
1666 
1667   // CopyFromReg <sgpr>
1668   if (IsCopyFromSGPR(*TRI, Addr)) {
1669     SRsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
1670     SOffset = Addr;
1671     Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1672     return true;
1673   }
1674 
1675   ConstantSDNode *CAddr;
1676   if (Addr.getOpcode() == ISD::ADD) {
1677     // Add (CopyFromReg <sgpr>) <constant>
1678     CAddr = dyn_cast<ConstantSDNode>(Addr.getOperand(1));
1679     if (!CAddr || !SIInstrInfo::isLegalMUBUFImmOffset(CAddr->getZExtValue()))
1680       return false;
1681     if (!IsCopyFromSGPR(*TRI, Addr.getOperand(0)))
1682       return false;
1683 
1684     SOffset = Addr.getOperand(0);
1685   } else if ((CAddr = dyn_cast<ConstantSDNode>(Addr)) &&
1686              SIInstrInfo::isLegalMUBUFImmOffset(CAddr->getZExtValue())) {
1687     // <constant>
1688     SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
1689   } else {
1690     return false;
1691   }
1692 
1693   SRsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
1694 
1695   Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
1696   return true;
1697 }
1698 
bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &SOffset,
                                           SDValue &Offset) const {
1702   SDValue Ptr, VAddr, Offen, Idxen, Addr64;
1703   const SIInstrInfo *TII =
1704     static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
1705 
1706   if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64))
1707     return false;
1708 
1709   if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
1710       !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
1711       !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
1712     uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
1713                     APInt::getAllOnes(32).getZExtValue(); // Size
1714     SDLoc DL(Addr);
1715 
1716     const SITargetLowering& Lowering =
1717       *static_cast<const SITargetLowering*>(getTargetLowering());
1718 
1719     SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
1720     return true;
1721   }
1722   return false;
1723 }
1724 
// Find a load or store from the corresponding pattern root.
// Roots may be build_vector, bitconvert, or combinations thereof.
1727 static MemSDNode* findMemSDNode(SDNode *N) {
1728   N = AMDGPUTargetLowering::stripBitcast(SDValue(N,0)).getNode();
1729   if (MemSDNode *MN = dyn_cast<MemSDNode>(N))
1730     return MN;
1731   assert(isa<BuildVectorSDNode>(N));
1732   for (SDValue V : N->op_values())
1733     if (MemSDNode *MN =
1734           dyn_cast<MemSDNode>(AMDGPUTargetLowering::stripBitcast(V)))
1735       return MN;
1736   llvm_unreachable("cannot find MemSDNode in the pattern!");
1737 }
1738 
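// Match a flat/global/scratch address as (vaddr + legal immediate offset).
// An offset that does not fit the encoding is split, and the remainder is
// folded into vaddr with extra VALU adds.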
1739 bool AMDGPUDAGToDAGISel::SelectFlatOffsetImpl(SDNode *N, SDValue Addr,
1740                                               SDValue &VAddr, SDValue &Offset,
1741                                               uint64_t FlatVariant) const {
1742   int64_t OffsetVal = 0;
1743 
1744   unsigned AS = findMemSDNode(N)->getAddressSpace();
1745 
1746   bool CanHaveFlatSegmentOffsetBug =
1747       Subtarget->hasFlatSegmentOffsetBug() &&
1748       FlatVariant == SIInstrFlags::FLAT &&
1749       (AS == AMDGPUAS::FLAT_ADDRESS || AS == AMDGPUAS::GLOBAL_ADDRESS);
1750 
1751   if (Subtarget->hasFlatInstOffsets() && !CanHaveFlatSegmentOffsetBug) {
1752     SDValue N0, N1;
1753     if (isBaseWithConstantOffset64(Addr, N0, N1)) {
1754       int64_t COffsetVal = cast<ConstantSDNode>(N1)->getSExtValue();
1755 
1756       const SIInstrInfo *TII = Subtarget->getInstrInfo();
1757       if (TII->isLegalFLATOffset(COffsetVal, AS, FlatVariant)) {
1758         Addr = N0;
1759         OffsetVal = COffsetVal;
1760       } else {
1761         // If the offset doesn't fit, put the low bits into the offset field and
1762         // add the rest.
1763         //
        // For a FLAT instruction, the hardware decides whether to access
        // global/scratch/shared memory based on the high bits of vaddr,
        // ignoring the offset field, so we have to ensure that when we add the
        // remainder to vaddr it still points into the same underlying object.
1768         // The easiest way to do that is to make sure that we split the offset
1769         // into two pieces that are both >= 0 or both <= 0.
1770 
1771         SDLoc DL(N);
1772         uint64_t RemainderOffset;
1773 
1774         std::tie(OffsetVal, RemainderOffset) =
1775             TII->splitFlatOffset(COffsetVal, AS, FlatVariant);
1776 
1777         SDValue AddOffsetLo =
1778             getMaterializedScalarImm32(Lo_32(RemainderOffset), DL);
1779         SDValue Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
1780 
1781         if (Addr.getValueType().getSizeInBits() == 32) {
1782           SmallVector<SDValue, 3> Opnds;
1783           Opnds.push_back(N0);
1784           Opnds.push_back(AddOffsetLo);
1785           unsigned AddOp = AMDGPU::V_ADD_CO_U32_e32;
1786           if (Subtarget->hasAddNoCarry()) {
1787             AddOp = AMDGPU::V_ADD_U32_e64;
1788             Opnds.push_back(Clamp);
1789           }
1790           Addr = SDValue(CurDAG->getMachineNode(AddOp, DL, MVT::i32, Opnds), 0);
1791         } else {
1792           // TODO: Should this try to use a scalar add pseudo if the base address
1793           // is uniform and saddr is usable?
1794           SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
1795           SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
1796 
1797           SDNode *N0Lo = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
1798                                                 DL, MVT::i32, N0, Sub0);
1799           SDNode *N0Hi = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
1800                                                 DL, MVT::i32, N0, Sub1);
1801 
1802           SDValue AddOffsetHi =
1803               getMaterializedScalarImm32(Hi_32(RemainderOffset), DL);
1804 
1805           SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i1);
1806 
1807           SDNode *Add =
1808               CurDAG->getMachineNode(AMDGPU::V_ADD_CO_U32_e64, DL, VTs,
1809                                      {AddOffsetLo, SDValue(N0Lo, 0), Clamp});
1810 
1811           SDNode *Addc = CurDAG->getMachineNode(
1812               AMDGPU::V_ADDC_U32_e64, DL, VTs,
1813               {AddOffsetHi, SDValue(N0Hi, 0), SDValue(Add, 1), Clamp});
1814 
1815           SDValue RegSequenceArgs[] = {
1816               CurDAG->getTargetConstant(AMDGPU::VReg_64RegClassID, DL, MVT::i32),
1817               SDValue(Add, 0), Sub0, SDValue(Addc, 0), Sub1};
1818 
1819           Addr = SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
1820                                                 MVT::i64, RegSequenceArgs),
1821                          0);
1822         }
1823       }
1824     }
1825   }
1826 
1827   VAddr = Addr;
1828   Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i16);
1829   return true;
1830 }
1831 
1832 bool AMDGPUDAGToDAGISel::SelectFlatOffset(SDNode *N, SDValue Addr,
1833                                           SDValue &VAddr,
1834                                           SDValue &Offset) const {
1835   return SelectFlatOffsetImpl(N, Addr, VAddr, Offset, SIInstrFlags::FLAT);
1836 }
1837 
1838 bool AMDGPUDAGToDAGISel::SelectGlobalOffset(SDNode *N, SDValue Addr,
1839                                             SDValue &VAddr,
1840                                             SDValue &Offset) const {
1841   return SelectFlatOffsetImpl(N, Addr, VAddr, Offset, SIInstrFlags::FlatGlobal);
1842 }
1843 
1844 bool AMDGPUDAGToDAGISel::SelectScratchOffset(SDNode *N, SDValue Addr,
1845                                              SDValue &VAddr,
1846                                              SDValue &Offset) const {
1847   return SelectFlatOffsetImpl(N, Addr, VAddr, Offset,
1848                               SIInstrFlags::FlatScratch);
1849 }
1850 
1851 // If this matches zero_extend i32:x, return x
1852 static SDValue matchZExtFromI32(SDValue Op) {
1853   if (Op.getOpcode() != ISD::ZERO_EXTEND)
1854     return SDValue();
1855 
1856   SDValue ExtSrc = Op.getOperand(0);
1857   return (ExtSrc.getValueType() == MVT::i32) ? ExtSrc : SDValue();
1858 }
1859 
1860 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
1861 bool AMDGPUDAGToDAGISel::SelectGlobalSAddr(SDNode *N,
1862                                            SDValue Addr,
1863                                            SDValue &SAddr,
1864                                            SDValue &VOffset,
1865                                            SDValue &Offset) const {
1866   int64_t ImmOffset = 0;
1867 
1868   // Match the immediate offset first, which canonically is moved as low as
1869   // possible.
1870 
1871   SDValue LHS, RHS;
1872   if (isBaseWithConstantOffset64(Addr, LHS, RHS)) {
1873     int64_t COffsetVal = cast<ConstantSDNode>(RHS)->getSExtValue();
1874     const SIInstrInfo *TII = Subtarget->getInstrInfo();
1875 
1876     if (TII->isLegalFLATOffset(COffsetVal, AMDGPUAS::GLOBAL_ADDRESS,
1877                                SIInstrFlags::FlatGlobal)) {
1878       Addr = LHS;
1879       ImmOffset = COffsetVal;
1880     } else if (!LHS->isDivergent()) {
1881       if (COffsetVal > 0) {
1882         SDLoc SL(N);
1883         // saddr + large_offset -> saddr +
1884         //                         (voffset = large_offset & ~MaxOffset) +
1885         //                         (large_offset & MaxOffset);
1886         int64_t SplitImmOffset, RemainderOffset;
1887         std::tie(SplitImmOffset, RemainderOffset) = TII->splitFlatOffset(
1888             COffsetVal, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
1889 
1890         if (isUInt<32>(RemainderOffset)) {
1891           SDNode *VMov = CurDAG->getMachineNode(
1892               AMDGPU::V_MOV_B32_e32, SL, MVT::i32,
1893               CurDAG->getTargetConstant(RemainderOffset, SDLoc(), MVT::i32));
1894           VOffset = SDValue(VMov, 0);
1895           SAddr = LHS;
1896           Offset = CurDAG->getTargetConstant(SplitImmOffset, SDLoc(), MVT::i16);
1897           return true;
1898         }
1899       }
1900 
      // We are adding a 64-bit SGPR and a constant. If the constant bus limit
      // is 1, we would need 1 or 2 extra moves for each half of the constant,
      // so it is better to do a scalar add and then issue a single VALU
      // instruction to materialize zero. Otherwise it takes fewer instructions
      // to perform VALU adds with immediates or inline literals.
1906       unsigned NumLiterals =
1907           !TII->isInlineConstant(APInt(32, COffsetVal & 0xffffffff)) +
1908           !TII->isInlineConstant(APInt(32, COffsetVal >> 32));
1909       if (Subtarget->getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
1910         return false;
1911     }
1912   }
1913 
1914   // Match the variable offset.
1915   if (Addr.getOpcode() == ISD::ADD) {
1916     LHS = Addr.getOperand(0);
1917     RHS = Addr.getOperand(1);
1918 
1919     if (!LHS->isDivergent()) {
1920       // add (i64 sgpr), (zero_extend (i32 vgpr))
1921       if (SDValue ZextRHS = matchZExtFromI32(RHS)) {
1922         SAddr = LHS;
1923         VOffset = ZextRHS;
1924       }
1925     }
1926 
1927     if (!SAddr && !RHS->isDivergent()) {
1928       // add (zero_extend (i32 vgpr)), (i64 sgpr)
1929       if (SDValue ZextLHS = matchZExtFromI32(LHS)) {
1930         SAddr = RHS;
1931         VOffset = ZextLHS;
1932       }
1933     }
1934 
1935     if (SAddr) {
1936       Offset = CurDAG->getTargetConstant(ImmOffset, SDLoc(), MVT::i16);
1937       return true;
1938     }
1939   }
1940 
1941   if (Addr->isDivergent() || Addr.getOpcode() == ISD::UNDEF ||
1942       isa<ConstantSDNode>(Addr))
1943     return false;
1944 
1945   // It's cheaper to materialize a single 32-bit zero for vaddr than the two
1946   // moves required to copy a 64-bit SGPR to VGPR.
1947   SAddr = Addr;
1948   SDNode *VMov =
1949       CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, SDLoc(Addr), MVT::i32,
1950                              CurDAG->getTargetConstant(0, SDLoc(), MVT::i32));
1951   VOffset = SDValue(VMov, 0);
1952   Offset = CurDAG->getTargetConstant(ImmOffset, SDLoc(), MVT::i16);
1953   return true;
1954 }
1955 
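// If SAddr is a frame index, or a frame index plus an offset, rewrite it to
// the TargetFrameIndex form (doing the add with S_ADD_I32) so that frame
// index elimination can fold in the frame register.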
1956 static SDValue SelectSAddrFI(SelectionDAG *CurDAG, SDValue SAddr) {
1957   if (auto FI = dyn_cast<FrameIndexSDNode>(SAddr)) {
1958     SAddr = CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0));
1959   } else if (SAddr.getOpcode() == ISD::ADD &&
1960              isa<FrameIndexSDNode>(SAddr.getOperand(0))) {
1961     // Materialize this into a scalar move for scalar address to avoid
1962     // readfirstlane.
1963     auto FI = cast<FrameIndexSDNode>(SAddr.getOperand(0));
1964     SDValue TFI = CurDAG->getTargetFrameIndex(FI->getIndex(),
1965                                               FI->getValueType(0));
1966     SAddr = SDValue(CurDAG->getMachineNode(AMDGPU::S_ADD_I32, SDLoc(SAddr),
1967                                            MVT::i32, TFI, SAddr.getOperand(1)),
1968                     0);
1969   }
1970 
1971   return SAddr;
1972 }
1973 
1974 // Match (32-bit SGPR base) + sext(imm offset)
1975 bool AMDGPUDAGToDAGISel::SelectScratchSAddr(SDNode *Parent, SDValue Addr,
1976                                             SDValue &SAddr,
1977                                             SDValue &Offset) const {
1978   if (Addr->isDivergent())
1979     return false;
1980 
1981   SDLoc DL(Addr);
1982 
1983   int64_t COffsetVal = 0;
1984 
1985   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1986     COffsetVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
1987     SAddr = Addr.getOperand(0);
1988   } else {
1989     SAddr = Addr;
1990   }
1991 
1992   SAddr = SelectSAddrFI(CurDAG, SAddr);
1993 
1994   const SIInstrInfo *TII = Subtarget->getInstrInfo();
1995 
1996   if (!TII->isLegalFLATOffset(COffsetVal, AMDGPUAS::PRIVATE_ADDRESS,
1997                               SIInstrFlags::FlatScratch)) {
1998     int64_t SplitImmOffset, RemainderOffset;
1999     std::tie(SplitImmOffset, RemainderOffset) = TII->splitFlatOffset(
2000         COffsetVal, AMDGPUAS::PRIVATE_ADDRESS, SIInstrFlags::FlatScratch);
2001 
2002     COffsetVal = SplitImmOffset;
2003 
2004     SDValue AddOffset =
2005         SAddr.getOpcode() == ISD::TargetFrameIndex
2006             ? getMaterializedScalarImm32(Lo_32(RemainderOffset), DL)
2007             : CurDAG->getTargetConstant(RemainderOffset, DL, MVT::i32);
2008     SAddr = SDValue(CurDAG->getMachineNode(AMDGPU::S_ADD_I32, DL, MVT::i32,
2009                                            SAddr, AddOffset),
2010                     0);
2011   }
2012 
2013   Offset = CurDAG->getTargetConstant(COffsetVal, DL, MVT::i16);
2014 
2015   return true;
2016 }
2017 
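// Match an SMRD byte offset node. Imm is set to true only when the offset
// encodes in the instruction's immediate field; a 32-bit literal offset or
// an SGPR offset leaves Imm false.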
2018 bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode,
2019                                           SDValue &Offset, bool &Imm) const {
2020   ConstantSDNode *C = dyn_cast<ConstantSDNode>(ByteOffsetNode);
2021   if (!C) {
2022     if (ByteOffsetNode.getValueType().isScalarInteger() &&
2023         ByteOffsetNode.getValueType().getSizeInBits() == 32) {
2024       Offset = ByteOffsetNode;
2025       Imm = false;
2026       return true;
2027     }
2028     if (ByteOffsetNode.getOpcode() == ISD::ZERO_EXTEND) {
2029       if (ByteOffsetNode.getOperand(0).getValueType().getSizeInBits() == 32) {
2030         Offset = ByteOffsetNode.getOperand(0);
2031         Imm = false;
2032         return true;
2033       }
2034     }
2035     return false;
2036   }
2037 
2038   SDLoc SL(ByteOffsetNode);
2039   // GFX9 and GFX10 have signed byte immediate offsets.
2040   int64_t ByteOffset = C->getSExtValue();
2041   Optional<int64_t> EncodedOffset =
2042       AMDGPU::getSMRDEncodedOffset(*Subtarget, ByteOffset, false);
2043   if (EncodedOffset) {
2044     Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
2045     Imm = true;
2046     return true;
2047   }
2048 
2049   // SGPR and literal offsets are unsigned.
2050   if (ByteOffset < 0)
2051     return false;
2052 
2053   EncodedOffset = AMDGPU::getSMRDEncodedLiteralOffset32(*Subtarget, ByteOffset);
2054   if (EncodedOffset) {
2055     Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
2056     return true;
2057   }
2058 
2059   if (!isUInt<32>(ByteOffset) && !isInt<32>(ByteOffset))
2060     return false;
2061 
2062   SDValue C32Bit = CurDAG->getTargetConstant(ByteOffset, SL, MVT::i32);
2063   Offset = SDValue(
2064       CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, C32Bit), 0);
2065 
2066   return true;
2067 }
2068 
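// Widen a 32-bit address to 64 bits by pairing it with the function's known
// high address bits; non-i32 addresses are returned unchanged.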
2069 SDValue AMDGPUDAGToDAGISel::Expand32BitAddress(SDValue Addr) const {
2070   if (Addr.getValueType() != MVT::i32)
2071     return Addr;
2072 
2073   // Zero-extend a 32-bit address.
2074   SDLoc SL(Addr);
2075 
2076   const MachineFunction &MF = CurDAG->getMachineFunction();
2077   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2078   unsigned AddrHiVal = Info->get32BitAddressHighBits();
2079   SDValue AddrHi = CurDAG->getTargetConstant(AddrHiVal, SL, MVT::i32);
2080 
2081   const SDValue Ops[] = {
2082     CurDAG->getTargetConstant(AMDGPU::SReg_64_XEXECRegClassID, SL, MVT::i32),
2083     Addr,
2084     CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32),
2085     SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, AddrHi),
2086             0),
2087     CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32),
2088   };
2089 
2090   return SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, SL, MVT::i64,
2091                                         Ops), 0);
2092 }
2093 
2094 bool AMDGPUDAGToDAGISel::SelectSMRD(SDValue Addr, SDValue &SBase,
2095                                      SDValue &Offset, bool &Imm) const {
2096   SDLoc SL(Addr);
2097 
2098   // A 32-bit (address + offset) should not cause unsigned 32-bit integer
2099   // wraparound, because s_load instructions perform the addition in 64 bits.
  if (Addr.getValueType() != MVT::i32 ||
      Addr->getFlags().hasNoUnsignedWrap()) {
2102     SDValue N0, N1;
2103     // Extract the base and offset if possible.
2104     if (CurDAG->isBaseWithConstantOffset(Addr) ||
2105         Addr.getOpcode() == ISD::ADD) {
2106       N0 = Addr.getOperand(0);
2107       N1 = Addr.getOperand(1);
2108     } else if (getBaseWithOffsetUsingSplitOR(*CurDAG, Addr, N0, N1)) {
2109       assert(N0 && N1 && isa<ConstantSDNode>(N1));
2110     }
2111     if (N0 && N1) {
2112       if (SelectSMRDOffset(N1, Offset, Imm)) {
2113         SBase = Expand32BitAddress(N0);
2114         return true;
2115       }
2116     }
2117   }
2118   SBase = Expand32BitAddress(Addr);
2119   Offset = CurDAG->getTargetConstant(0, SL, MVT::i32);
2120   Imm = true;
2121   return true;
2122 }
2123 
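// Match an SMRD access whose offset is encodable as an immediate.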
2124 bool AMDGPUDAGToDAGISel::SelectSMRDImm(SDValue Addr, SDValue &SBase,
2125                                        SDValue &Offset) const {
2126   bool Imm = false;
2127   return SelectSMRD(Addr, SBase, Offset, Imm) && Imm;
2128 }
2129 
2130 bool AMDGPUDAGToDAGISel::SelectSMRDImm32(SDValue Addr, SDValue &SBase,
                                         SDValue &Offset) const {
  assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
2134 
2135   bool Imm = false;
2136   if (!SelectSMRD(Addr, SBase, Offset, Imm))
2137     return false;
2138 
2139   return !Imm && isa<ConstantSDNode>(Offset);
2140 }
2141 
2142 bool AMDGPUDAGToDAGISel::SelectSMRDSgpr(SDValue Addr, SDValue &SBase,
2143                                         SDValue &Offset) const {
2144   bool Imm = false;
2145   return SelectSMRD(Addr, SBase, Offset, Imm) && !Imm &&
2146          !isa<ConstantSDNode>(Offset);
2147 }
2148 
2149 bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm(SDValue Addr,
2150                                              SDValue &Offset) const {
2151   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr)) {
2152     // The immediate offset for S_BUFFER instructions is unsigned.
2153     if (auto Imm =
2154             AMDGPU::getSMRDEncodedOffset(*Subtarget, C->getZExtValue(), true)) {
2155       Offset = CurDAG->getTargetConstant(*Imm, SDLoc(Addr), MVT::i32);
2156       return true;
2157     }
2158   }
2159 
2160   return false;
2161 }
2162 
2163 bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm32(SDValue Addr,
2164                                                SDValue &Offset) const {
2165   assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
2166 
2167   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr)) {
2168     if (auto Imm = AMDGPU::getSMRDEncodedLiteralOffset32(*Subtarget,
2169                                                          C->getZExtValue())) {
2170       Offset = CurDAG->getTargetConstant(*Imm, SDLoc(Addr), MVT::i32);
2171       return true;
2172     }
2173   }
2174 
2175   return false;
2176 }
2177 
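// Match a MOVREL index as (base + constant offset), refusing to peel off the
// offset when doing so could make the base negative.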
2178 bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index,
2179                                             SDValue &Base,
2180                                             SDValue &Offset) const {
2181   SDLoc DL(Index);
2182 
2183   if (CurDAG->isBaseWithConstantOffset(Index)) {
2184     SDValue N0 = Index.getOperand(0);
2185     SDValue N1 = Index.getOperand(1);
2186     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
2187 
2188     // (add n0, c0)
2189     // Don't peel off the offset (c0) if doing so could possibly lead
2190     // the base (n0) to be negative.
2191     // (or n0, |c0|) can never change a sign given isBaseWithConstantOffset.
2192     if (C1->getSExtValue() <= 0 || CurDAG->SignBitIsZero(N0) ||
2193         (Index->getOpcode() == ISD::OR && C1->getSExtValue() >= 0)) {
2194       Base = N0;
2195       Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32);
2196       return true;
2197     }
2198   }
2199 
2200   if (isa<ConstantSDNode>(Index))
2201     return false;
2202 
2203   Base = Index;
2204   Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
2205   return true;
2206 }
2207 
2208 SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, const SDLoc &DL,
2209                                      SDValue Val, uint32_t Offset,
2210                                      uint32_t Width) {
  // Transformation function: pack the offset and width of a BFE into
  // the format expected by S_BFE_I32 / S_BFE_U32. In the second source,
  // bits [5:0] contain the offset and bits [22:16] the width.
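  // For example, Offset = 8 and Width = 5 pack to (5 << 16) | 8 = 0x00050008.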
2214   uint32_t PackedVal = Offset | (Width << 16);
2215   SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32);
2216 
2217   return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
2218 }
2219 
2220 void AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
  // "((a << b) srl c)" ---> "BFE_U32 a, (c-b), (32-c)"
  // "((a << b) sra c)" ---> "BFE_I32 a, (c-b), (32-c)"
2223   // Predicate: 0 < b <= c < 32
2224 
2225   const SDValue &Shl = N->getOperand(0);
2226   ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
2227   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
2228 
2229   if (B && C) {
2230     uint32_t BVal = B->getZExtValue();
2231     uint32_t CVal = C->getZExtValue();
2232 
2233     if (0 < BVal && BVal <= CVal && CVal < 32) {
2234       bool Signed = N->getOpcode() == ISD::SRA;
2235       unsigned Opcode = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2236 
2237       ReplaceNode(N, getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0), CVal - BVal,
2238                               32 - CVal));
2239       return;
2240     }
2241   }
2242   SelectCode(N);
2243 }
2244 
2245 void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
2246   switch (N->getOpcode()) {
2247   case ISD::AND:
2248     if (N->getOperand(0).getOpcode() == ISD::SRL) {
2249       // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
2250       // Predicate: isMask(mask)
2251       const SDValue &Srl = N->getOperand(0);
2252       ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
2253       ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));
2254 
2255       if (Shift && Mask) {
2256         uint32_t ShiftVal = Shift->getZExtValue();
2257         uint32_t MaskVal = Mask->getZExtValue();
2258 
2259         if (isMask_32(MaskVal)) {
2260           uint32_t WidthVal = countPopulation(MaskVal);
2261 
2262           ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
2263                                   Srl.getOperand(0), ShiftVal, WidthVal));
2264           return;
2265         }
2266       }
2267     }
2268     break;
2269   case ISD::SRL:
2270     if (N->getOperand(0).getOpcode() == ISD::AND) {
      // "((a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)"
2272       // Predicate: isMask(mask >> b)
2273       const SDValue &And = N->getOperand(0);
2274       ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
2275       ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));
2276 
2277       if (Shift && Mask) {
2278         uint32_t ShiftVal = Shift->getZExtValue();
2279         uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;
2280 
2281         if (isMask_32(MaskVal)) {
2282           uint32_t WidthVal = countPopulation(MaskVal);
2283 
2284           ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
2285                                   And.getOperand(0), ShiftVal, WidthVal));
2286           return;
2287         }
2288       }
2289     } else if (N->getOperand(0).getOpcode() == ISD::SHL) {
2290       SelectS_BFEFromShifts(N);
2291       return;
2292     }
2293     break;
2294   case ISD::SRA:
2295     if (N->getOperand(0).getOpcode() == ISD::SHL) {
2296       SelectS_BFEFromShifts(N);
2297       return;
2298     }
2299     break;
2300 
2301   case ISD::SIGN_EXTEND_INREG: {
2302     // sext_inreg (srl x, 16), i8 -> bfe_i32 x, 16, 8
2303     SDValue Src = N->getOperand(0);
2304     if (Src.getOpcode() != ISD::SRL)
2305       break;
2306 
2307     const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
2308     if (!Amt)
2309       break;
2310 
2311     unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
2312     ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_I32, SDLoc(N), Src.getOperand(0),
2313                             Amt->getZExtValue(), Width));
2314     return;
2315   }
2316   }
2317 
2318   SelectCode(N);
2319 }
2320 
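// Return true if this BRCOND's condition can be computed by a SALU compare
// writing SCC: an i32 setcc, or an i64 eq/ne setcc on subtargets that have
// 64-bit scalar compares.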
2321 bool AMDGPUDAGToDAGISel::isCBranchSCC(const SDNode *N) const {
2322   assert(N->getOpcode() == ISD::BRCOND);
2323   if (!N->hasOneUse())
2324     return false;
2325 
2326   SDValue Cond = N->getOperand(1);
2327   if (Cond.getOpcode() == ISD::CopyToReg)
2328     Cond = Cond.getOperand(2);
2329 
2330   if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
2331     return false;
2332 
2333   MVT VT = Cond.getOperand(0).getSimpleValueType();
2334   if (VT == MVT::i32)
2335     return true;
2336 
2337   if (VT == MVT::i64) {
2338     auto ST = static_cast<const GCNSubtarget *>(Subtarget);
2339 
2340     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
2341     return (CC == ISD::SETEQ || CC == ISD::SETNE) && ST->hasScalarCompareEq64();
2342   }
2343 
2344   return false;
2345 }
2346 
2347 void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
2348   SDValue Cond = N->getOperand(1);
2349 
2350   if (Cond.isUndef()) {
2351     CurDAG->SelectNodeTo(N, AMDGPU::SI_BR_UNDEF, MVT::Other,
2352                          N->getOperand(2), N->getOperand(0));
2353     return;
2354   }
2355 
2356   const GCNSubtarget *ST = static_cast<const GCNSubtarget *>(Subtarget);
2357   const SIRegisterInfo *TRI = ST->getRegisterInfo();
2358 
2359   bool UseSCCBr = isCBranchSCC(N) && isUniformBr(N);
2360   unsigned BrOp = UseSCCBr ? AMDGPU::S_CBRANCH_SCC1 : AMDGPU::S_CBRANCH_VCCNZ;
2361   Register CondReg = UseSCCBr ? AMDGPU::SCC : TRI->getVCC();
2362   SDLoc SL(N);
2363 
2364   if (!UseSCCBr) {
2365     // This is the case that we are selecting to S_CBRANCH_VCCNZ.  We have not
2366     // analyzed what generates the vcc value, so we do not know whether vcc
2367     // bits for disabled lanes are 0.  Thus we need to mask out bits for
2368     // disabled lanes.
2369     //
    // (In the case that we select S_CBRANCH_SCC1 and it gets changed to
    // S_CBRANCH_VCCNZ in SIFixSGPRCopies, SIFixSGPRCopies calls
    // SIInstrInfo::moveToVALU, which inserts the S_AND.)
    //
    // We could add an analysis of what generates the vcc value here and omit
    // the S_AND when it is unnecessary. But it would be better to add a
    // separate pass after SIFixSGPRCopies that removes the unnecessary S_AND,
    // so it catches both cases.
2378     Cond = SDValue(CurDAG->getMachineNode(ST->isWave32() ? AMDGPU::S_AND_B32
2379                                                          : AMDGPU::S_AND_B64,
2380                      SL, MVT::i1,
2381                      CurDAG->getRegister(ST->isWave32() ? AMDGPU::EXEC_LO
2382                                                         : AMDGPU::EXEC,
2383                                          MVT::i1),
2384                     Cond),
2385                    0);
2386   }
2387 
2388   SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, CondReg, Cond);
2389   CurDAG->SelectNodeTo(N, BrOp, MVT::Other,
2390                        N->getOperand(2), // Basic Block
2391                        VCC.getValue(0));
2392 }
2393 
2394 void AMDGPUDAGToDAGISel::SelectFMAD_FMA(SDNode *N) {
2395   MVT VT = N->getSimpleValueType(0);
2396   bool IsFMA = N->getOpcode() == ISD::FMA;
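  // Fall back to normal selection unless this is an f32 mad/fma on a
  // subtarget with the matching mix instruction (mad mix for fmad, fma mix
  // for fma).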
2397   if (VT != MVT::f32 || (!Subtarget->hasMadMixInsts() &&
2398                          !Subtarget->hasFmaMixInsts()) ||
2399       ((IsFMA && Subtarget->hasMadMixInsts()) ||
2400        (!IsFMA && Subtarget->hasFmaMixInsts()))) {
2401     SelectCode(N);
2402     return;
2403   }
2404 
2405   SDValue Src0 = N->getOperand(0);
2406   SDValue Src1 = N->getOperand(1);
2407   SDValue Src2 = N->getOperand(2);
2408   unsigned Src0Mods, Src1Mods, Src2Mods;
2409 
2410   // Avoid using v_mad_mix_f32/v_fma_mix_f32 unless there is actually an operand
2411   // using the conversion from f16.
2412   bool Sel0 = SelectVOP3PMadMixModsImpl(Src0, Src0, Src0Mods);
2413   bool Sel1 = SelectVOP3PMadMixModsImpl(Src1, Src1, Src1Mods);
2414   bool Sel2 = SelectVOP3PMadMixModsImpl(Src2, Src2, Src2Mods);
2415 
2416   assert((IsFMA || !Mode.allFP32Denormals()) &&
2417          "fmad selected with denormals enabled");
2418   // TODO: We can select this with f32 denormals enabled if all the sources are
2419   // converted from f16 (in which case fmad isn't legal).
2420 
2421   if (Sel0 || Sel1 || Sel2) {
2422     // For dummy operands.
2423     SDValue Zero = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
2424     SDValue Ops[] = {
2425       CurDAG->getTargetConstant(Src0Mods, SDLoc(), MVT::i32), Src0,
2426       CurDAG->getTargetConstant(Src1Mods, SDLoc(), MVT::i32), Src1,
2427       CurDAG->getTargetConstant(Src2Mods, SDLoc(), MVT::i32), Src2,
2428       CurDAG->getTargetConstant(0, SDLoc(), MVT::i1),
2429       Zero, Zero
2430     };
2431 
2432     CurDAG->SelectNodeTo(N,
2433                          IsFMA ? AMDGPU::V_FMA_MIX_F32 : AMDGPU::V_MAD_MIX_F32,
2434                          MVT::f32, Ops);
2435   } else {
2436     SelectCode(N);
2437   }
2438 }
2439 
2440 // This is here because there isn't a way to use the generated sub0_sub1 as the
2441 // subreg index to EXTRACT_SUBREG in tablegen.
2442 void AMDGPUDAGToDAGISel::SelectATOMIC_CMP_SWAP(SDNode *N) {
2443   MemSDNode *Mem = cast<MemSDNode>(N);
2444   unsigned AS = Mem->getAddressSpace();
2445   if (AS == AMDGPUAS::FLAT_ADDRESS) {
2446     SelectCode(N);
2447     return;
2448   }
2449 
2450   MVT VT = N->getSimpleValueType(0);
2451   bool Is32 = (VT == MVT::i32);
2452   SDLoc SL(N);
2453 
2454   MachineSDNode *CmpSwap = nullptr;
2455   if (Subtarget->hasAddr64()) {
2456     SDValue SRsrc, VAddr, SOffset, Offset;
2457 
2458     if (SelectMUBUFAddr64(Mem->getBasePtr(), SRsrc, VAddr, SOffset, Offset)) {
2459       unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN :
2460         AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN;
2461       SDValue CmpVal = Mem->getOperand(2);
2462       SDValue CPol = CurDAG->getTargetConstant(AMDGPU::CPol::GLC, SL, MVT::i32);
2463 
2464       // XXX - Do we care about glue operands?
2465 
2466       SDValue Ops[] = {CmpVal, VAddr, SRsrc, SOffset, Offset, CPol,
2467                        Mem->getChain()};
2468 
2469       CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
2470     }
2471   }
2472 
2473   if (!CmpSwap) {
2474     SDValue SRsrc, SOffset, Offset;
2475     if (SelectMUBUFOffset(Mem->getBasePtr(), SRsrc, SOffset, Offset)) {
2476       unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN :
2477         AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN;
2478 
2479       SDValue CmpVal = Mem->getOperand(2);
2480       SDValue CPol = CurDAG->getTargetConstant(AMDGPU::CPol::GLC, SL, MVT::i32);
2481       SDValue Ops[] = {CmpVal, SRsrc, SOffset, Offset, CPol, Mem->getChain()};
2482 
2483       CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
2484     }
2485   }
2486 
2487   if (!CmpSwap) {
2488     SelectCode(N);
2489     return;
2490   }
2491 
2492   MachineMemOperand *MMO = Mem->getMemOperand();
2493   CurDAG->setNodeMemRefs(CmpSwap, {MMO});
2494 
2495   unsigned SubReg = Is32 ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
2496   SDValue Extract
2497     = CurDAG->getTargetExtractSubreg(SubReg, SL, VT, SDValue(CmpSwap, 0));
2498 
2499   ReplaceUses(SDValue(N, 0), Extract);
2500   ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 1));
2501   CurDAG->RemoveDeadNode(N);
2502 }
2503 
2504 void AMDGPUDAGToDAGISel::SelectDSAppendConsume(SDNode *N, unsigned IntrID) {
2505   // The address is assumed to be uniform, so if it ends up in a VGPR, it will
2506   // be copied to an SGPR with readfirstlane.
2507   unsigned Opc = IntrID == Intrinsic::amdgcn_ds_append ?
2508     AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
2509 
2510   SDValue Chain = N->getOperand(0);
2511   SDValue Ptr = N->getOperand(2);
2512   MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
2513   MachineMemOperand *MMO = M->getMemOperand();
2514   bool IsGDS = M->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
2515 
2516   SDValue Offset;
2517   if (CurDAG->isBaseWithConstantOffset(Ptr)) {
2518     SDValue PtrBase = Ptr.getOperand(0);
2519     SDValue PtrOffset = Ptr.getOperand(1);
2520 
2521     const APInt &OffsetVal = cast<ConstantSDNode>(PtrOffset)->getAPIntValue();
2522     if (isDSOffsetLegal(PtrBase, OffsetVal.getZExtValue())) {
2523       N = glueCopyToM0(N, PtrBase);
2524       Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i32);
2525     }
2526   }
2527 
2528   if (!Offset) {
2529     N = glueCopyToM0(N, Ptr);
2530     Offset = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
2531   }
2532 
2533   SDValue Ops[] = {
2534     Offset,
2535     CurDAG->getTargetConstant(IsGDS, SDLoc(), MVT::i32),
2536     Chain,
2537     N->getOperand(N->getNumOperands() - 1) // New glue
2538   };
2539 
2540   SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
2541   CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
2542 }
2543 
2544 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
2545   switch (IntrID) {
2546   case Intrinsic::amdgcn_ds_gws_init:
2547     return AMDGPU::DS_GWS_INIT;
2548   case Intrinsic::amdgcn_ds_gws_barrier:
2549     return AMDGPU::DS_GWS_BARRIER;
2550   case Intrinsic::amdgcn_ds_gws_sema_v:
2551     return AMDGPU::DS_GWS_SEMA_V;
2552   case Intrinsic::amdgcn_ds_gws_sema_br:
2553     return AMDGPU::DS_GWS_SEMA_BR;
2554   case Intrinsic::amdgcn_ds_gws_sema_p:
2555     return AMDGPU::DS_GWS_SEMA_P;
2556   case Intrinsic::amdgcn_ds_gws_sema_release_all:
2557     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
2558   default:
2559     llvm_unreachable("not a gws intrinsic");
2560   }
2561 }
2562 
2563 void AMDGPUDAGToDAGISel::SelectDS_GWS(SDNode *N, unsigned IntrID) {
2564   if (IntrID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
2565       !Subtarget->hasGWSSemaReleaseAll()) {
2566     // Let this error.
2567     SelectCode(N);
2568     return;
2569   }
2570 
2571   // Chain, intrinsic ID, vsrc, offset
2572   const bool HasVSrc = N->getNumOperands() == 4;
2573   assert(HasVSrc || N->getNumOperands() == 3);
2574 
2575   SDLoc SL(N);
2576   SDValue BaseOffset = N->getOperand(HasVSrc ? 3 : 2);
2577   int ImmOffset = 0;
2578   MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
2579   MachineMemOperand *MMO = M->getMemOperand();
2580 
  // Don't worry if the offset ends up in a VGPR. Only one lane's value takes
  // effect, so SIFixSGPRCopies will validly insert a readfirstlane.
2583 
2584   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
2585   // offset field) % 64. Some versions of the programming guide omit the m0
2586   // part, or claim it's from offset 0.
2587   if (ConstantSDNode *ConstOffset = dyn_cast<ConstantSDNode>(BaseOffset)) {
2588     // If we have a constant offset, try to use the 0 in m0 as the base.
    // TODO: Look into changing the default m0 initialization value. If the
    // default -1 set only the low 16 bits, we could leave it as-is and add 1
    // to the immediate offset.
2592     glueCopyToM0(N, CurDAG->getTargetConstant(0, SL, MVT::i32));
2593     ImmOffset = ConstOffset->getZExtValue();
2594   } else {
2595     if (CurDAG->isBaseWithConstantOffset(BaseOffset)) {
2596       ImmOffset = BaseOffset.getConstantOperandVal(1);
2597       BaseOffset = BaseOffset.getOperand(0);
2598     }
2599 
2600     // Prefer to do the shift in an SGPR since it should be possible to use m0
2601     // as the result directly. If it's already an SGPR, it will be eliminated
2602     // later.
2603     SDNode *SGPROffset
2604       = CurDAG->getMachineNode(AMDGPU::V_READFIRSTLANE_B32, SL, MVT::i32,
2605                                BaseOffset);
2606     // Shift to offset in m0
2607     SDNode *M0Base
2608       = CurDAG->getMachineNode(AMDGPU::S_LSHL_B32, SL, MVT::i32,
2609                                SDValue(SGPROffset, 0),
2610                                CurDAG->getTargetConstant(16, SL, MVT::i32));
2611     glueCopyToM0(N, SDValue(M0Base, 0));
2612   }
2613 
2614   SDValue Chain = N->getOperand(0);
2615   SDValue OffsetField = CurDAG->getTargetConstant(ImmOffset, SL, MVT::i32);
2616 
2617   const unsigned Opc = gwsIntrinToOpcode(IntrID);
2618   SmallVector<SDValue, 5> Ops;
2619   if (HasVSrc)
2620     Ops.push_back(N->getOperand(2));
2621   Ops.push_back(OffsetField);
2622   Ops.push_back(Chain);
2623 
2624   SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
2625   CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
2626 }
2627 
2628 void AMDGPUDAGToDAGISel::SelectInterpP1F16(SDNode *N) {
2629   if (Subtarget->getLDSBankCount() != 16) {
2630     // This is a single instruction with a pattern.
2631     SelectCode(N);
2632     return;
2633   }
2634 
2635   SDLoc DL(N);
2636 
2637   // This requires 2 instructions. It is possible to write a pattern to support
2638   // this, but the generated isel emitter doesn't correctly deal with multiple
2639   // output instructions using the same physical register input. The copy to m0
2640   // is incorrectly placed before the second instruction.
2641   //
2642   // TODO: Match source modifiers.
2643   //
2644   // def : Pat <
2645   //   (int_amdgcn_interp_p1_f16
2646   //    (VOP3Mods f32:$src0, i32:$src0_modifiers),
2647   //                             (i32 timm:$attrchan), (i32 timm:$attr),
2648   //                             (i1 timm:$high), M0),
2649   //   (V_INTERP_P1LV_F16 $src0_modifiers, VGPR_32:$src0, timm:$attr,
2650   //       timm:$attrchan, 0,
2651   //       (V_INTERP_MOV_F32 2, timm:$attr, timm:$attrchan), timm:$high)> {
2652   //   let Predicates = [has16BankLDS];
2653   // }
2654 
2655   // 16 bank LDS
2656   SDValue ToM0 = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, AMDGPU::M0,
2657                                       N->getOperand(5), SDValue());
2658 
2659   SDVTList VTs = CurDAG->getVTList(MVT::f32, MVT::Other);
2660 
2661   SDNode *InterpMov =
2662     CurDAG->getMachineNode(AMDGPU::V_INTERP_MOV_F32, DL, VTs, {
2663         CurDAG->getTargetConstant(2, DL, MVT::i32), // P0
2664         N->getOperand(3),  // Attr
2665         N->getOperand(2),  // Attrchan
2666         ToM0.getValue(1) // In glue
2667   });
2668 
2669   SDNode *InterpP1LV =
2670     CurDAG->getMachineNode(AMDGPU::V_INTERP_P1LV_F16, DL, MVT::f32, {
2671         CurDAG->getTargetConstant(0, DL, MVT::i32), // $src0_modifiers
2672         N->getOperand(1), // Src0
2673         N->getOperand(3), // Attr
2674         N->getOperand(2), // Attrchan
2675         CurDAG->getTargetConstant(0, DL, MVT::i32), // $src2_modifiers
2676         SDValue(InterpMov, 0), // Src2 - holds two f16 values selected by high
2677         N->getOperand(4), // high
2678         CurDAG->getTargetConstant(0, DL, MVT::i1), // $clamp
2679         CurDAG->getTargetConstant(0, DL, MVT::i32), // $omod
2680         SDValue(InterpMov, 1)
2681   });
2682 
2683   CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), SDValue(InterpP1LV, 0));
2684 }
2685 
2686 void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) {
2687   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2688   switch (IntrID) {
2689   case Intrinsic::amdgcn_ds_append:
2690   case Intrinsic::amdgcn_ds_consume: {
2691     if (N->getValueType(0) != MVT::i32)
2692       break;
2693     SelectDSAppendConsume(N, IntrID);
2694     return;
2695   }
2696   }
2697 
2698   SelectCode(N);
2699 }
2700 
2701 void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
2702   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
2703   unsigned Opcode;
2704   switch (IntrID) {
2705   case Intrinsic::amdgcn_wqm:
2706     Opcode = AMDGPU::WQM;
2707     break;
2708   case Intrinsic::amdgcn_softwqm:
2709     Opcode = AMDGPU::SOFT_WQM;
2710     break;
2711   case Intrinsic::amdgcn_wwm:
2712   case Intrinsic::amdgcn_strict_wwm:
2713     Opcode = AMDGPU::STRICT_WWM;
2714     break;
2715   case Intrinsic::amdgcn_strict_wqm:
2716     Opcode = AMDGPU::STRICT_WQM;
2717     break;
2718   case Intrinsic::amdgcn_interp_p1_f16:
2719     SelectInterpP1F16(N);
2720     return;
2721   default:
2722     SelectCode(N);
2723     return;
2724   }
2725 
2726   SDValue Src = N->getOperand(1);
2727   CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), {Src});
2728 }
2729 
2730 void AMDGPUDAGToDAGISel::SelectINTRINSIC_VOID(SDNode *N) {
2731   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2732   switch (IntrID) {
2733   case Intrinsic::amdgcn_ds_gws_init:
2734   case Intrinsic::amdgcn_ds_gws_barrier:
2735   case Intrinsic::amdgcn_ds_gws_sema_v:
2736   case Intrinsic::amdgcn_ds_gws_sema_br:
2737   case Intrinsic::amdgcn_ds_gws_sema_p:
2738   case Intrinsic::amdgcn_ds_gws_sema_release_all:
2739     SelectDS_GWS(N, IntrID);
2740     return;
2741   default:
2742     break;
2743   }
2744 
2745   SelectCode(N);
2746 }
2747 
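// Strip source modifiers (fneg, and fabs when AllowAbs) from In, returning
// the underlying source and the accumulated SISrcMods bits.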
2748 bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
2749                                             unsigned &Mods,
2750                                             bool AllowAbs) const {
2751   Mods = 0;
2752   Src = In;
2753 
2754   if (Src.getOpcode() == ISD::FNEG) {
2755     Mods |= SISrcMods::NEG;
2756     Src = Src.getOperand(0);
2757   }
2758 
2759   if (AllowAbs && Src.getOpcode() == ISD::FABS) {
2760     Mods |= SISrcMods::ABS;
2761     Src = Src.getOperand(0);
2762   }
2763 
2764   return true;
2765 }
2766 
2767 bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
2768                                         SDValue &SrcMods) const {
2769   unsigned Mods;
2770   if (SelectVOP3ModsImpl(In, Src, Mods)) {
2771     SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2772     return true;
2773   }
2774 
2775   return false;
2776 }
2777 
2778 bool AMDGPUDAGToDAGISel::SelectVOP3BMods(SDValue In, SDValue &Src,
2779                                          SDValue &SrcMods) const {
2780   unsigned Mods;
2781   if (SelectVOP3ModsImpl(In, Src, Mods, /* AllowAbs */ false)) {
2782     SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2783     return true;
2784   }
2785 
2786   return false;
2787 }
2788 
2789 bool AMDGPUDAGToDAGISel::SelectVOP3Mods_NNaN(SDValue In, SDValue &Src,
2790                                              SDValue &SrcMods) const {
2791   SelectVOP3Mods(In, Src, SrcMods);
2792   return isNoNanSrc(Src);
2793 }
2794 
2795 bool AMDGPUDAGToDAGISel::SelectVOP3NoMods(SDValue In, SDValue &Src) const {
2796   if (In.getOpcode() == ISD::FABS || In.getOpcode() == ISD::FNEG)
2797     return false;
2798 
2799   Src = In;
2800   return true;
2801 }
2802 
2803 bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
2804                                          SDValue &SrcMods, SDValue &Clamp,
2805                                          SDValue &Omod) const {
2806   SDLoc DL(In);
2807   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2808   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2809 
2810   return SelectVOP3Mods(In, Src, SrcMods);
2811 }
2812 
2813 bool AMDGPUDAGToDAGISel::SelectVOP3BMods0(SDValue In, SDValue &Src,
2814                                           SDValue &SrcMods, SDValue &Clamp,
2815                                           SDValue &Omod) const {
2816   SDLoc DL(In);
2817   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2818   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2819 
2820   return SelectVOP3BMods(In, Src, SrcMods);
2821 }
2822 
2823 bool AMDGPUDAGToDAGISel::SelectVOP3OMods(SDValue In, SDValue &Src,
2824                                          SDValue &Clamp, SDValue &Omod) const {
2825   Src = In;
2826 
2827   SDLoc DL(In);
2828   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2829   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2830 
2831   return true;
2832 }
2833 
bool AMDGPUDAGToDAGISel::SelectVOP3PMods(SDValue In, SDValue &Src,
                                         SDValue &SrcMods) const {
  unsigned Mods = 0;
  Src = In;

  if (Src.getOpcode() == ISD::FNEG) {
    Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
    Src = Src.getOperand(0);
  }

  if (Src.getOpcode() == ISD::BUILD_VECTOR) {
    unsigned VecMods = Mods;

    SDValue Lo = stripBitcast(Src.getOperand(0));
    SDValue Hi = stripBitcast(Src.getOperand(1));

    if (Lo.getOpcode() == ISD::FNEG) {
      Lo = stripBitcast(Lo.getOperand(0));
      Mods ^= SISrcMods::NEG;
    }

    if (Hi.getOpcode() == ISD::FNEG) {
      Hi = stripBitcast(Hi.getOperand(0));
      Mods ^= SISrcMods::NEG_HI;
    }

    if (isExtractHiElt(Lo, Lo))
      Mods |= SISrcMods::OP_SEL_0;

    if (isExtractHiElt(Hi, Hi))
      Mods |= SISrcMods::OP_SEL_1;

    unsigned VecSize = Src.getValueSizeInBits();
    Lo = stripExtractLoElt(Lo);
    Hi = stripExtractLoElt(Hi);

    if (Lo.getValueSizeInBits() > VecSize) {
      Lo = CurDAG->getTargetExtractSubreg(
        (VecSize > 32) ? AMDGPU::sub0_sub1 : AMDGPU::sub0, SDLoc(In),
        MVT::getIntegerVT(VecSize), Lo);
    }

    if (Hi.getValueSizeInBits() > VecSize) {
      Hi = CurDAG->getTargetExtractSubreg(
        (VecSize > 32) ? AMDGPU::sub0_sub1 : AMDGPU::sub0, SDLoc(In),
        MVT::getIntegerVT(VecSize), Hi);
    }

    assert(Lo.getValueSizeInBits() <= VecSize &&
           Hi.getValueSizeInBits() <= VecSize);

    if (Lo == Hi && !isInlineImmediate(Lo.getNode())) {
      // Really a scalar input. Just select from the low half of the register
      // to avoid packing.

      if (VecSize == 32 || VecSize == Lo.getValueSizeInBits()) {
        Src = Lo;
      } else {
        assert(Lo.getValueSizeInBits() == 32 && VecSize == 64);

        SDLoc SL(In);
        SDValue Undef = SDValue(
          CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, SL,
                                 Lo.getValueType()), 0);
        auto RC = Lo->isDivergent() ? AMDGPU::VReg_64RegClassID
                                    : AMDGPU::SReg_64RegClassID;
        const SDValue Ops[] = {
          CurDAG->getTargetConstant(RC, SL, MVT::i32),
          Lo, CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32),
          Undef, CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32) };

        Src = SDValue(CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SL,
                                             Src.getValueType(), Ops), 0);
      }
      SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
      return true;
    }

    if (VecSize == 64 && Lo == Hi && isa<ConstantFPSDNode>(Lo)) {
      uint64_t Lit = cast<ConstantFPSDNode>(Lo)->getValueAPF()
                      .bitcastToAPInt().getZExtValue();
      if (AMDGPU::isInlinableLiteral32(Lit, Subtarget->hasInv2PiInlineImm())) {
        Src = CurDAG->getTargetConstant(Lit, SDLoc(In), MVT::i64);
        SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
        return true;
      }
    }

    Mods = VecMods;
  }

  // Packed instructions do not have abs modifiers.
  Mods |= SISrcMods::OP_SEL_1;

  SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3OpSel(SDValue In, SDValue &Src,
                                         SDValue &SrcMods) const {
  Src = In;
  // FIXME: Handle op_sel
  SrcMods = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3OpSelMods(SDValue In, SDValue &Src,
                                             SDValue &SrcMods) const {
  // FIXME: Handle op_sel
  return SelectVOP3Mods(In, Src, SrcMods);
}

// The return value is not whether the match is possible (which it always is),
// but whether or not a conversion is really used.
bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src,
                                                   unsigned &Mods) const {
  Mods = 0;
  SelectVOP3ModsImpl(In, Src, Mods);

  if (Src.getOpcode() == ISD::FP_EXTEND) {
    Src = Src.getOperand(0);
    assert(Src.getValueType() == MVT::f16);
    Src = stripBitcast(Src);

    // Be careful about folding modifiers if we already have an abs. fneg is
    // applied last, so we don't want to apply an earlier fneg.
    if ((Mods & SISrcMods::ABS) == 0) {
      unsigned ModsTmp;
      SelectVOP3ModsImpl(Src, Src, ModsTmp);

      if ((ModsTmp & SISrcMods::NEG) != 0)
        Mods ^= SISrcMods::NEG;

      if ((ModsTmp & SISrcMods::ABS) != 0)
        Mods |= SISrcMods::ABS;
    }

    // op_sel/op_sel_hi decide the source type and the source.
    // If the source's op_sel_hi is set, it indicates to do a conversion from
    // fp16. If the source's op_sel is set, it picks the high half of the
    // source register.

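    // Illustrative example: for (fp_extend (extract-high-half v2f16:x)) we
    // end up with OP_SEL_1 (the source is fp16) plus OP_SEL_0 (read the high
    // 16 bits of the register), so no separate shift is needed to access the
    // high element.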
    Mods |= SISrcMods::OP_SEL_1;
    if (isExtractHiElt(Src, Src)) {
      Mods |= SISrcMods::OP_SEL_0;

      // TODO: Should we try to look for neg/abs here?
    }

    return true;
  }

  return false;
}

bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixMods(SDValue In, SDValue &Src,
                                               SDValue &SrcMods) const {
  unsigned Mods = 0;
  SelectVOP3PMadMixModsImpl(In, Src, Mods);
  SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
  return true;
}

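// Return a 32-bit value whose high 16 bits are In, or SDValue() if this can't
// be done cheaply. E.g. the f16 constant 1.0 (bit pattern 0x3C00) becomes the
// i32 constant 0x3C000000.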
SDValue AMDGPUDAGToDAGISel::getHi16Elt(SDValue In) const {
  if (In.isUndef())
    return CurDAG->getUNDEF(MVT::i32);

  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(In)) {
    SDLoc SL(In);
    return CurDAG->getConstant(C->getZExtValue() << 16, SL, MVT::i32);
  }

  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(In)) {
    SDLoc SL(In);
    return CurDAG->getConstant(
      C->getValueAPF().bitcastToAPInt().getZExtValue() << 16, SL, MVT::i32);
  }

  SDValue Src;
  if (isExtractHiElt(In, Src))
    return Src;

  return SDValue();
}

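// Decide whether an immediate should be materialized in a VGPR rather than an
// SGPR. This returns true only when at least one of the (at most 10)
// inspected uses strictly requires a VGPR operand and cannot be commuted into
// a position that also accepts an SGPR.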
bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode *N) const {
  assert(CurDAG->getTarget().getTargetTriple().getArch() == Triple::amdgcn);

  const SIRegisterInfo *SIRI =
    static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
  const SIInstrInfo *SII =
    static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());

  unsigned Limit = 0;
  bool AllUsesAcceptSReg = true;
  for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
       Limit < 10 && U != E; ++U, ++Limit) {
    const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());

    // If the register class is unknown, it could be one that needs to be an
    // SGPR, e.g. an inline asm constraint.
    if (!RC || SIRI->isSGPRClass(RC))
      return false;

    if (RC != &AMDGPU::VS_32RegClass) {
      AllUsesAcceptSReg = false;
      SDNode *User = *U;
      if (User->isMachineOpcode()) {
        unsigned Opc = User->getMachineOpcode();
        MCInstrDesc Desc = SII->get(Opc);
        if (Desc.isCommutable()) {
          unsigned OpIdx = Desc.getNumDefs() + U.getOperandNo();
          unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
          if (SII->findCommutedOpIndices(Desc, OpIdx, CommuteIdx1)) {
            unsigned CommutedOpNo = CommuteIdx1 - Desc.getNumDefs();
            const TargetRegisterClass *CommutedRC =
                getOperandRegClass(*U, CommutedOpNo);
            if (CommutedRC == &AMDGPU::VS_32RegClass)
              AllUsesAcceptSReg = true;
          }
        }
      }
      // If AllUsesAcceptSReg is still false, we have not succeeded in
      // commuting the current user. This means at least one use strictly
      // requires a VGPR, so we will not attempt to commute other user
      // instructions.
      if (!AllUsesAcceptSReg)
        break;
    }
  }
  return !AllUsesAcceptSReg && (Limit < 10);
}

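// A load is a candidate for scalar selection when it is sufficiently aligned
// and either reads a constant address space without divergence, or is a
// simple, non-divergent, non-clobbered global load on subtargets that
// scalarize global loads.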
bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode *N) const {
  auto Ld = cast<LoadSDNode>(N);

  return Ld->getAlignment() >= 4 &&
         (((Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
            Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
           !N->isDivergent()) ||
          (Subtarget->getScalarizeGlobalBehavior() &&
           Ld->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
           Ld->isSimple() && !N->isDivergent() &&
           static_cast<const SITargetLowering *>(getTargetLowering())
               ->isMemOpHasNoClobberedMemOperand(N)));
}

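// Iterate target-specific folding over the selected machine nodes until a
// fixed point is reached: replacing one node may expose further
// PostISelFolding opportunities, so we loop while anything changed.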
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering &Lowering =
      *static_cast<const AMDGPUTargetLowering *>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;

    // Go over all selected nodes and try to fold them a bit more
    SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_begin();
    while (Position != CurDAG->allnodes_end()) {
      SDNode *Node = &*Position++;
      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(Node);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        if (ResNode)
          ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}

bool R600DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
  Subtarget = &MF.getSubtarget<R600Subtarget>();
  return SelectionDAGISel::runOnMachineFunction(MF);
}

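// With CbId == -1 any constant address space qualifies; otherwise the load
// must target the specific constant buffer CONSTANT_BUFFER_0 + CbId.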
bool R600DAGToDAGISel::isConstantLoad(const MemSDNode *N, int CbId) const {
  if (!N->readMem())
    return false;
  if (CbId == -1)
    return N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
           N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT;

  return N->getAddressSpace() == AMDGPUAS::CONSTANT_BUFFER_0 + CbId;
}

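// R600 indirect addressing is dword-based, so a constant byte address is
// divided by 4 here. E.g. a constant byte address of 16 selects as the dword
// offset 4.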
bool R600DAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                       SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, SDLoc(Addr),
                                       true);
    return true;
  }
  return false;
}

bool R600DAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                       SDValue &BaseReg,
                                                       SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, SDLoc(Addr), true);
    return true;
  }
  return false;
}

void R600DAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return;   // Already selected.
  }

  switch (Opc) {
  default: break;
  case AMDGPUISD::BUILD_VERTICAL_VECTOR:
  case ISD::SCALAR_TO_VECTOR:
  case ISD::BUILD_VECTOR: {
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    unsigned RegClassID;
    // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG, which
    // adds a 128-bit register copy when going through the two-address
    // instruction pass. We want to avoid 128-bit copies as much as possible
    // because they can't be bundled by our scheduler.
    switch (NumVectorElts) {
    case 2: RegClassID = R600::R600_Reg64RegClassID; break;
    case 4:
      if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
        RegClassID = R600::R600_Reg128VerticalRegClassID;
      else
        RegClassID = R600::R600_Reg128RegClassID;
      break;
    default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
    }
    SelectBuildVector(N, RegClassID);
    return;
  }
  }

  SelectCode(N);
}

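// Split an indirect address into (Base, Offset). Roughly: a bare constant c
// (possibly wrapped in DWORDADDR) becomes (INDIRECT_BASE_ADDR, c); (add x, c)
// and (or x, c) become (x, c); anything else falls back to (Addr, 0). The
// match itself always succeeds.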
bool R600DAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                          SDValue &Offset) {
  ConstantSDNode *C;
  SDLoc DL(Addr);

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  }

  return true;
}

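// Fold an immediate into the VTX_READ offset field when it fits in 16 bits:
// (add x, c) becomes base x with offset c, and a bare constant address
// becomes base ZERO with offset c. Otherwise the whole address is used as
// the base with a zero offset.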
bool R600DAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                          SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
                                       MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  R600::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
                                       MVT::i32);
    return true;
  }

  // Default case, no offset
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
  return true;
}