1 //===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //==-----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Defines an instruction selector for the AMDGPU target.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPU.h"
15 #include "AMDGPUArgumentUsageInfo.h"
16 #include "AMDGPUISelLowering.h" // For AMDGPUISD
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPUPerfHintAnalysis.h"
19 #include "AMDGPUSubtarget.h"
20 #include "AMDGPUTargetMachine.h"
21 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
22 #include "SIDefines.h"
23 #include "SIISelLowering.h"
24 #include "SIInstrInfo.h"
25 #include "SIMachineFunctionInfo.h"
26 #include "SIRegisterInfo.h"
27 #include "llvm/ADT/APInt.h"
28 #include "llvm/ADT/SmallVector.h"
29 #include "llvm/ADT/StringRef.h"
30 #include "llvm/Analysis/LegacyDivergenceAnalysis.h"
31 #include "llvm/Analysis/LoopInfo.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/CodeGen/FunctionLoweringInfo.h"
34 #include "llvm/CodeGen/ISDOpcodes.h"
35 #include "llvm/CodeGen/MachineFunction.h"
36 #include "llvm/CodeGen/MachineRegisterInfo.h"
37 #include "llvm/CodeGen/SelectionDAG.h"
38 #include "llvm/CodeGen/SelectionDAGISel.h"
39 #include "llvm/CodeGen/SelectionDAGNodes.h"
40 #include "llvm/CodeGen/ValueTypes.h"
41 #include "llvm/IR/BasicBlock.h"
42 #include "llvm/InitializePasses.h"
43 #ifdef EXPENSIVE_CHECKS
44 #include "llvm/IR/Dominators.h"
45 #endif
46 #include "llvm/IR/Instruction.h"
47 #include "llvm/MC/MCInstrDesc.h"
48 #include "llvm/Support/Casting.h"
49 #include "llvm/Support/CodeGen.h"
50 #include "llvm/Support/ErrorHandling.h"
51 #include "llvm/Support/MachineValueType.h"
52 #include "llvm/Support/MathExtras.h"
53 #include <cassert>
54 #include <cstdint>
55 #include <new>
56 #include <vector>
57 
58 #define DEBUG_TYPE "isel"
59 
60 using namespace llvm;
61 
62 namespace llvm {
63 
64 class R600InstrInfo;
65 
66 } // end namespace llvm
67 
68 //===----------------------------------------------------------------------===//
69 // Instruction Selector Implementation
70 //===----------------------------------------------------------------------===//
71 
72 namespace {
73 
74 static bool isNullConstantOrUndef(SDValue V) {
75   if (V.isUndef())
76     return true;
77 
78   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
79   return Const != nullptr && Const->isNullValue();
80 }
81 
82 static bool getConstantValue(SDValue N, uint32_t &Out) {
83   // This is only used for packed vectors, where ussing 0 for undef should
84   // always be good.
85   if (N.isUndef()) {
86     Out = 0;
87     return true;
88   }
89 
90   if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
91     Out = C->getAPIntValue().getSExtValue();
92     return true;
93   }
94 
95   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N)) {
96     Out = C->getValueAPF().bitcastToAPInt().getSExtValue();
97     return true;
98   }
99 
100   return false;
101 }
102 
103 // TODO: Handle undef as zero
104 static SDNode *packConstantV2I16(const SDNode *N, SelectionDAG &DAG,
105                                  bool Negate = false) {
106   assert(N->getOpcode() == ISD::BUILD_VECTOR && N->getNumOperands() == 2);
107   uint32_t LHSVal, RHSVal;
108   if (getConstantValue(N->getOperand(0), LHSVal) &&
109       getConstantValue(N->getOperand(1), RHSVal)) {
110     SDLoc SL(N);
111     uint32_t K = Negate ?
112       (-LHSVal & 0xffff) | (-RHSVal << 16) :
113       (LHSVal & 0xffff) | (RHSVal << 16);
114     return DAG.getMachineNode(AMDGPU::S_MOV_B32, SL, N->getValueType(0),
115                               DAG.getTargetConstant(K, SL, MVT::i32));
116   }
117 
118   return nullptr;
119 }
120 
121 static SDNode *packNegConstantV2I16(const SDNode *N, SelectionDAG &DAG) {
122   return packConstantV2I16(N, DAG, true);
123 }
124 
125 /// AMDGPU specific code to select AMDGPU machine instructions for
126 /// SelectionDAG operations.
127 class AMDGPUDAGToDAGISel : public SelectionDAGISel {
128   // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
129   // make the right decision when generating code for different targets.
130   const GCNSubtarget *Subtarget;
131 
132   // Default FP mode for the current function.
133   AMDGPU::SIModeRegisterDefaults Mode;
134 
135   bool EnableLateStructurizeCFG;
136 
137 public:
138   explicit AMDGPUDAGToDAGISel(TargetMachine *TM = nullptr,
139                               CodeGenOpt::Level OptLevel = CodeGenOpt::Default)
140     : SelectionDAGISel(*TM, OptLevel) {
141     EnableLateStructurizeCFG = AMDGPUTargetMachine::EnableLateStructurizeCFG;
142   }
143   ~AMDGPUDAGToDAGISel() override = default;
144 
145   void getAnalysisUsage(AnalysisUsage &AU) const override {
146     AU.addRequired<AMDGPUArgumentUsageInfo>();
147     AU.addRequired<LegacyDivergenceAnalysis>();
148 #ifdef EXPENSIVE_CHECKS
149     AU.addRequired<DominatorTreeWrapperPass>();
150     AU.addRequired<LoopInfoWrapperPass>();
151 #endif
152     SelectionDAGISel::getAnalysisUsage(AU);
153   }
154 
155   bool matchLoadD16FromBuildVector(SDNode *N) const;
156 
157   bool runOnMachineFunction(MachineFunction &MF) override;
158   void PreprocessISelDAG() override;
159   void Select(SDNode *N) override;
160   StringRef getPassName() const override;
161   void PostprocessISelDAG() override;
162 
163 protected:
164   void SelectBuildVector(SDNode *N, unsigned RegClassID);
165 
166 private:
167   std::pair<SDValue, SDValue> foldFrameIndex(SDValue N) const;
168   bool isNoNanSrc(SDValue N) const;
169   bool isInlineImmediate(const SDNode *N, bool Negated = false) const;
170   bool isNegInlineImmediate(const SDNode *N) const {
171     return isInlineImmediate(N, true);
172   }
173 
174   bool isInlineImmediate16(int64_t Imm) const {
175     return AMDGPU::isInlinableLiteral16(Imm, Subtarget->hasInv2PiInlineImm());
176   }
177 
178   bool isInlineImmediate32(int64_t Imm) const {
179     return AMDGPU::isInlinableLiteral32(Imm, Subtarget->hasInv2PiInlineImm());
180   }
181 
182   bool isInlineImmediate64(int64_t Imm) const {
183     return AMDGPU::isInlinableLiteral64(Imm, Subtarget->hasInv2PiInlineImm());
184   }
185 
186   bool isInlineImmediate(const APFloat &Imm) const {
187     return Subtarget->getInstrInfo()->isInlineConstant(Imm);
188   }
189 
190   bool isVGPRImm(const SDNode *N) const;
191   bool isUniformLoad(const SDNode *N) const;
192   bool isUniformBr(const SDNode *N) const;
193 
194   MachineSDNode *buildSMovImm64(SDLoc &DL, uint64_t Val, EVT VT) const;
195 
196   SDNode *glueCopyToOp(SDNode *N, SDValue NewChain, SDValue Glue) const;
197   SDNode *glueCopyToM0(SDNode *N, SDValue Val) const;
198   SDNode *glueCopyToM0LDSInit(SDNode *N) const;
199 
200   const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
201   virtual bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
202   virtual bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
203   bool isDSOffsetLegal(SDValue Base, unsigned Offset) const;
204   bool isDSOffset2Legal(SDValue Base, unsigned Offset0, unsigned Offset1,
205                         unsigned Size) const;
206   bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
207   bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
208                                  SDValue &Offset1) const;
209   bool SelectDS128Bit8ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
210                                   SDValue &Offset1) const;
211   bool SelectDSReadWrite2(SDValue Ptr, SDValue &Base, SDValue &Offset0,
212                           SDValue &Offset1, unsigned Size) const;
213   bool SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
214                    SDValue &SOffset, SDValue &Offset, SDValue &Offen,
215                    SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
216                    SDValue &TFE, SDValue &DLC, SDValue &SWZ) const;
217   bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
218                          SDValue &SOffset, SDValue &Offset, SDValue &GLC,
219                          SDValue &SLC, SDValue &TFE, SDValue &DLC,
220                          SDValue &SWZ) const;
221   bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
222                          SDValue &VAddr, SDValue &SOffset, SDValue &Offset,
223                          SDValue &SLC) const;
224   bool SelectMUBUFScratchOffen(SDNode *Parent,
225                                SDValue Addr, SDValue &RSrc, SDValue &VAddr,
226                                SDValue &SOffset, SDValue &ImmOffset) const;
227   bool SelectMUBUFScratchOffset(SDNode *Parent,
228                                 SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
229                                 SDValue &Offset) const;
230 
231   bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
232                          SDValue &Offset, SDValue &GLC, SDValue &SLC,
233                          SDValue &TFE, SDValue &DLC, SDValue &SWZ) const;
234   bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
235                          SDValue &Offset, SDValue &SLC) const;
236   bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
237                          SDValue &Offset) const;
238 
239   template <bool IsSigned>
240   bool SelectFlatOffset(SDNode *N, SDValue Addr, SDValue &VAddr,
241                         SDValue &Offset) const;
242   bool SelectGlobalSAddr(SDNode *N, SDValue Addr, SDValue &SAddr,
243                          SDValue &VOffset, SDValue &Offset) const;
244   bool SelectScratchSAddr(SDNode *N, SDValue Addr, SDValue &SAddr,
245                           SDValue &Offset) const;
246 
247   bool SelectSMRDOffset(SDValue ByteOffsetNode, SDValue &Offset,
248                         bool &Imm) const;
249   SDValue Expand32BitAddress(SDValue Addr) const;
250   bool SelectSMRD(SDValue Addr, SDValue &SBase, SDValue &Offset,
251                   bool &Imm) const;
252   bool SelectSMRDImm(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
253   bool SelectSMRDImm32(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
254   bool SelectSMRDSgpr(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
255   bool SelectSMRDBufferImm(SDValue Addr, SDValue &Offset) const;
256   bool SelectSMRDBufferImm32(SDValue Addr, SDValue &Offset) const;
257   bool SelectMOVRELOffset(SDValue Index, SDValue &Base, SDValue &Offset) const;
258 
259   bool SelectVOP3Mods_NNaN(SDValue In, SDValue &Src, SDValue &SrcMods) const;
260   bool SelectVOP3ModsImpl(SDValue In, SDValue &Src, unsigned &SrcMods,
261                           bool AllowAbs = true) const;
262   bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
263   bool SelectVOP3BMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
264   bool SelectVOP3NoMods(SDValue In, SDValue &Src) const;
265   bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
266                        SDValue &Clamp, SDValue &Omod) const;
267   bool SelectVOP3BMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
268                         SDValue &Clamp, SDValue &Omod) const;
269   bool SelectVOP3NoMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
270                          SDValue &Clamp, SDValue &Omod) const;
271 
272   bool SelectVOP3OMods(SDValue In, SDValue &Src,
273                        SDValue &Clamp, SDValue &Omod) const;
274 
275   bool SelectVOP3PMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
276 
277   bool SelectVOP3OpSel(SDValue In, SDValue &Src, SDValue &SrcMods) const;
278 
279   bool SelectVOP3OpSelMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
280   bool SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src, unsigned &Mods) const;
281   bool SelectVOP3PMadMixMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
282 
283   SDValue getHi16Elt(SDValue In) const;
284 
285   SDValue getMaterializedScalarImm32(int64_t Val, const SDLoc &DL) const;
286 
287   void SelectADD_SUB_I64(SDNode *N);
288   void SelectAddcSubb(SDNode *N);
289   void SelectUADDO_USUBO(SDNode *N);
290   void SelectDIV_SCALE(SDNode *N);
291   void SelectMAD_64_32(SDNode *N);
292   void SelectFMA_W_CHAIN(SDNode *N);
293   void SelectFMUL_W_CHAIN(SDNode *N);
294 
295   SDNode *getS_BFE(unsigned Opcode, const SDLoc &DL, SDValue Val,
296                    uint32_t Offset, uint32_t Width);
297   void SelectS_BFEFromShifts(SDNode *N);
298   void SelectS_BFE(SDNode *N);
299   bool isCBranchSCC(const SDNode *N) const;
300   void SelectBRCOND(SDNode *N);
301   void SelectFMAD_FMA(SDNode *N);
302   void SelectATOMIC_CMP_SWAP(SDNode *N);
303   void SelectDSAppendConsume(SDNode *N, unsigned IntrID);
304   void SelectDS_GWS(SDNode *N, unsigned IntrID);
305   void SelectInterpP1F16(SDNode *N);
306   void SelectINTRINSIC_W_CHAIN(SDNode *N);
307   void SelectINTRINSIC_WO_CHAIN(SDNode *N);
308   void SelectINTRINSIC_VOID(SDNode *N);
309 
310 protected:
311   // Include the pieces autogenerated from the target description.
312 #include "AMDGPUGenDAGISel.inc"
313 };
314 
315 class R600DAGToDAGISel : public AMDGPUDAGToDAGISel {
316   const R600Subtarget *Subtarget;
317 
318   bool isConstantLoad(const MemSDNode *N, int cbID) const;
319   bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
320   bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
321                                        SDValue& Offset);
322 public:
323   explicit R600DAGToDAGISel(TargetMachine *TM, CodeGenOpt::Level OptLevel) :
324       AMDGPUDAGToDAGISel(TM, OptLevel) {}
325 
326   void Select(SDNode *N) override;
327 
328   bool SelectADDRIndirect(SDValue Addr, SDValue &Base,
329                           SDValue &Offset) override;
330   bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
331                           SDValue &Offset) override;
332 
333   bool runOnMachineFunction(MachineFunction &MF) override;
334 
335   void PreprocessISelDAG() override {}
336 
337 protected:
338   // Include the pieces autogenerated from the target description.
339 #include "R600GenDAGISel.inc"
340 };
341 
342 static SDValue stripBitcast(SDValue Val) {
343   return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
344 }
345 
346 // Figure out if this is really an extract of the high 16-bits of a dword.
347 static bool isExtractHiElt(SDValue In, SDValue &Out) {
348   In = stripBitcast(In);
349   if (In.getOpcode() != ISD::TRUNCATE)
350     return false;
351 
352   SDValue Srl = In.getOperand(0);
353   if (Srl.getOpcode() == ISD::SRL) {
354     if (ConstantSDNode *ShiftAmt = dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
355       if (ShiftAmt->getZExtValue() == 16) {
356         Out = stripBitcast(Srl.getOperand(0));
357         return true;
358       }
359     }
360   }
361 
362   return false;
363 }
364 
365 // Look through operations that obscure just looking at the low 16-bits of the
366 // same register.
367 static SDValue stripExtractLoElt(SDValue In) {
368   if (In.getOpcode() == ISD::TRUNCATE) {
369     SDValue Src = In.getOperand(0);
370     if (Src.getValueType().getSizeInBits() == 32)
371       return stripBitcast(Src);
372   }
373 
374   return In;
375 }
376 
377 }  // end anonymous namespace
378 
379 INITIALIZE_PASS_BEGIN(AMDGPUDAGToDAGISel, "amdgpu-isel",
380                       "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)
381 INITIALIZE_PASS_DEPENDENCY(AMDGPUArgumentUsageInfo)
382 INITIALIZE_PASS_DEPENDENCY(AMDGPUPerfHintAnalysis)
383 INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
384 #ifdef EXPENSIVE_CHECKS
385 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
386 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
387 #endif
388 INITIALIZE_PASS_END(AMDGPUDAGToDAGISel, "amdgpu-isel",
389                     "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)
390 
391 /// This pass converts a legalized DAG into a AMDGPU-specific
392 // DAG, ready for instruction scheduling.
393 FunctionPass *llvm::createAMDGPUISelDag(TargetMachine *TM,
394                                         CodeGenOpt::Level OptLevel) {
395   return new AMDGPUDAGToDAGISel(TM, OptLevel);
396 }
397 
398 /// This pass converts a legalized DAG into a R600-specific
399 // DAG, ready for instruction scheduling.
400 FunctionPass *llvm::createR600ISelDag(TargetMachine *TM,
401                                       CodeGenOpt::Level OptLevel) {
402   return new R600DAGToDAGISel(TM, OptLevel);
403 }
404 
405 bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
406 #ifdef EXPENSIVE_CHECKS
407   DominatorTree & DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
408   LoopInfo * LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
409   for (auto &L : LI->getLoopsInPreorder()) {
410     assert(L->isLCSSAForm(DT));
411   }
412 #endif
413   Subtarget = &MF.getSubtarget<GCNSubtarget>();
414   Mode = AMDGPU::SIModeRegisterDefaults(MF.getFunction());
415   return SelectionDAGISel::runOnMachineFunction(MF);
416 }
417 
418 bool AMDGPUDAGToDAGISel::matchLoadD16FromBuildVector(SDNode *N) const {
419   assert(Subtarget->d16PreservesUnusedBits());
420   MVT VT = N->getValueType(0).getSimpleVT();
421   if (VT != MVT::v2i16 && VT != MVT::v2f16)
422     return false;
423 
424   SDValue Lo = N->getOperand(0);
425   SDValue Hi = N->getOperand(1);
426 
427   LoadSDNode *LdHi = dyn_cast<LoadSDNode>(stripBitcast(Hi));
428 
429   // build_vector lo, (load ptr) -> load_d16_hi ptr, lo
430   // build_vector lo, (zextload ptr from i8) -> load_d16_hi_u8 ptr, lo
431   // build_vector lo, (sextload ptr from i8) -> load_d16_hi_i8 ptr, lo
432 
433   // Need to check for possible indirect dependencies on the other half of the
434   // vector to avoid introducing a cycle.
435   if (LdHi && Hi.hasOneUse() && !LdHi->isPredecessorOf(Lo.getNode())) {
436     SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);
437 
438     SDValue TiedIn = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Lo);
439     SDValue Ops[] = {
440       LdHi->getChain(), LdHi->getBasePtr(), TiedIn
441     };
442 
443     unsigned LoadOp = AMDGPUISD::LOAD_D16_HI;
444     if (LdHi->getMemoryVT() == MVT::i8) {
445       LoadOp = LdHi->getExtensionType() == ISD::SEXTLOAD ?
446         AMDGPUISD::LOAD_D16_HI_I8 : AMDGPUISD::LOAD_D16_HI_U8;
447     } else {
448       assert(LdHi->getMemoryVT() == MVT::i16);
449     }
450 
451     SDValue NewLoadHi =
452       CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdHi), VTList,
453                                   Ops, LdHi->getMemoryVT(),
454                                   LdHi->getMemOperand());
455 
456     CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadHi);
457     CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdHi, 1), NewLoadHi.getValue(1));
458     return true;
459   }
460 
461   // build_vector (load ptr), hi -> load_d16_lo ptr, hi
462   // build_vector (zextload ptr from i8), hi -> load_d16_lo_u8 ptr, hi
463   // build_vector (sextload ptr from i8), hi -> load_d16_lo_i8 ptr, hi
464   LoadSDNode *LdLo = dyn_cast<LoadSDNode>(stripBitcast(Lo));
465   if (LdLo && Lo.hasOneUse()) {
466     SDValue TiedIn = getHi16Elt(Hi);
467     if (!TiedIn || LdLo->isPredecessorOf(TiedIn.getNode()))
468       return false;
469 
470     SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);
471     unsigned LoadOp = AMDGPUISD::LOAD_D16_LO;
472     if (LdLo->getMemoryVT() == MVT::i8) {
473       LoadOp = LdLo->getExtensionType() == ISD::SEXTLOAD ?
474         AMDGPUISD::LOAD_D16_LO_I8 : AMDGPUISD::LOAD_D16_LO_U8;
475     } else {
476       assert(LdLo->getMemoryVT() == MVT::i16);
477     }
478 
479     TiedIn = CurDAG->getNode(ISD::BITCAST, SDLoc(N), VT, TiedIn);
480 
481     SDValue Ops[] = {
482       LdLo->getChain(), LdLo->getBasePtr(), TiedIn
483     };
484 
485     SDValue NewLoadLo =
486       CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdLo), VTList,
487                                   Ops, LdLo->getMemoryVT(),
488                                   LdLo->getMemOperand());
489 
490     CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadLo);
491     CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdLo, 1), NewLoadLo.getValue(1));
492     return true;
493   }
494 
495   return false;
496 }
497 
498 void AMDGPUDAGToDAGISel::PreprocessISelDAG() {
499   if (!Subtarget->d16PreservesUnusedBits())
500     return;
501 
502   SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
503 
504   bool MadeChange = false;
505   while (Position != CurDAG->allnodes_begin()) {
506     SDNode *N = &*--Position;
507     if (N->use_empty())
508       continue;
509 
510     switch (N->getOpcode()) {
511     case ISD::BUILD_VECTOR:
512       MadeChange |= matchLoadD16FromBuildVector(N);
513       break;
514     default:
515       break;
516     }
517   }
518 
519   if (MadeChange) {
520     CurDAG->RemoveDeadNodes();
521     LLVM_DEBUG(dbgs() << "After PreProcess:\n";
522                CurDAG->dump(););
523   }
524 }
525 
526 bool AMDGPUDAGToDAGISel::isNoNanSrc(SDValue N) const {
527   if (TM.Options.NoNaNsFPMath)
528     return true;
529 
530   // TODO: Move into isKnownNeverNaN
531   if (N->getFlags().hasNoNaNs())
532     return true;
533 
534   return CurDAG->isKnownNeverNaN(N);
535 }
536 
537 bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N,
538                                            bool Negated) const {
539   if (N->isUndef())
540     return true;
541 
542   const SIInstrInfo *TII = Subtarget->getInstrInfo();
543   if (Negated) {
544     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
545       return TII->isInlineConstant(-C->getAPIntValue());
546 
547     if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
548       return TII->isInlineConstant(-C->getValueAPF().bitcastToAPInt());
549 
550   } else {
551     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
552       return TII->isInlineConstant(C->getAPIntValue());
553 
554     if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
555       return TII->isInlineConstant(C->getValueAPF().bitcastToAPInt());
556   }
557 
558   return false;
559 }
560 
561 /// Determine the register class for \p OpNo
562 /// \returns The register class of the virtual register that will be used for
563 /// the given operand number \OpNo or NULL if the register class cannot be
564 /// determined.
565 const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
566                                                           unsigned OpNo) const {
567   if (!N->isMachineOpcode()) {
568     if (N->getOpcode() == ISD::CopyToReg) {
569       Register Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
570       if (Reg.isVirtual()) {
571         MachineRegisterInfo &MRI = CurDAG->getMachineFunction().getRegInfo();
572         return MRI.getRegClass(Reg);
573       }
574 
575       const SIRegisterInfo *TRI
576         = static_cast<const GCNSubtarget *>(Subtarget)->getRegisterInfo();
577       return TRI->getPhysRegClass(Reg);
578     }
579 
580     return nullptr;
581   }
582 
583   switch (N->getMachineOpcode()) {
584   default: {
585     const MCInstrDesc &Desc =
586         Subtarget->getInstrInfo()->get(N->getMachineOpcode());
587     unsigned OpIdx = Desc.getNumDefs() + OpNo;
588     if (OpIdx >= Desc.getNumOperands())
589       return nullptr;
590     int RegClass = Desc.OpInfo[OpIdx].RegClass;
591     if (RegClass == -1)
592       return nullptr;
593 
594     return Subtarget->getRegisterInfo()->getRegClass(RegClass);
595   }
596   case AMDGPU::REG_SEQUENCE: {
597     unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
598     const TargetRegisterClass *SuperRC =
599         Subtarget->getRegisterInfo()->getRegClass(RCID);
600 
601     SDValue SubRegOp = N->getOperand(OpNo + 1);
602     unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
603     return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
604                                                               SubRegIdx);
605   }
606   }
607 }
608 
609 SDNode *AMDGPUDAGToDAGISel::glueCopyToOp(SDNode *N, SDValue NewChain,
610                                          SDValue Glue) const {
611   SmallVector <SDValue, 8> Ops;
612   Ops.push_back(NewChain); // Replace the chain.
613   for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
614     Ops.push_back(N->getOperand(i));
615 
616   Ops.push_back(Glue);
617   return CurDAG->MorphNodeTo(N, N->getOpcode(), N->getVTList(), Ops);
618 }
619 
620 SDNode *AMDGPUDAGToDAGISel::glueCopyToM0(SDNode *N, SDValue Val) const {
621   const SITargetLowering& Lowering =
622     *static_cast<const SITargetLowering*>(getTargetLowering());
623 
624   assert(N->getOperand(0).getValueType() == MVT::Other && "Expected chain");
625 
626   SDValue M0 = Lowering.copyToM0(*CurDAG, N->getOperand(0), SDLoc(N), Val);
627   return glueCopyToOp(N, M0, M0.getValue(1));
628 }
629 
630 SDNode *AMDGPUDAGToDAGISel::glueCopyToM0LDSInit(SDNode *N) const {
631   unsigned AS = cast<MemSDNode>(N)->getAddressSpace();
632   if (AS == AMDGPUAS::LOCAL_ADDRESS) {
633     if (Subtarget->ldsRequiresM0Init())
634       return glueCopyToM0(N, CurDAG->getTargetConstant(-1, SDLoc(N), MVT::i32));
635   } else if (AS == AMDGPUAS::REGION_ADDRESS) {
636     MachineFunction &MF = CurDAG->getMachineFunction();
637     unsigned Value = MF.getInfo<SIMachineFunctionInfo>()->getGDSSize();
638     return
639         glueCopyToM0(N, CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i32));
640   }
641   return N;
642 }
643 
644 MachineSDNode *AMDGPUDAGToDAGISel::buildSMovImm64(SDLoc &DL, uint64_t Imm,
645                                                   EVT VT) const {
646   SDNode *Lo = CurDAG->getMachineNode(
647       AMDGPU::S_MOV_B32, DL, MVT::i32,
648       CurDAG->getTargetConstant(Imm & 0xFFFFFFFF, DL, MVT::i32));
649   SDNode *Hi =
650       CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
651                              CurDAG->getTargetConstant(Imm >> 32, DL, MVT::i32));
652   const SDValue Ops[] = {
653       CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
654       SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
655       SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)};
656 
657   return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, VT, Ops);
658 }
659 
660 void AMDGPUDAGToDAGISel::SelectBuildVector(SDNode *N, unsigned RegClassID) {
661   EVT VT = N->getValueType(0);
662   unsigned NumVectorElts = VT.getVectorNumElements();
663   EVT EltVT = VT.getVectorElementType();
664   SDLoc DL(N);
665   SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
666 
667   if (NumVectorElts == 1) {
668     CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT, N->getOperand(0),
669                          RegClass);
670     return;
671   }
672 
673   assert(NumVectorElts <= 32 && "Vectors with more than 32 elements not "
674                                   "supported yet");
675   // 32 = Max Num Vector Elements
676   // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
677   // 1 = Vector Register Class
678   SmallVector<SDValue, 32 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);
679 
680   bool IsGCN = CurDAG->getSubtarget().getTargetTriple().getArch() ==
681                Triple::amdgcn;
682   RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
683   bool IsRegSeq = true;
684   unsigned NOps = N->getNumOperands();
685   for (unsigned i = 0; i < NOps; i++) {
686     // XXX: Why is this here?
687     if (isa<RegisterSDNode>(N->getOperand(i))) {
688       IsRegSeq = false;
689       break;
690     }
691     unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
692                          : R600RegisterInfo::getSubRegFromChannel(i);
693     RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
694     RegSeqArgs[1 + (2 * i) + 1] = CurDAG->getTargetConstant(Sub, DL, MVT::i32);
695   }
696   if (NOps != NumVectorElts) {
697     // Fill in the missing undef elements if this was a scalar_to_vector.
698     assert(N->getOpcode() == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
699     MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
700                                                    DL, EltVT);
701     for (unsigned i = NOps; i < NumVectorElts; ++i) {
702       unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
703                            : R600RegisterInfo::getSubRegFromChannel(i);
704       RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
705       RegSeqArgs[1 + (2 * i) + 1] =
706           CurDAG->getTargetConstant(Sub, DL, MVT::i32);
707     }
708   }
709 
710   if (!IsRegSeq)
711     SelectCode(N);
712   CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(), RegSeqArgs);
713 }
714 
715 void AMDGPUDAGToDAGISel::Select(SDNode *N) {
716   unsigned int Opc = N->getOpcode();
717   if (N->isMachineOpcode()) {
718     N->setNodeId(-1);
719     return;   // Already selected.
720   }
721 
722   // isa<MemSDNode> almost works but is slightly too permissive for some DS
723   // intrinsics.
724   if (Opc == ISD::LOAD || Opc == ISD::STORE || isa<AtomicSDNode>(N) ||
725       (Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC ||
726        Opc == ISD::ATOMIC_LOAD_FADD ||
727        Opc == AMDGPUISD::ATOMIC_LOAD_FMIN ||
728        Opc == AMDGPUISD::ATOMIC_LOAD_FMAX)) {
729     N = glueCopyToM0LDSInit(N);
730     SelectCode(N);
731     return;
732   }
733 
734   switch (Opc) {
735   default:
736     break;
737   // We are selecting i64 ADD here instead of custom lower it during
738   // DAG legalization, so we can fold some i64 ADDs used for address
739   // calculation into the LOAD and STORE instructions.
740   case ISD::ADDC:
741   case ISD::ADDE:
742   case ISD::SUBC:
743   case ISD::SUBE: {
744     if (N->getValueType(0) != MVT::i64)
745       break;
746 
747     SelectADD_SUB_I64(N);
748     return;
749   }
750   case ISD::ADDCARRY:
751   case ISD::SUBCARRY:
752     if (N->getValueType(0) != MVT::i32)
753       break;
754 
755     SelectAddcSubb(N);
756     return;
757   case ISD::UADDO:
758   case ISD::USUBO: {
759     SelectUADDO_USUBO(N);
760     return;
761   }
762   case AMDGPUISD::FMUL_W_CHAIN: {
763     SelectFMUL_W_CHAIN(N);
764     return;
765   }
766   case AMDGPUISD::FMA_W_CHAIN: {
767     SelectFMA_W_CHAIN(N);
768     return;
769   }
770 
771   case ISD::SCALAR_TO_VECTOR:
772   case ISD::BUILD_VECTOR: {
773     EVT VT = N->getValueType(0);
774     unsigned NumVectorElts = VT.getVectorNumElements();
775     if (VT.getScalarSizeInBits() == 16) {
776       if (Opc == ISD::BUILD_VECTOR && NumVectorElts == 2) {
777         if (SDNode *Packed = packConstantV2I16(N, *CurDAG)) {
778           ReplaceNode(N, Packed);
779           return;
780         }
781       }
782 
783       break;
784     }
785 
786     assert(VT.getVectorElementType().bitsEq(MVT::i32));
787     unsigned RegClassID =
788         SIRegisterInfo::getSGPRClassForBitWidth(NumVectorElts * 32)->getID();
789     SelectBuildVector(N, RegClassID);
790     return;
791   }
792   case ISD::BUILD_PAIR: {
793     SDValue RC, SubReg0, SubReg1;
794     SDLoc DL(N);
795     if (N->getValueType(0) == MVT::i128) {
796       RC = CurDAG->getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32);
797       SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32);
798       SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32);
799     } else if (N->getValueType(0) == MVT::i64) {
800       RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32);
801       SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
802       SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
803     } else {
804       llvm_unreachable("Unhandled value type for BUILD_PAIR");
805     }
806     const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
807                             N->getOperand(1), SubReg1 };
808     ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
809                                           N->getValueType(0), Ops));
810     return;
811   }
812 
813   case ISD::Constant:
814   case ISD::ConstantFP: {
815     if (N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
816       break;
817 
818     uint64_t Imm;
819     if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
820       Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
821     else {
822       ConstantSDNode *C = cast<ConstantSDNode>(N);
823       Imm = C->getZExtValue();
824     }
825 
826     SDLoc DL(N);
827     ReplaceNode(N, buildSMovImm64(DL, Imm, N->getValueType(0)));
828     return;
829   }
830   case AMDGPUISD::BFE_I32:
831   case AMDGPUISD::BFE_U32: {
832     // There is a scalar version available, but unlike the vector version which
833     // has a separate operand for the offset and width, the scalar version packs
834     // the width and offset into a single operand. Try to move to the scalar
835     // version if the offsets are constant, so that we can try to keep extended
836     // loads of kernel arguments in SGPRs.
837 
838     // TODO: Technically we could try to pattern match scalar bitshifts of
839     // dynamic values, but it's probably not useful.
840     ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
841     if (!Offset)
842       break;
843 
844     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
845     if (!Width)
846       break;
847 
848     bool Signed = Opc == AMDGPUISD::BFE_I32;
849 
850     uint32_t OffsetVal = Offset->getZExtValue();
851     uint32_t WidthVal = Width->getZExtValue();
852 
853     ReplaceNode(N, getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
854                             SDLoc(N), N->getOperand(0), OffsetVal, WidthVal));
855     return;
856   }
857   case AMDGPUISD::DIV_SCALE: {
858     SelectDIV_SCALE(N);
859     return;
860   }
861   case AMDGPUISD::MAD_I64_I32:
862   case AMDGPUISD::MAD_U64_U32: {
863     SelectMAD_64_32(N);
864     return;
865   }
866   case ISD::CopyToReg: {
867     const SITargetLowering& Lowering =
868       *static_cast<const SITargetLowering*>(getTargetLowering());
869     N = Lowering.legalizeTargetIndependentNode(N, *CurDAG);
870     break;
871   }
872   case ISD::AND:
873   case ISD::SRL:
874   case ISD::SRA:
875   case ISD::SIGN_EXTEND_INREG:
876     if (N->getValueType(0) != MVT::i32)
877       break;
878 
879     SelectS_BFE(N);
880     return;
881   case ISD::BRCOND:
882     SelectBRCOND(N);
883     return;
884   case ISD::FMAD:
885   case ISD::FMA:
886     SelectFMAD_FMA(N);
887     return;
888   case AMDGPUISD::ATOMIC_CMP_SWAP:
889     SelectATOMIC_CMP_SWAP(N);
890     return;
891   case AMDGPUISD::CVT_PKRTZ_F16_F32:
892   case AMDGPUISD::CVT_PKNORM_I16_F32:
893   case AMDGPUISD::CVT_PKNORM_U16_F32:
894   case AMDGPUISD::CVT_PK_U16_U32:
895   case AMDGPUISD::CVT_PK_I16_I32: {
896     // Hack around using a legal type if f16 is illegal.
897     if (N->getValueType(0) == MVT::i32) {
898       MVT NewVT = Opc == AMDGPUISD::CVT_PKRTZ_F16_F32 ? MVT::v2f16 : MVT::v2i16;
899       N = CurDAG->MorphNodeTo(N, N->getOpcode(), CurDAG->getVTList(NewVT),
900                               { N->getOperand(0), N->getOperand(1) });
901       SelectCode(N);
902       return;
903     }
904 
905     break;
906   }
907   case ISD::INTRINSIC_W_CHAIN: {
908     SelectINTRINSIC_W_CHAIN(N);
909     return;
910   }
911   case ISD::INTRINSIC_WO_CHAIN: {
912     SelectINTRINSIC_WO_CHAIN(N);
913     return;
914   }
915   case ISD::INTRINSIC_VOID: {
916     SelectINTRINSIC_VOID(N);
917     return;
918   }
919   }
920 
921   SelectCode(N);
922 }
923 
924 bool AMDGPUDAGToDAGISel::isUniformBr(const SDNode *N) const {
925   const BasicBlock *BB = FuncInfo->MBB->getBasicBlock();
926   const Instruction *Term = BB->getTerminator();
927   return Term->getMetadata("amdgpu.uniform") ||
928          Term->getMetadata("structurizecfg.uniform");
929 }
930 
931 StringRef AMDGPUDAGToDAGISel::getPassName() const {
932   return "AMDGPU DAG->DAG Pattern Instruction Selection";
933 }
934 
935 //===----------------------------------------------------------------------===//
936 // Complex Patterns
937 //===----------------------------------------------------------------------===//
938 
939 bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
940                                             SDValue &Offset) {
941   return false;
942 }
943 
944 bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
945                                             SDValue &Offset) {
946   ConstantSDNode *C;
947   SDLoc DL(Addr);
948 
949   if ((C = dyn_cast<ConstantSDNode>(Addr))) {
950     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
951     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
952   } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
953              (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
954     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
955     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
956   } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
957             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
958     Base = Addr.getOperand(0);
959     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
960   } else {
961     Base = Addr;
962     Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
963   }
964 
965   return true;
966 }
967 
968 SDValue AMDGPUDAGToDAGISel::getMaterializedScalarImm32(int64_t Val,
969                                                        const SDLoc &DL) const {
970   SDNode *Mov = CurDAG->getMachineNode(
971     AMDGPU::S_MOV_B32, DL, MVT::i32,
972     CurDAG->getTargetConstant(Val, DL, MVT::i32));
973   return SDValue(Mov, 0);
974 }
975 
976 // FIXME: Should only handle addcarry/subcarry
977 void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
978   SDLoc DL(N);
979   SDValue LHS = N->getOperand(0);
980   SDValue RHS = N->getOperand(1);
981 
982   unsigned Opcode = N->getOpcode();
983   bool ConsumeCarry = (Opcode == ISD::ADDE || Opcode == ISD::SUBE);
984   bool ProduceCarry =
985       ConsumeCarry || Opcode == ISD::ADDC || Opcode == ISD::SUBC;
986   bool IsAdd = Opcode == ISD::ADD || Opcode == ISD::ADDC || Opcode == ISD::ADDE;
987 
988   SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
989   SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
990 
991   SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
992                                        DL, MVT::i32, LHS, Sub0);
993   SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
994                                        DL, MVT::i32, LHS, Sub1);
995 
996   SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
997                                        DL, MVT::i32, RHS, Sub0);
998   SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
999                                        DL, MVT::i32, RHS, Sub1);
1000 
1001   SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
1002 
1003   static const unsigned OpcMap[2][2][2] = {
1004       {{AMDGPU::S_SUB_U32, AMDGPU::S_ADD_U32},
1005        {AMDGPU::V_SUB_CO_U32_e32, AMDGPU::V_ADD_CO_U32_e32}},
1006       {{AMDGPU::S_SUBB_U32, AMDGPU::S_ADDC_U32},
1007        {AMDGPU::V_SUBB_U32_e32, AMDGPU::V_ADDC_U32_e32}}};
1008 
1009   unsigned Opc = OpcMap[0][N->isDivergent()][IsAdd];
1010   unsigned CarryOpc = OpcMap[1][N->isDivergent()][IsAdd];
1011 
1012   SDNode *AddLo;
1013   if (!ConsumeCarry) {
1014     SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };
1015     AddLo = CurDAG->getMachineNode(Opc, DL, VTList, Args);
1016   } else {
1017     SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0), N->getOperand(2) };
1018     AddLo = CurDAG->getMachineNode(CarryOpc, DL, VTList, Args);
1019   }
1020   SDValue AddHiArgs[] = {
1021     SDValue(Hi0, 0),
1022     SDValue(Hi1, 0),
1023     SDValue(AddLo, 1)
1024   };
1025   SDNode *AddHi = CurDAG->getMachineNode(CarryOpc, DL, VTList, AddHiArgs);
1026 
1027   SDValue RegSequenceArgs[] = {
1028     CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
1029     SDValue(AddLo,0),
1030     Sub0,
1031     SDValue(AddHi,0),
1032     Sub1,
1033   };
1034   SDNode *RegSequence = CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
1035                                                MVT::i64, RegSequenceArgs);
1036 
1037   if (ProduceCarry) {
1038     // Replace the carry-use
1039     ReplaceUses(SDValue(N, 1), SDValue(AddHi, 1));
1040   }
1041 
1042   // Replace the remaining uses.
1043   ReplaceNode(N, RegSequence);
1044 }
1045 
1046 void AMDGPUDAGToDAGISel::SelectAddcSubb(SDNode *N) {
1047   SDLoc DL(N);
1048   SDValue LHS = N->getOperand(0);
1049   SDValue RHS = N->getOperand(1);
1050   SDValue CI = N->getOperand(2);
1051 
1052   if (N->isDivergent()) {
1053     unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::V_ADDC_U32_e64
1054                                                    : AMDGPU::V_SUBB_U32_e64;
1055     CurDAG->SelectNodeTo(
1056         N, Opc, N->getVTList(),
1057         {LHS, RHS, CI,
1058          CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
1059   } else {
1060     unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::S_ADD_CO_PSEUDO
1061                                                    : AMDGPU::S_SUB_CO_PSEUDO;
1062     CurDAG->SelectNodeTo(N, Opc, N->getVTList(), {LHS, RHS, CI});
1063   }
1064 }
1065 
1066 void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) {
1067   // The name of the opcodes are misleading. v_add_i32/v_sub_i32 have unsigned
1068   // carry out despite the _i32 name. These were renamed in VI to _U32.
1069   // FIXME: We should probably rename the opcodes here.
1070   bool IsAdd = N->getOpcode() == ISD::UADDO;
1071   bool IsVALU = N->isDivergent();
1072 
1073   for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end(); UI != E;
1074        ++UI)
1075     if (UI.getUse().getResNo() == 1) {
1076       if ((IsAdd && (UI->getOpcode() != ISD::ADDCARRY)) ||
1077           (!IsAdd && (UI->getOpcode() != ISD::SUBCARRY))) {
1078         IsVALU = true;
1079         break;
1080       }
1081     }
1082 
1083   if (IsVALU) {
1084     unsigned Opc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
1085 
1086     CurDAG->SelectNodeTo(
1087         N, Opc, N->getVTList(),
1088         {N->getOperand(0), N->getOperand(1),
1089          CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
1090   } else {
1091     unsigned Opc = N->getOpcode() == ISD::UADDO ? AMDGPU::S_UADDO_PSEUDO
1092                                                 : AMDGPU::S_USUBO_PSEUDO;
1093 
1094     CurDAG->SelectNodeTo(N, Opc, N->getVTList(),
1095                          {N->getOperand(0), N->getOperand(1)});
1096   }
1097 }
1098 
1099 void AMDGPUDAGToDAGISel::SelectFMA_W_CHAIN(SDNode *N) {
1100   SDLoc SL(N);
1101   //  src0_modifiers, src0,  src1_modifiers, src1, src2_modifiers, src2, clamp, omod
1102   SDValue Ops[10];
1103 
1104   SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[6], Ops[7]);
1105   SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
1106   SelectVOP3Mods(N->getOperand(3), Ops[5], Ops[4]);
1107   Ops[8] = N->getOperand(0);
1108   Ops[9] = N->getOperand(4);
1109 
1110   CurDAG->SelectNodeTo(N, AMDGPU::V_FMA_F32, N->getVTList(), Ops);
1111 }
1112 
1113 void AMDGPUDAGToDAGISel::SelectFMUL_W_CHAIN(SDNode *N) {
1114   SDLoc SL(N);
1115   //    src0_modifiers, src0,  src1_modifiers, src1, clamp, omod
1116   SDValue Ops[8];
1117 
1118   SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[4], Ops[5]);
1119   SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
1120   Ops[6] = N->getOperand(0);
1121   Ops[7] = N->getOperand(3);
1122 
1123   CurDAG->SelectNodeTo(N, AMDGPU::V_MUL_F32_e64, N->getVTList(), Ops);
1124 }
1125 
1126 // We need to handle this here because tablegen doesn't support matching
1127 // instructions with multiple outputs.
1128 void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
1129   SDLoc SL(N);
1130   EVT VT = N->getValueType(0);
1131 
1132   assert(VT == MVT::f32 || VT == MVT::f64);
1133 
1134   unsigned Opc
1135     = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;
1136 
1137   // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp,
1138   // omod
1139   SDValue Ops[8];
1140   SelectVOP3BMods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
1141   SelectVOP3BMods(N->getOperand(1), Ops[3], Ops[2]);
1142   SelectVOP3BMods(N->getOperand(2), Ops[5], Ops[4]);
1143   CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
1144 }
1145 
1146 // We need to handle this here because tablegen doesn't support matching
1147 // instructions with multiple outputs.
1148 void AMDGPUDAGToDAGISel::SelectMAD_64_32(SDNode *N) {
1149   SDLoc SL(N);
1150   bool Signed = N->getOpcode() == AMDGPUISD::MAD_I64_I32;
1151   unsigned Opc = Signed ? AMDGPU::V_MAD_I64_I32 : AMDGPU::V_MAD_U64_U32;
1152 
1153   SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
1154   SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
1155                     Clamp };
1156   CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
1157 }
1158 
1159 bool AMDGPUDAGToDAGISel::isDSOffsetLegal(SDValue Base, unsigned Offset) const {
1160   if (!isUInt<16>(Offset))
1161     return false;
1162 
1163   if (!Base || Subtarget->hasUsableDSOffset() ||
1164       Subtarget->unsafeDSOffsetFoldingEnabled())
1165     return true;
1166 
1167   // On Southern Islands instruction with a negative base value and an offset
1168   // don't seem to work.
1169   return CurDAG->SignBitIsZero(Base);
1170 }
1171 
1172 bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
1173                                               SDValue &Offset) const {
1174   SDLoc DL(Addr);
1175   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1176     SDValue N0 = Addr.getOperand(0);
1177     SDValue N1 = Addr.getOperand(1);
1178     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
1179     if (isDSOffsetLegal(N0, C1->getSExtValue())) {
1180       // (add n0, c0)
1181       Base = N0;
1182       Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
1183       return true;
1184     }
1185   } else if (Addr.getOpcode() == ISD::SUB) {
1186     // sub C, x -> add (sub 0, x), C
1187     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
1188       int64_t ByteOffset = C->getSExtValue();
1189       if (isDSOffsetLegal(SDValue(), ByteOffset)) {
1190         SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
1191 
1192         // XXX - This is kind of hacky. Create a dummy sub node so we can check
1193         // the known bits in isDSOffsetLegal. We need to emit the selected node
1194         // here, so this is thrown away.
1195         SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
1196                                       Zero, Addr.getOperand(1));
1197 
1198         if (isDSOffsetLegal(Sub, ByteOffset)) {
1199           SmallVector<SDValue, 3> Opnds;
1200           Opnds.push_back(Zero);
1201           Opnds.push_back(Addr.getOperand(1));
1202 
1203           // FIXME: Select to VOP3 version for with-carry.
1204           unsigned SubOp = AMDGPU::V_SUB_CO_U32_e32;
1205           if (Subtarget->hasAddNoCarry()) {
1206             SubOp = AMDGPU::V_SUB_U32_e64;
1207             Opnds.push_back(
1208                 CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
1209           }
1210 
1211           MachineSDNode *MachineSub =
1212               CurDAG->getMachineNode(SubOp, DL, MVT::i32, Opnds);
1213 
1214           Base = SDValue(MachineSub, 0);
1215           Offset = CurDAG->getTargetConstant(ByteOffset, DL, MVT::i16);
1216           return true;
1217         }
1218       }
1219     }
1220   } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
1221     // If we have a constant address, prefer to put the constant into the
1222     // offset. This can save moves to load the constant address since multiple
1223     // operations can share the zero base address register, and enables merging
1224     // into read2 / write2 instructions.
1225 
1226     SDLoc DL(Addr);
1227 
1228     if (isDSOffsetLegal(SDValue(), CAddr->getZExtValue())) {
1229       SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
1230       MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
1231                                  DL, MVT::i32, Zero);
1232       Base = SDValue(MovZero, 0);
1233       Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
1234       return true;
1235     }
1236   }
1237 
1238   // default case
1239   Base = Addr;
1240   Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i16);
1241   return true;
1242 }
1243 
1244 bool AMDGPUDAGToDAGISel::isDSOffset2Legal(SDValue Base, unsigned Offset0,
1245                                           unsigned Offset1,
1246                                           unsigned Size) const {
1247   if (Offset0 % Size != 0 || Offset1 % Size != 0)
1248     return false;
1249   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
1250     return false;
1251 
1252   if (!Base || Subtarget->hasUsableDSOffset() ||
1253       Subtarget->unsafeDSOffsetFoldingEnabled())
1254     return true;
1255 
1256   // On Southern Islands instruction with a negative base value and an offset
1257   // don't seem to work.
1258   return CurDAG->SignBitIsZero(Base);
1259 }
1260 
1261 // TODO: If offset is too big, put low 16-bit into offset.
1262 bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
1263                                                    SDValue &Offset0,
1264                                                    SDValue &Offset1) const {
1265   return SelectDSReadWrite2(Addr, Base, Offset0, Offset1, 4);
1266 }
1267 
1268 bool AMDGPUDAGToDAGISel::SelectDS128Bit8ByteAligned(SDValue Addr, SDValue &Base,
1269                                                     SDValue &Offset0,
1270                                                     SDValue &Offset1) const {
1271   return SelectDSReadWrite2(Addr, Base, Offset0, Offset1, 8);
1272 }
1273 
1274 bool AMDGPUDAGToDAGISel::SelectDSReadWrite2(SDValue Addr, SDValue &Base,
1275                                             SDValue &Offset0, SDValue &Offset1,
1276                                             unsigned Size) const {
1277   SDLoc DL(Addr);
1278 
1279   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1280     SDValue N0 = Addr.getOperand(0);
1281     SDValue N1 = Addr.getOperand(1);
1282     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
1283     unsigned OffsetValue0 = C1->getZExtValue();
1284     unsigned OffsetValue1 = OffsetValue0 + Size;
1285 
1286     // (add n0, c0)
1287     if (isDSOffset2Legal(N0, OffsetValue0, OffsetValue1, Size)) {
1288       Base = N0;
1289       Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
1290       Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
1291       return true;
1292     }
1293   } else if (Addr.getOpcode() == ISD::SUB) {
1294     // sub C, x -> add (sub 0, x), C
1295     if (const ConstantSDNode *C =
1296             dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
1297       unsigned OffsetValue0 = C->getZExtValue();
1298       unsigned OffsetValue1 = OffsetValue0 + Size;
1299 
1300       if (isDSOffset2Legal(SDValue(), OffsetValue0, OffsetValue1, Size)) {
1301         SDLoc DL(Addr);
1302         SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
1303 
1304         // XXX - This is kind of hacky. Create a dummy sub node so we can check
1305         // the known bits in isDSOffsetLegal. We need to emit the selected node
1306         // here, so this is thrown away.
1307         SDValue Sub =
1308             CurDAG->getNode(ISD::SUB, DL, MVT::i32, Zero, Addr.getOperand(1));
1309 
1310         if (isDSOffset2Legal(Sub, OffsetValue0, OffsetValue1, Size)) {
1311           SmallVector<SDValue, 3> Opnds;
1312           Opnds.push_back(Zero);
1313           Opnds.push_back(Addr.getOperand(1));
1314           unsigned SubOp = AMDGPU::V_SUB_CO_U32_e32;
1315           if (Subtarget->hasAddNoCarry()) {
1316             SubOp = AMDGPU::V_SUB_U32_e64;
1317             Opnds.push_back(
1318                 CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
1319           }
1320 
1321           MachineSDNode *MachineSub = CurDAG->getMachineNode(
1322               SubOp, DL, MVT::getIntegerVT(Size * 8), Opnds);
1323 
1324           Base = SDValue(MachineSub, 0);
1325           Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
1326           Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
1327           return true;
1328         }
1329       }
1330     }
1331   } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
1332     unsigned OffsetValue0 = CAddr->getZExtValue();
1333     unsigned OffsetValue1 = OffsetValue0 + Size;
1334 
1335     if (isDSOffset2Legal(SDValue(), OffsetValue0, OffsetValue1, Size)) {
1336       SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
1337       MachineSDNode *MovZero =
1338           CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32, Zero);
1339       Base = SDValue(MovZero, 0);
1340       Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
1341       Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
1342       return true;
1343     }
1344   }
1345 
1346   // default case
1347 
1348   Base = Addr;
1349   Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
1350   Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
1351   return true;
1352 }
1353 
1354 bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
1355                                      SDValue &VAddr, SDValue &SOffset,
1356                                      SDValue &Offset, SDValue &Offen,
1357                                      SDValue &Idxen, SDValue &Addr64,
1358                                      SDValue &GLC, SDValue &SLC,
1359                                      SDValue &TFE, SDValue &DLC,
1360                                      SDValue &SWZ) const {
1361   // Subtarget prefers to use flat instruction
1362   // FIXME: This should be a pattern predicate and not reach here
1363   if (Subtarget->useFlatForGlobal())
1364     return false;
1365 
1366   SDLoc DL(Addr);
1367 
1368   if (!GLC.getNode())
1369     GLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
1370   if (!SLC.getNode())
1371     SLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
1372   TFE = CurDAG->getTargetConstant(0, DL, MVT::i1);
1373   DLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
1374   SWZ = CurDAG->getTargetConstant(0, DL, MVT::i1);
1375 
1376   Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
1377   Offen = CurDAG->getTargetConstant(0, DL, MVT::i1);
1378   Addr64 = CurDAG->getTargetConstant(0, DL, MVT::i1);
1379   SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
1380 
1381   ConstantSDNode *C1 = nullptr;
1382   SDValue N0 = Addr;
1383   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1384     C1 = cast<ConstantSDNode>(Addr.getOperand(1));
1385     if (isUInt<32>(C1->getZExtValue()))
1386       N0 = Addr.getOperand(0);
1387     else
1388       C1 = nullptr;
1389   }
1390 
1391   if (N0.getOpcode() == ISD::ADD) {
1392     // (add N2, N3) -> addr64, or
1393     // (add (add N2, N3), C1) -> addr64
1394     SDValue N2 = N0.getOperand(0);
1395     SDValue N3 = N0.getOperand(1);
1396     Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
1397 
1398     if (N2->isDivergent()) {
1399       if (N3->isDivergent()) {
1400         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
1401         // addr64, and construct the resource from a 0 address.
1402         Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
1403         VAddr = N0;
1404       } else {
1405         // N2 is divergent, N3 is not.
1406         Ptr = N3;
1407         VAddr = N2;
1408       }
1409     } else {
1410       // N2 is not divergent.
1411       Ptr = N2;
1412       VAddr = N3;
1413     }
1414     Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1415   } else if (N0->isDivergent()) {
1416     // N0 is divergent. Use it as the addr64, and construct the resource from a
1417     // 0 address.
1418     Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
1419     VAddr = N0;
1420     Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
1421   } else {
1422     // N0 -> offset, or
1423     // (N0 + C1) -> offset
1424     VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
1425     Ptr = N0;
1426   }
1427 
1428   if (!C1) {
1429     // No offset.
1430     Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1431     return true;
1432   }
1433 
1434   if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue())) {
1435     // Legal offset for instruction.
1436     Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
1437     return true;
1438   }
1439 
1440   // Illegal offset, store it in soffset.
1441   Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1442   SOffset =
1443       SDValue(CurDAG->getMachineNode(
1444                   AMDGPU::S_MOV_B32, DL, MVT::i32,
1445                   CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32)),
1446               0);
1447   return true;
1448 }
1449 
1450 bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
1451                                            SDValue &VAddr, SDValue &SOffset,
1452                                            SDValue &Offset, SDValue &GLC,
1453                                            SDValue &SLC, SDValue &TFE,
1454                                            SDValue &DLC, SDValue &SWZ) const {
1455   SDValue Ptr, Offen, Idxen, Addr64;
1456 
1457   // addr64 bit was removed for volcanic islands.
1458   // FIXME: This should be a pattern predicate and not reach here
1459   if (!Subtarget->hasAddr64())
1460     return false;
1461 
1462   if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
1463               GLC, SLC, TFE, DLC, SWZ))
1464     return false;
1465 
1466   ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
1467   if (C->getSExtValue()) {
1468     SDLoc DL(Addr);
1469 
1470     const SITargetLowering& Lowering =
1471       *static_cast<const SITargetLowering*>(getTargetLowering());
1472 
1473     SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
1474     return true;
1475   }
1476 
1477   return false;
1478 }
1479 
1480 bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
1481                                            SDValue &VAddr, SDValue &SOffset,
1482                                            SDValue &Offset,
1483                                            SDValue &SLC) const {
1484   SLC = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i1);
1485   SDValue GLC, TFE, DLC, SWZ;
1486 
1487   return SelectMUBUFAddr64(Addr, SRsrc, VAddr, SOffset, Offset, GLC, SLC, TFE, DLC, SWZ);
1488 }
1489 
1490 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
1491   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
1492   return PSV && PSV->isStack();
1493 }
1494 
1495 std::pair<SDValue, SDValue> AMDGPUDAGToDAGISel::foldFrameIndex(SDValue N) const {
1496   SDLoc DL(N);
1497 
1498   auto *FI = dyn_cast<FrameIndexSDNode>(N);
1499   SDValue TFI =
1500       FI ? CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0)) : N;
1501 
1502   // We rebase the base address into an absolute stack address and hence
1503   // use constant 0 for soffset.
1504   return std::make_pair(TFI, CurDAG->getTargetConstant(0, DL, MVT::i32));
1505 }
1506 
1507 bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffen(SDNode *Parent,
1508                                                  SDValue Addr, SDValue &Rsrc,
1509                                                  SDValue &VAddr, SDValue &SOffset,
1510                                                  SDValue &ImmOffset) const {
1511 
1512   SDLoc DL(Addr);
1513   MachineFunction &MF = CurDAG->getMachineFunction();
1514   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1515 
1516   Rsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
1517 
1518   if (ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
1519     int64_t Imm = CAddr->getSExtValue();
1520     const int64_t NullPtr =
1521         AMDGPUTargetMachine::getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS);
1522     // Don't fold null pointer.
1523     if (Imm != NullPtr) {
1524       SDValue HighBits = CurDAG->getTargetConstant(Imm & ~4095, DL, MVT::i32);
1525       MachineSDNode *MovHighBits = CurDAG->getMachineNode(
1526         AMDGPU::V_MOV_B32_e32, DL, MVT::i32, HighBits);
1527       VAddr = SDValue(MovHighBits, 0);
1528 
1529       // In a call sequence, stores to the argument stack area are relative to the
1530       // stack pointer.
1531       const MachinePointerInfo &PtrInfo
1532         = cast<MemSDNode>(Parent)->getPointerInfo();
1533       SOffset = isStackPtrRelative(PtrInfo)
1534         ? CurDAG->getRegister(Info->getStackPtrOffsetReg(), MVT::i32)
1535         : CurDAG->getTargetConstant(0, DL, MVT::i32);
1536       ImmOffset = CurDAG->getTargetConstant(Imm & 4095, DL, MVT::i16);
1537       return true;
1538     }
1539   }
1540 
1541   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1542     // (add n0, c1)
1543 
1544     SDValue N0 = Addr.getOperand(0);
1545     SDValue N1 = Addr.getOperand(1);
1546 
1547     // Offsets in vaddr must be positive if range checking is enabled.
1548     //
1549     // The total computation of vaddr + soffset + offset must not overflow.  If
1550     // vaddr is negative, even if offset is 0 the sgpr offset add will end up
1551     // overflowing.
1552     //
1553     // Prior to gfx9, MUBUF instructions with the vaddr offset enabled would
1554     // always perform a range check. If a negative vaddr base index was used,
1555     // this would fail the range check. The overall address computation would
1556     // compute a valid address, but this doesn't happen due to the range
1557     // check. For out-of-bounds MUBUF loads, a 0 is returned.
1558     //
1559     // Therefore it should be safe to fold any VGPR offset on gfx9 into the
1560     // MUBUF vaddr, but not on older subtargets which can only do this if the
1561     // sign bit is known 0.
1562     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
1563     if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue()) &&
1564         (!Subtarget->privateMemoryResourceIsRangeChecked() ||
1565          CurDAG->SignBitIsZero(N0))) {
1566       std::tie(VAddr, SOffset) = foldFrameIndex(N0);
1567       ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
1568       return true;
1569     }
1570   }
1571 
1572   // (node)
1573   std::tie(VAddr, SOffset) = foldFrameIndex(Addr);
1574   ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1575   return true;
1576 }
1577 
1578 bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffset(SDNode *Parent,
1579                                                   SDValue Addr,
1580                                                   SDValue &SRsrc,
1581                                                   SDValue &SOffset,
1582                                                   SDValue &Offset) const {
1583   ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr);
1584   if (!CAddr || !SIInstrInfo::isLegalMUBUFImmOffset(CAddr->getZExtValue()))
1585     return false;
1586 
1587   SDLoc DL(Addr);
1588   MachineFunction &MF = CurDAG->getMachineFunction();
1589   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1590 
1591   SRsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
1592 
1593   const MachinePointerInfo &PtrInfo = cast<MemSDNode>(Parent)->getPointerInfo();
1594 
1595   // FIXME: Get from MachinePointerInfo? We should only be using the frame
1596   // offset if we know this is in a call sequence.
1597   SOffset = isStackPtrRelative(PtrInfo)
1598                 ? CurDAG->getRegister(Info->getStackPtrOffsetReg(), MVT::i32)
1599                 : CurDAG->getTargetConstant(0, DL, MVT::i32);
1600 
1601   Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
1602   return true;
1603 }
1604 
1605 bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
1606                                            SDValue &SOffset, SDValue &Offset,
1607                                            SDValue &GLC, SDValue &SLC,
1608                                            SDValue &TFE, SDValue &DLC,
1609                                            SDValue &SWZ) const {
1610   SDValue Ptr, VAddr, Offen, Idxen, Addr64;
1611   const SIInstrInfo *TII =
1612     static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
1613 
1614   if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
1615               GLC, SLC, TFE, DLC, SWZ))
1616     return false;
1617 
1618   if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
1619       !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
1620       !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
1621     uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
1622                     APInt::getAllOnesValue(32).getZExtValue(); // Size
1623     SDLoc DL(Addr);
1624 
1625     const SITargetLowering& Lowering =
1626       *static_cast<const SITargetLowering*>(getTargetLowering());
1627 
1628     SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
1629     return true;
1630   }
1631   return false;
1632 }
1633 
1634 bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
1635                                            SDValue &Soffset, SDValue &Offset
1636                                            ) const {
1637   SDValue GLC, SLC, TFE, DLC, SWZ;
1638 
1639   return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE, DLC, SWZ);
1640 }
1641 bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
1642                                            SDValue &Soffset, SDValue &Offset,
1643                                            SDValue &SLC) const {
1644   SDValue GLC, TFE, DLC, SWZ;
1645 
1646   return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE, DLC, SWZ);
1647 }
1648 
1649 // Find a load or store from corresponding pattern root.
1650 // Roots may be build_vector, bitconvert or their combinations.
1651 static MemSDNode* findMemSDNode(SDNode *N) {
1652   N = AMDGPUTargetLowering::stripBitcast(SDValue(N,0)).getNode();
1653   if (MemSDNode *MN = dyn_cast<MemSDNode>(N))
1654     return MN;
1655   assert(isa<BuildVectorSDNode>(N));
1656   for (SDValue V : N->op_values())
1657     if (MemSDNode *MN =
1658           dyn_cast<MemSDNode>(AMDGPUTargetLowering::stripBitcast(V)))
1659       return MN;
1660   llvm_unreachable("cannot find MemSDNode in the pattern!");
1661 }
1662 
1663 static bool getBaseWithOffsetUsingSplitOR(SelectionDAG &DAG, SDValue Addr,
1664                                           SDValue &N0, SDValue &N1) {
1665   if (Addr.getValueType() == MVT::i64 && Addr.getOpcode() == ISD::BITCAST &&
1666       Addr.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
1667     // As we split 64-bit `or` earlier, it's complicated pattern to match, i.e.
1668     // (i64 (bitcast (v2i32 (build_vector
1669     //                        (or (extract_vector_elt V, 0), OFFSET),
1670     //                        (extract_vector_elt V, 1)))))
1671     SDValue Lo = Addr.getOperand(0).getOperand(0);
1672     if (Lo.getOpcode() == ISD::OR && DAG.isBaseWithConstantOffset(Lo)) {
1673       SDValue BaseLo = Lo.getOperand(0);
1674       SDValue BaseHi = Addr.getOperand(0).getOperand(1);
1675       // Check that split base (Lo and Hi) are extracted from the same one.
1676       if (BaseLo.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
1677           BaseHi.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
1678           BaseLo.getOperand(0) == BaseHi.getOperand(0) &&
1679           // Lo is statically extracted from index 0.
1680           isa<ConstantSDNode>(BaseLo.getOperand(1)) &&
1681           BaseLo.getConstantOperandVal(1) == 0 &&
1682           // Hi is statically extracted from index 0.
1683           isa<ConstantSDNode>(BaseHi.getOperand(1)) &&
1684           BaseHi.getConstantOperandVal(1) == 1) {
1685         N0 = BaseLo.getOperand(0).getOperand(0);
1686         N1 = Lo.getOperand(1);
1687         return true;
1688       }
1689     }
1690   }
1691   return false;
1692 }
1693 
1694 template <bool IsSigned>
1695 bool AMDGPUDAGToDAGISel::SelectFlatOffset(SDNode *N,
1696                                           SDValue Addr,
1697                                           SDValue &VAddr,
1698                                           SDValue &Offset) const {
1699   int64_t OffsetVal = 0;
1700 
1701   unsigned AS = findMemSDNode(N)->getAddressSpace();
1702 
1703   if (Subtarget->hasFlatInstOffsets() &&
1704       (!Subtarget->hasFlatSegmentOffsetBug() ||
1705        AS != AMDGPUAS::FLAT_ADDRESS)) {
1706     SDValue N0, N1;
1707     if (CurDAG->isBaseWithConstantOffset(Addr)) {
1708       N0 = Addr.getOperand(0);
1709       N1 = Addr.getOperand(1);
1710     } else if (getBaseWithOffsetUsingSplitOR(*CurDAG, Addr, N0, N1)) {
1711       assert(N0 && N1 && isa<ConstantSDNode>(N1));
1712     }
1713     if (N0 && N1) {
1714       uint64_t COffsetVal = cast<ConstantSDNode>(N1)->getSExtValue();
1715 
1716       const SIInstrInfo *TII = Subtarget->getInstrInfo();
1717       if (TII->isLegalFLATOffset(COffsetVal, AS, IsSigned)) {
1718         Addr = N0;
1719         OffsetVal = COffsetVal;
1720       } else {
1721         // If the offset doesn't fit, put the low bits into the offset field and
1722         // add the rest.
1723         //
1724         // For a FLAT instruction the hardware decides whether to access
1725         // global/scratch/shared memory based on the high bits of vaddr,
1726         // ignoring the offset field, so we have to ensure that when we add
1727         // remainder to vaddr it still points into the same underlying object.
1728         // The easiest way to do that is to make sure that we split the offset
1729         // into two pieces that are both >= 0 or both <= 0.
1730 
1731         SDLoc DL(N);
1732         uint64_t RemainderOffset = COffsetVal;
1733         uint64_t ImmField = 0;
1734         const unsigned NumBits = TII->getNumFlatOffsetBits(IsSigned);
1735         if (IsSigned) {
1736           // Use signed division by a power of two to truncate towards 0.
1737           int64_t D = 1LL << (NumBits - 1);
1738           RemainderOffset = (static_cast<int64_t>(COffsetVal) / D) * D;
1739           ImmField = COffsetVal - RemainderOffset;
1740         } else if (static_cast<int64_t>(COffsetVal) >= 0) {
1741           ImmField = COffsetVal & maskTrailingOnes<uint64_t>(NumBits);
1742           RemainderOffset = COffsetVal - ImmField;
1743         }
1744         assert(TII->isLegalFLATOffset(ImmField, AS, IsSigned));
1745         assert(RemainderOffset + ImmField == COffsetVal);
1746 
1747         OffsetVal = ImmField;
1748 
1749         SDValue AddOffsetLo =
1750             getMaterializedScalarImm32(Lo_32(RemainderOffset), DL);
1751         SDValue Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
1752 
1753         if (Addr.getValueType().getSizeInBits() == 32) {
1754           SmallVector<SDValue, 3> Opnds;
1755           Opnds.push_back(N0);
1756           Opnds.push_back(AddOffsetLo);
1757           unsigned AddOp = AMDGPU::V_ADD_CO_U32_e32;
1758           if (Subtarget->hasAddNoCarry()) {
1759             AddOp = AMDGPU::V_ADD_U32_e64;
1760             Opnds.push_back(Clamp);
1761           }
1762           Addr = SDValue(CurDAG->getMachineNode(AddOp, DL, MVT::i32, Opnds), 0);
1763         } else {
1764           // TODO: Should this try to use a scalar add pseudo if the base address
1765           // is uniform and saddr is usable?
1766           SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
1767           SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
1768 
1769           SDNode *N0Lo = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
1770                                                 DL, MVT::i32, N0, Sub0);
1771           SDNode *N0Hi = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
1772                                                 DL, MVT::i32, N0, Sub1);
1773 
1774           SDValue AddOffsetHi =
1775               getMaterializedScalarImm32(Hi_32(RemainderOffset), DL);
1776 
1777           SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i1);
1778 
1779           SDNode *Add =
1780               CurDAG->getMachineNode(AMDGPU::V_ADD_CO_U32_e64, DL, VTs,
1781                                      {AddOffsetLo, SDValue(N0Lo, 0), Clamp});
1782 
1783           SDNode *Addc = CurDAG->getMachineNode(
1784               AMDGPU::V_ADDC_U32_e64, DL, VTs,
1785               {AddOffsetHi, SDValue(N0Hi, 0), SDValue(Add, 1), Clamp});
1786 
1787           SDValue RegSequenceArgs[] = {
1788               CurDAG->getTargetConstant(AMDGPU::VReg_64RegClassID, DL, MVT::i32),
1789               SDValue(Add, 0), Sub0, SDValue(Addc, 0), Sub1};
1790 
1791           Addr = SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
1792                                                 MVT::i64, RegSequenceArgs),
1793                          0);
1794         }
1795       }
1796     }
1797   }
1798 
1799   VAddr = Addr;
1800   Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i16);
1801   return true;
1802 }
1803 
1804 // If this matches zero_extend i32:x, return x
1805 static SDValue matchZExtFromI32(SDValue Op) {
1806   if (Op.getOpcode() != ISD::ZERO_EXTEND)
1807     return SDValue();
1808 
1809   SDValue ExtSrc = Op.getOperand(0);
1810   return (ExtSrc.getValueType() == MVT::i32) ? ExtSrc : SDValue();
1811 }
1812 
1813 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
1814 bool AMDGPUDAGToDAGISel::SelectGlobalSAddr(SDNode *N,
1815                                            SDValue Addr,
1816                                            SDValue &SAddr,
1817                                            SDValue &VOffset,
1818                                            SDValue &Offset) const {
1819   int64_t ImmOffset = 0;
1820 
1821   // Match the immediate offset first, which canonically is moved as low as
1822   // possible.
1823   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1824     SDValue LHS = Addr.getOperand(0);
1825     SDValue RHS = Addr.getOperand(1);
1826 
1827     int64_t COffsetVal = cast<ConstantSDNode>(RHS)->getSExtValue();
1828     const SIInstrInfo *TII = Subtarget->getInstrInfo();
1829 
1830     // TODO: Could split larger constant into VGPR offset.
1831     if (TII->isLegalFLATOffset(COffsetVal, AMDGPUAS::GLOBAL_ADDRESS, true)) {
1832       Addr = LHS;
1833       ImmOffset = COffsetVal;
1834     }
1835   }
1836 
1837   // Match the variable offset.
1838   if (Addr.getOpcode() != ISD::ADD)
1839     return false;
1840 
1841   SDValue LHS = Addr.getOperand(0);
1842   SDValue RHS = Addr.getOperand(1);
1843 
1844   if (!LHS->isDivergent()) {
1845     // add (i64 sgpr), (zero_extend (i32 vgpr))
1846     if (SDValue ZextRHS = matchZExtFromI32(RHS)) {
1847       SAddr = LHS;
1848       VOffset = ZextRHS;
1849     }
1850   }
1851 
1852   if (!SAddr && !RHS->isDivergent()) {
1853     // add (zero_extend (i32 vgpr)), (i64 sgpr)
1854     if (SDValue ZextLHS = matchZExtFromI32(LHS)) {
1855       SAddr = RHS;
1856       VOffset = ZextLHS;
1857     }
1858   }
1859 
1860   if (!SAddr)
1861     return false;
1862 
1863   Offset = CurDAG->getTargetConstant(ImmOffset, SDLoc(), MVT::i16);
1864   return true;
1865 }
1866 
1867 // Match (32-bit SGPR base) + sext(imm offset)
1868 bool AMDGPUDAGToDAGISel::SelectScratchSAddr(SDNode *N,
1869                                             SDValue Addr,
1870                                             SDValue &SAddr,
1871                                             SDValue &Offset) const {
1872   if (Addr->isDivergent())
1873     return false;
1874 
1875   SAddr = Addr;
1876   int64_t COffsetVal = 0;
1877 
1878   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1879     COffsetVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
1880     SAddr = Addr.getOperand(0);
1881   }
1882 
1883   if (auto FI = dyn_cast<FrameIndexSDNode>(SAddr)) {
1884     SAddr = CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0));
1885   } else if (SAddr.getOpcode() == ISD::ADD &&
1886              isa<FrameIndexSDNode>(SAddr.getOperand(0))) {
1887     // Materialize this into a scalar move for scalar address to avoid
1888     // readfirstlane.
1889     auto FI = cast<FrameIndexSDNode>(SAddr.getOperand(0));
1890     SDValue TFI = CurDAG->getTargetFrameIndex(FI->getIndex(),
1891                                               FI->getValueType(0));
1892     SAddr = SDValue(CurDAG->getMachineNode(AMDGPU::S_ADD_U32, SDLoc(SAddr),
1893                                            MVT::i32, TFI, SAddr.getOperand(1)),
1894                     0);
1895   }
1896 
1897   const SIInstrInfo *TII = Subtarget->getInstrInfo();
1898 
1899   if (!TII->isLegalFLATOffset(COffsetVal, AMDGPUAS::PRIVATE_ADDRESS, true)) {
1900     int64_t RemainderOffset = COffsetVal;
1901     int64_t ImmField = 0;
1902     const unsigned NumBits = TII->getNumFlatOffsetBits(true);
1903     // Use signed division by a power of two to truncate towards 0.
1904     int64_t D = 1LL << (NumBits - 1);
1905     RemainderOffset = (COffsetVal / D) * D;
1906     ImmField = COffsetVal - RemainderOffset;
1907 
1908     assert(TII->isLegalFLATOffset(ImmField, AMDGPUAS::PRIVATE_ADDRESS, true));
1909     assert(RemainderOffset + ImmField == COffsetVal);
1910 
1911     COffsetVal = ImmField;
1912 
1913     SDLoc DL(N);
1914     SDValue AddOffset =
1915         getMaterializedScalarImm32(Lo_32(RemainderOffset), DL);
1916     SAddr = SDValue(CurDAG->getMachineNode(AMDGPU::S_ADD_U32, DL, MVT::i32,
1917                                            SAddr, AddOffset), 0);
1918   }
1919 
1920   Offset = CurDAG->getTargetConstant(COffsetVal, SDLoc(), MVT::i16);
1921 
1922   return true;
1923 }
1924 
1925 bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode,
1926                                           SDValue &Offset, bool &Imm) const {
1927   ConstantSDNode *C = dyn_cast<ConstantSDNode>(ByteOffsetNode);
1928   if (!C) {
1929     if (ByteOffsetNode.getValueType().isScalarInteger() &&
1930         ByteOffsetNode.getValueType().getSizeInBits() == 32) {
1931       Offset = ByteOffsetNode;
1932       Imm = false;
1933       return true;
1934     }
1935     if (ByteOffsetNode.getOpcode() == ISD::ZERO_EXTEND) {
1936       if (ByteOffsetNode.getOperand(0).getValueType().getSizeInBits() == 32) {
1937         Offset = ByteOffsetNode.getOperand(0);
1938         Imm = false;
1939         return true;
1940       }
1941     }
1942     return false;
1943   }
1944 
1945   SDLoc SL(ByteOffsetNode);
1946   // GFX9 and GFX10 have signed byte immediate offsets.
1947   int64_t ByteOffset = C->getSExtValue();
1948   Optional<int64_t> EncodedOffset =
1949       AMDGPU::getSMRDEncodedOffset(*Subtarget, ByteOffset, false);
1950   if (EncodedOffset) {
1951     Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
1952     Imm = true;
1953     return true;
1954   }
1955 
1956   // SGPR and literal offsets are unsigned.
1957   if (ByteOffset < 0)
1958     return false;
1959 
1960   EncodedOffset = AMDGPU::getSMRDEncodedLiteralOffset32(*Subtarget, ByteOffset);
1961   if (EncodedOffset) {
1962     Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
1963     return true;
1964   }
1965 
1966   if (!isUInt<32>(ByteOffset) && !isInt<32>(ByteOffset))
1967     return false;
1968 
1969   SDValue C32Bit = CurDAG->getTargetConstant(ByteOffset, SL, MVT::i32);
1970   Offset = SDValue(
1971       CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, C32Bit), 0);
1972 
1973   return true;
1974 }
1975 
1976 SDValue AMDGPUDAGToDAGISel::Expand32BitAddress(SDValue Addr) const {
1977   if (Addr.getValueType() != MVT::i32)
1978     return Addr;
1979 
1980   // Zero-extend a 32-bit address.
1981   SDLoc SL(Addr);
1982 
1983   const MachineFunction &MF = CurDAG->getMachineFunction();
1984   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1985   unsigned AddrHiVal = Info->get32BitAddressHighBits();
1986   SDValue AddrHi = CurDAG->getTargetConstant(AddrHiVal, SL, MVT::i32);
1987 
1988   const SDValue Ops[] = {
1989     CurDAG->getTargetConstant(AMDGPU::SReg_64_XEXECRegClassID, SL, MVT::i32),
1990     Addr,
1991     CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32),
1992     SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, AddrHi),
1993             0),
1994     CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32),
1995   };
1996 
1997   return SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, SL, MVT::i64,
1998                                         Ops), 0);
1999 }
2000 
2001 bool AMDGPUDAGToDAGISel::SelectSMRD(SDValue Addr, SDValue &SBase,
2002                                      SDValue &Offset, bool &Imm) const {
2003   SDLoc SL(Addr);
2004 
2005   // A 32-bit (address + offset) should not cause unsigned 32-bit integer
2006   // wraparound, because s_load instructions perform the addition in 64 bits.
2007   if ((Addr.getValueType() != MVT::i32 ||
2008        Addr->getFlags().hasNoUnsignedWrap())) {
2009     SDValue N0, N1;
2010     // Extract the base and offset if possible.
2011     if (CurDAG->isBaseWithConstantOffset(Addr) ||
2012         Addr.getOpcode() == ISD::ADD) {
2013       N0 = Addr.getOperand(0);
2014       N1 = Addr.getOperand(1);
2015     } else if (getBaseWithOffsetUsingSplitOR(*CurDAG, Addr, N0, N1)) {
2016       assert(N0 && N1 && isa<ConstantSDNode>(N1));
2017     }
2018     if (N0 && N1) {
2019       if (SelectSMRDOffset(N1, Offset, Imm)) {
2020         SBase = Expand32BitAddress(N0);
2021         return true;
2022       }
2023     }
2024   }
2025   SBase = Expand32BitAddress(Addr);
2026   Offset = CurDAG->getTargetConstant(0, SL, MVT::i32);
2027   Imm = true;
2028   return true;
2029 }
2030 
2031 bool AMDGPUDAGToDAGISel::SelectSMRDImm(SDValue Addr, SDValue &SBase,
2032                                        SDValue &Offset) const {
2033   bool Imm = false;
2034   return SelectSMRD(Addr, SBase, Offset, Imm) && Imm;
2035 }
2036 
2037 bool AMDGPUDAGToDAGISel::SelectSMRDImm32(SDValue Addr, SDValue &SBase,
2038                                          SDValue &Offset) const {
2039 
2040   assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
2041 
2042   bool Imm = false;
2043   if (!SelectSMRD(Addr, SBase, Offset, Imm))
2044     return false;
2045 
2046   return !Imm && isa<ConstantSDNode>(Offset);
2047 }
2048 
2049 bool AMDGPUDAGToDAGISel::SelectSMRDSgpr(SDValue Addr, SDValue &SBase,
2050                                         SDValue &Offset) const {
2051   bool Imm = false;
2052   return SelectSMRD(Addr, SBase, Offset, Imm) && !Imm &&
2053          !isa<ConstantSDNode>(Offset);
2054 }
2055 
2056 bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm(SDValue Addr,
2057                                              SDValue &Offset) const {
2058   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr)) {
2059     // The immediate offset for S_BUFFER instructions is unsigned.
2060     if (auto Imm =
2061             AMDGPU::getSMRDEncodedOffset(*Subtarget, C->getZExtValue(), true)) {
2062       Offset = CurDAG->getTargetConstant(*Imm, SDLoc(Addr), MVT::i32);
2063       return true;
2064     }
2065   }
2066 
2067   return false;
2068 }
2069 
2070 bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm32(SDValue Addr,
2071                                                SDValue &Offset) const {
2072   assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
2073 
2074   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr)) {
2075     if (auto Imm = AMDGPU::getSMRDEncodedLiteralOffset32(*Subtarget,
2076                                                          C->getZExtValue())) {
2077       Offset = CurDAG->getTargetConstant(*Imm, SDLoc(Addr), MVT::i32);
2078       return true;
2079     }
2080   }
2081 
2082   return false;
2083 }
2084 
2085 bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index,
2086                                             SDValue &Base,
2087                                             SDValue &Offset) const {
2088   SDLoc DL(Index);
2089 
2090   if (CurDAG->isBaseWithConstantOffset(Index)) {
2091     SDValue N0 = Index.getOperand(0);
2092     SDValue N1 = Index.getOperand(1);
2093     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
2094 
2095     // (add n0, c0)
2096     // Don't peel off the offset (c0) if doing so could possibly lead
2097     // the base (n0) to be negative.
2098     // (or n0, |c0|) can never change a sign given isBaseWithConstantOffset.
2099     if (C1->getSExtValue() <= 0 || CurDAG->SignBitIsZero(N0) ||
2100         (Index->getOpcode() == ISD::OR && C1->getSExtValue() >= 0)) {
2101       Base = N0;
2102       Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32);
2103       return true;
2104     }
2105   }
2106 
2107   if (isa<ConstantSDNode>(Index))
2108     return false;
2109 
2110   Base = Index;
2111   Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
2112   return true;
2113 }
2114 
2115 SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, const SDLoc &DL,
2116                                      SDValue Val, uint32_t Offset,
2117                                      uint32_t Width) {
2118   // Transformation function, pack the offset and width of a BFE into
2119   // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
2120   // source, bits [5:0] contain the offset and bits [22:16] the width.
2121   uint32_t PackedVal = Offset | (Width << 16);
2122   SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32);
2123 
2124   return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
2125 }
2126 
2127 void AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
2128   // "(a << b) srl c)" ---> "BFE_U32 a, (c-b), (32-c)
2129   // "(a << b) sra c)" ---> "BFE_I32 a, (c-b), (32-c)
2130   // Predicate: 0 < b <= c < 32
2131 
2132   const SDValue &Shl = N->getOperand(0);
2133   ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
2134   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
2135 
2136   if (B && C) {
2137     uint32_t BVal = B->getZExtValue();
2138     uint32_t CVal = C->getZExtValue();
2139 
2140     if (0 < BVal && BVal <= CVal && CVal < 32) {
2141       bool Signed = N->getOpcode() == ISD::SRA;
2142       unsigned Opcode = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2143 
2144       ReplaceNode(N, getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0), CVal - BVal,
2145                               32 - CVal));
2146       return;
2147     }
2148   }
2149   SelectCode(N);
2150 }
2151 
2152 void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
2153   switch (N->getOpcode()) {
2154   case ISD::AND:
2155     if (N->getOperand(0).getOpcode() == ISD::SRL) {
2156       // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
2157       // Predicate: isMask(mask)
2158       const SDValue &Srl = N->getOperand(0);
2159       ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
2160       ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));
2161 
2162       if (Shift && Mask) {
2163         uint32_t ShiftVal = Shift->getZExtValue();
2164         uint32_t MaskVal = Mask->getZExtValue();
2165 
2166         if (isMask_32(MaskVal)) {
2167           uint32_t WidthVal = countPopulation(MaskVal);
2168 
2169           ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
2170                                   Srl.getOperand(0), ShiftVal, WidthVal));
2171           return;
2172         }
2173       }
2174     }
2175     break;
2176   case ISD::SRL:
2177     if (N->getOperand(0).getOpcode() == ISD::AND) {
2178       // "(a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)"
2179       // Predicate: isMask(mask >> b)
2180       const SDValue &And = N->getOperand(0);
2181       ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
2182       ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));
2183 
2184       if (Shift && Mask) {
2185         uint32_t ShiftVal = Shift->getZExtValue();
2186         uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;
2187 
2188         if (isMask_32(MaskVal)) {
2189           uint32_t WidthVal = countPopulation(MaskVal);
2190 
2191           ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
2192                                   And.getOperand(0), ShiftVal, WidthVal));
2193           return;
2194         }
2195       }
2196     } else if (N->getOperand(0).getOpcode() == ISD::SHL) {
2197       SelectS_BFEFromShifts(N);
2198       return;
2199     }
2200     break;
2201   case ISD::SRA:
2202     if (N->getOperand(0).getOpcode() == ISD::SHL) {
2203       SelectS_BFEFromShifts(N);
2204       return;
2205     }
2206     break;
2207 
2208   case ISD::SIGN_EXTEND_INREG: {
2209     // sext_inreg (srl x, 16), i8 -> bfe_i32 x, 16, 8
2210     SDValue Src = N->getOperand(0);
2211     if (Src.getOpcode() != ISD::SRL)
2212       break;
2213 
2214     const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
2215     if (!Amt)
2216       break;
2217 
2218     unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
2219     ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_I32, SDLoc(N), Src.getOperand(0),
2220                             Amt->getZExtValue(), Width));
2221     return;
2222   }
2223   }
2224 
2225   SelectCode(N);
2226 }
2227 
2228 bool AMDGPUDAGToDAGISel::isCBranchSCC(const SDNode *N) const {
2229   assert(N->getOpcode() == ISD::BRCOND);
2230   if (!N->hasOneUse())
2231     return false;
2232 
2233   SDValue Cond = N->getOperand(1);
2234   if (Cond.getOpcode() == ISD::CopyToReg)
2235     Cond = Cond.getOperand(2);
2236 
2237   if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
2238     return false;
2239 
2240   MVT VT = Cond.getOperand(0).getSimpleValueType();
2241   if (VT == MVT::i32)
2242     return true;
2243 
2244   if (VT == MVT::i64) {
2245     auto ST = static_cast<const GCNSubtarget *>(Subtarget);
2246 
2247     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
2248     return (CC == ISD::SETEQ || CC == ISD::SETNE) && ST->hasScalarCompareEq64();
2249   }
2250 
2251   return false;
2252 }
2253 
2254 void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
2255   SDValue Cond = N->getOperand(1);
2256 
2257   if (Cond.isUndef()) {
2258     CurDAG->SelectNodeTo(N, AMDGPU::SI_BR_UNDEF, MVT::Other,
2259                          N->getOperand(2), N->getOperand(0));
2260     return;
2261   }
2262 
2263   const GCNSubtarget *ST = static_cast<const GCNSubtarget *>(Subtarget);
2264   const SIRegisterInfo *TRI = ST->getRegisterInfo();
2265 
2266   bool UseSCCBr = isCBranchSCC(N) && isUniformBr(N);
2267   unsigned BrOp = UseSCCBr ? AMDGPU::S_CBRANCH_SCC1 : AMDGPU::S_CBRANCH_VCCNZ;
2268   Register CondReg = UseSCCBr ? AMDGPU::SCC : TRI->getVCC();
2269   SDLoc SL(N);
2270 
2271   if (!UseSCCBr) {
2272     // This is the case that we are selecting to S_CBRANCH_VCCNZ.  We have not
2273     // analyzed what generates the vcc value, so we do not know whether vcc
2274     // bits for disabled lanes are 0.  Thus we need to mask out bits for
2275     // disabled lanes.
2276     //
2277     // For the case that we select S_CBRANCH_SCC1 and it gets
2278     // changed to S_CBRANCH_VCCNZ in SIFixSGPRCopies, SIFixSGPRCopies calls
2279     // SIInstrInfo::moveToVALU which inserts the S_AND).
2280     //
2281     // We could add an analysis of what generates the vcc value here and omit
2282     // the S_AND when is unnecessary. But it would be better to add a separate
2283     // pass after SIFixSGPRCopies to do the unnecessary S_AND removal, so it
2284     // catches both cases.
2285     Cond = SDValue(CurDAG->getMachineNode(ST->isWave32() ? AMDGPU::S_AND_B32
2286                                                          : AMDGPU::S_AND_B64,
2287                      SL, MVT::i1,
2288                      CurDAG->getRegister(ST->isWave32() ? AMDGPU::EXEC_LO
2289                                                         : AMDGPU::EXEC,
2290                                          MVT::i1),
2291                     Cond),
2292                    0);
2293   }
2294 
2295   SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, CondReg, Cond);
2296   CurDAG->SelectNodeTo(N, BrOp, MVT::Other,
2297                        N->getOperand(2), // Basic Block
2298                        VCC.getValue(0));
2299 }
2300 
2301 void AMDGPUDAGToDAGISel::SelectFMAD_FMA(SDNode *N) {
2302   MVT VT = N->getSimpleValueType(0);
2303   bool IsFMA = N->getOpcode() == ISD::FMA;
2304   if (VT != MVT::f32 || (!Subtarget->hasMadMixInsts() &&
2305                          !Subtarget->hasFmaMixInsts()) ||
2306       ((IsFMA && Subtarget->hasMadMixInsts()) ||
2307        (!IsFMA && Subtarget->hasFmaMixInsts()))) {
2308     SelectCode(N);
2309     return;
2310   }
2311 
2312   SDValue Src0 = N->getOperand(0);
2313   SDValue Src1 = N->getOperand(1);
2314   SDValue Src2 = N->getOperand(2);
2315   unsigned Src0Mods, Src1Mods, Src2Mods;
2316 
2317   // Avoid using v_mad_mix_f32/v_fma_mix_f32 unless there is actually an operand
2318   // using the conversion from f16.
2319   bool Sel0 = SelectVOP3PMadMixModsImpl(Src0, Src0, Src0Mods);
2320   bool Sel1 = SelectVOP3PMadMixModsImpl(Src1, Src1, Src1Mods);
2321   bool Sel2 = SelectVOP3PMadMixModsImpl(Src2, Src2, Src2Mods);
2322 
2323   assert((IsFMA || !Mode.allFP32Denormals()) &&
2324          "fmad selected with denormals enabled");
2325   // TODO: We can select this with f32 denormals enabled if all the sources are
2326   // converted from f16 (in which case fmad isn't legal).
2327 
2328   if (Sel0 || Sel1 || Sel2) {
2329     // For dummy operands.
2330     SDValue Zero = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
2331     SDValue Ops[] = {
2332       CurDAG->getTargetConstant(Src0Mods, SDLoc(), MVT::i32), Src0,
2333       CurDAG->getTargetConstant(Src1Mods, SDLoc(), MVT::i32), Src1,
2334       CurDAG->getTargetConstant(Src2Mods, SDLoc(), MVT::i32), Src2,
2335       CurDAG->getTargetConstant(0, SDLoc(), MVT::i1),
2336       Zero, Zero
2337     };
2338 
2339     CurDAG->SelectNodeTo(N,
2340                          IsFMA ? AMDGPU::V_FMA_MIX_F32 : AMDGPU::V_MAD_MIX_F32,
2341                          MVT::f32, Ops);
2342   } else {
2343     SelectCode(N);
2344   }
2345 }
2346 
2347 // This is here because there isn't a way to use the generated sub0_sub1 as the
2348 // subreg index to EXTRACT_SUBREG in tablegen.
2349 void AMDGPUDAGToDAGISel::SelectATOMIC_CMP_SWAP(SDNode *N) {
2350   MemSDNode *Mem = cast<MemSDNode>(N);
2351   unsigned AS = Mem->getAddressSpace();
2352   if (AS == AMDGPUAS::FLAT_ADDRESS) {
2353     SelectCode(N);
2354     return;
2355   }
2356 
2357   MVT VT = N->getSimpleValueType(0);
2358   bool Is32 = (VT == MVT::i32);
2359   SDLoc SL(N);
2360 
2361   MachineSDNode *CmpSwap = nullptr;
2362   if (Subtarget->hasAddr64()) {
2363     SDValue SRsrc, VAddr, SOffset, Offset, SLC;
2364 
2365     if (SelectMUBUFAddr64(Mem->getBasePtr(), SRsrc, VAddr, SOffset, Offset, SLC)) {
2366       unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN :
2367         AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN;
2368       SDValue CmpVal = Mem->getOperand(2);
2369       SDValue GLC = CurDAG->getTargetConstant(1, SL, MVT::i1);
2370 
2371       // XXX - Do we care about glue operands?
2372 
2373       SDValue Ops[] = {
2374         CmpVal, VAddr, SRsrc, SOffset, Offset, GLC, SLC, Mem->getChain()
2375       };
2376 
2377       CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
2378     }
2379   }
2380 
2381   if (!CmpSwap) {
2382     SDValue SRsrc, SOffset, Offset, SLC;
2383     if (SelectMUBUFOffset(Mem->getBasePtr(), SRsrc, SOffset, Offset, SLC)) {
2384       unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN :
2385         AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN;
2386 
2387       SDValue CmpVal = Mem->getOperand(2);
2388       SDValue GLC = CurDAG->getTargetConstant(1, SL, MVT::i1);
2389       SDValue Ops[] = {
2390         CmpVal, SRsrc, SOffset, Offset, GLC, SLC, Mem->getChain()
2391       };
2392 
2393       CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
2394     }
2395   }
2396 
2397   if (!CmpSwap) {
2398     SelectCode(N);
2399     return;
2400   }
2401 
2402   MachineMemOperand *MMO = Mem->getMemOperand();
2403   CurDAG->setNodeMemRefs(CmpSwap, {MMO});
2404 
2405   unsigned SubReg = Is32 ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
2406   SDValue Extract
2407     = CurDAG->getTargetExtractSubreg(SubReg, SL, VT, SDValue(CmpSwap, 0));
2408 
2409   ReplaceUses(SDValue(N, 0), Extract);
2410   ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 1));
2411   CurDAG->RemoveDeadNode(N);
2412 }
2413 
2414 void AMDGPUDAGToDAGISel::SelectDSAppendConsume(SDNode *N, unsigned IntrID) {
2415   // The address is assumed to be uniform, so if it ends up in a VGPR, it will
2416   // be copied to an SGPR with readfirstlane.
2417   unsigned Opc = IntrID == Intrinsic::amdgcn_ds_append ?
2418     AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
2419 
2420   SDValue Chain = N->getOperand(0);
2421   SDValue Ptr = N->getOperand(2);
2422   MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
2423   MachineMemOperand *MMO = M->getMemOperand();
2424   bool IsGDS = M->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
2425 
2426   SDValue Offset;
2427   if (CurDAG->isBaseWithConstantOffset(Ptr)) {
2428     SDValue PtrBase = Ptr.getOperand(0);
2429     SDValue PtrOffset = Ptr.getOperand(1);
2430 
2431     const APInt &OffsetVal = cast<ConstantSDNode>(PtrOffset)->getAPIntValue();
2432     if (isDSOffsetLegal(PtrBase, OffsetVal.getZExtValue())) {
2433       N = glueCopyToM0(N, PtrBase);
2434       Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i32);
2435     }
2436   }
2437 
2438   if (!Offset) {
2439     N = glueCopyToM0(N, Ptr);
2440     Offset = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
2441   }
2442 
2443   SDValue Ops[] = {
2444     Offset,
2445     CurDAG->getTargetConstant(IsGDS, SDLoc(), MVT::i32),
2446     Chain,
2447     N->getOperand(N->getNumOperands() - 1) // New glue
2448   };
2449 
2450   SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
2451   CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
2452 }
2453 
2454 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
2455   switch (IntrID) {
2456   case Intrinsic::amdgcn_ds_gws_init:
2457     return AMDGPU::DS_GWS_INIT;
2458   case Intrinsic::amdgcn_ds_gws_barrier:
2459     return AMDGPU::DS_GWS_BARRIER;
2460   case Intrinsic::amdgcn_ds_gws_sema_v:
2461     return AMDGPU::DS_GWS_SEMA_V;
2462   case Intrinsic::amdgcn_ds_gws_sema_br:
2463     return AMDGPU::DS_GWS_SEMA_BR;
2464   case Intrinsic::amdgcn_ds_gws_sema_p:
2465     return AMDGPU::DS_GWS_SEMA_P;
2466   case Intrinsic::amdgcn_ds_gws_sema_release_all:
2467     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
2468   default:
2469     llvm_unreachable("not a gws intrinsic");
2470   }
2471 }
2472 
2473 void AMDGPUDAGToDAGISel::SelectDS_GWS(SDNode *N, unsigned IntrID) {
2474   if (IntrID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
2475       !Subtarget->hasGWSSemaReleaseAll()) {
2476     // Let this error.
2477     SelectCode(N);
2478     return;
2479   }
2480 
2481   // Chain, intrinsic ID, vsrc, offset
2482   const bool HasVSrc = N->getNumOperands() == 4;
2483   assert(HasVSrc || N->getNumOperands() == 3);
2484 
2485   SDLoc SL(N);
2486   SDValue BaseOffset = N->getOperand(HasVSrc ? 3 : 2);
2487   int ImmOffset = 0;
2488   MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
2489   MachineMemOperand *MMO = M->getMemOperand();
2490 
2491   // Don't worry if the offset ends up in a VGPR. Only one lane will have
2492   // effect, so SIFixSGPRCopies will validly insert readfirstlane.
2493 
2494   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
2495   // offset field) % 64. Some versions of the programming guide omit the m0
2496   // part, or claim it's from offset 0.
2497   if (ConstantSDNode *ConstOffset = dyn_cast<ConstantSDNode>(BaseOffset)) {
2498     // If we have a constant offset, try to use the 0 in m0 as the base.
2499     // TODO: Look into changing the default m0 initialization value. If the
2500     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
2501     // the immediate offset.
2502     glueCopyToM0(N, CurDAG->getTargetConstant(0, SL, MVT::i32));
2503     ImmOffset = ConstOffset->getZExtValue();
2504   } else {
2505     if (CurDAG->isBaseWithConstantOffset(BaseOffset)) {
2506       ImmOffset = BaseOffset.getConstantOperandVal(1);
2507       BaseOffset = BaseOffset.getOperand(0);
2508     }
2509 
2510     // Prefer to do the shift in an SGPR since it should be possible to use m0
2511     // as the result directly. If it's already an SGPR, it will be eliminated
2512     // later.
2513     SDNode *SGPROffset
2514       = CurDAG->getMachineNode(AMDGPU::V_READFIRSTLANE_B32, SL, MVT::i32,
2515                                BaseOffset);
2516     // Shift to offset in m0
2517     SDNode *M0Base
2518       = CurDAG->getMachineNode(AMDGPU::S_LSHL_B32, SL, MVT::i32,
2519                                SDValue(SGPROffset, 0),
2520                                CurDAG->getTargetConstant(16, SL, MVT::i32));
2521     glueCopyToM0(N, SDValue(M0Base, 0));
2522   }
2523 
2524   SDValue Chain = N->getOperand(0);
2525   SDValue OffsetField = CurDAG->getTargetConstant(ImmOffset, SL, MVT::i32);
2526 
2527   const unsigned Opc = gwsIntrinToOpcode(IntrID);
2528   SmallVector<SDValue, 5> Ops;
2529   if (HasVSrc)
2530     Ops.push_back(N->getOperand(2));
2531   Ops.push_back(OffsetField);
2532   Ops.push_back(Chain);
2533 
2534   SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
2535   CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
2536 }
2537 
2538 void AMDGPUDAGToDAGISel::SelectInterpP1F16(SDNode *N) {
2539   if (Subtarget->getLDSBankCount() != 16) {
2540     // This is a single instruction with a pattern.
2541     SelectCode(N);
2542     return;
2543   }
2544 
2545   SDLoc DL(N);
2546 
2547   // This requires 2 instructions. It is possible to write a pattern to support
2548   // this, but the generated isel emitter doesn't correctly deal with multiple
2549   // output instructions using the same physical register input. The copy to m0
2550   // is incorrectly placed before the second instruction.
2551   //
2552   // TODO: Match source modifiers.
2553   //
2554   // def : Pat <
2555   //   (int_amdgcn_interp_p1_f16
2556   //    (VOP3Mods f32:$src0, i32:$src0_modifiers),
2557   //                             (i32 timm:$attrchan), (i32 timm:$attr),
2558   //                             (i1 timm:$high), M0),
2559   //   (V_INTERP_P1LV_F16 $src0_modifiers, VGPR_32:$src0, timm:$attr,
2560   //       timm:$attrchan, 0,
2561   //       (V_INTERP_MOV_F32 2, timm:$attr, timm:$attrchan), timm:$high)> {
2562   //   let Predicates = [has16BankLDS];
2563   // }
2564 
2565   // 16 bank LDS
2566   SDValue ToM0 = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, AMDGPU::M0,
2567                                       N->getOperand(5), SDValue());
2568 
2569   SDVTList VTs = CurDAG->getVTList(MVT::f32, MVT::Other);
2570 
2571   SDNode *InterpMov =
2572     CurDAG->getMachineNode(AMDGPU::V_INTERP_MOV_F32, DL, VTs, {
2573         CurDAG->getTargetConstant(2, DL, MVT::i32), // P0
2574         N->getOperand(3),  // Attr
2575         N->getOperand(2),  // Attrchan
2576         ToM0.getValue(1) // In glue
2577   });
2578 
2579   SDNode *InterpP1LV =
2580     CurDAG->getMachineNode(AMDGPU::V_INTERP_P1LV_F16, DL, MVT::f32, {
2581         CurDAG->getTargetConstant(0, DL, MVT::i32), // $src0_modifiers
2582         N->getOperand(1), // Src0
2583         N->getOperand(3), // Attr
2584         N->getOperand(2), // Attrchan
2585         CurDAG->getTargetConstant(0, DL, MVT::i32), // $src2_modifiers
2586         SDValue(InterpMov, 0), // Src2 - holds two f16 values selected by high
2587         N->getOperand(4), // high
2588         CurDAG->getTargetConstant(0, DL, MVT::i1), // $clamp
2589         CurDAG->getTargetConstant(0, DL, MVT::i32), // $omod
2590         SDValue(InterpMov, 1)
2591   });
2592 
2593   CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), SDValue(InterpP1LV, 0));
2594 }
2595 
2596 void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) {
2597   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2598   switch (IntrID) {
2599   case Intrinsic::amdgcn_ds_append:
2600   case Intrinsic::amdgcn_ds_consume: {
2601     if (N->getValueType(0) != MVT::i32)
2602       break;
2603     SelectDSAppendConsume(N, IntrID);
2604     return;
2605   }
2606   }
2607 
2608   SelectCode(N);
2609 }
2610 
2611 void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
2612   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
2613   unsigned Opcode;
2614   switch (IntrID) {
2615   case Intrinsic::amdgcn_wqm:
2616     Opcode = AMDGPU::WQM;
2617     break;
2618   case Intrinsic::amdgcn_softwqm:
2619     Opcode = AMDGPU::SOFT_WQM;
2620     break;
2621   case Intrinsic::amdgcn_wwm:
2622     Opcode = AMDGPU::WWM;
2623     break;
2624   case Intrinsic::amdgcn_interp_p1_f16:
2625     SelectInterpP1F16(N);
2626     return;
2627   default:
2628     SelectCode(N);
2629     return;
2630   }
2631 
2632   SDValue Src = N->getOperand(1);
2633   CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), {Src});
2634 }
2635 
2636 void AMDGPUDAGToDAGISel::SelectINTRINSIC_VOID(SDNode *N) {
2637   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2638   switch (IntrID) {
2639   case Intrinsic::amdgcn_ds_gws_init:
2640   case Intrinsic::amdgcn_ds_gws_barrier:
2641   case Intrinsic::amdgcn_ds_gws_sema_v:
2642   case Intrinsic::amdgcn_ds_gws_sema_br:
2643   case Intrinsic::amdgcn_ds_gws_sema_p:
2644   case Intrinsic::amdgcn_ds_gws_sema_release_all:
2645     SelectDS_GWS(N, IntrID);
2646     return;
2647   default:
2648     break;
2649   }
2650 
2651   SelectCode(N);
2652 }
2653 
2654 bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
2655                                             unsigned &Mods,
2656                                             bool AllowAbs) const {
2657   Mods = 0;
2658   Src = In;
2659 
2660   if (Src.getOpcode() == ISD::FNEG) {
2661     Mods |= SISrcMods::NEG;
2662     Src = Src.getOperand(0);
2663   }
2664 
2665   if (AllowAbs && Src.getOpcode() == ISD::FABS) {
2666     Mods |= SISrcMods::ABS;
2667     Src = Src.getOperand(0);
2668   }
2669 
2670   return true;
2671 }
2672 
2673 bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
2674                                         SDValue &SrcMods) const {
2675   unsigned Mods;
2676   if (SelectVOP3ModsImpl(In, Src, Mods)) {
2677     SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2678     return true;
2679   }
2680 
2681   return false;
2682 }
2683 
2684 bool AMDGPUDAGToDAGISel::SelectVOP3BMods(SDValue In, SDValue &Src,
2685                                          SDValue &SrcMods) const {
2686   unsigned Mods;
2687   if (SelectVOP3ModsImpl(In, Src, Mods, /* AllowAbs */ false)) {
2688     SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2689     return true;
2690   }
2691 
2692   return false;
2693 }
2694 
2695 bool AMDGPUDAGToDAGISel::SelectVOP3Mods_NNaN(SDValue In, SDValue &Src,
2696                                              SDValue &SrcMods) const {
2697   SelectVOP3Mods(In, Src, SrcMods);
2698   return isNoNanSrc(Src);
2699 }
2700 
2701 bool AMDGPUDAGToDAGISel::SelectVOP3NoMods(SDValue In, SDValue &Src) const {
2702   if (In.getOpcode() == ISD::FABS || In.getOpcode() == ISD::FNEG)
2703     return false;
2704 
2705   Src = In;
2706   return true;
2707 }
2708 
2709 bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
2710                                          SDValue &SrcMods, SDValue &Clamp,
2711                                          SDValue &Omod) const {
2712   SDLoc DL(In);
2713   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2714   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2715 
2716   return SelectVOP3Mods(In, Src, SrcMods);
2717 }
2718 
2719 bool AMDGPUDAGToDAGISel::SelectVOP3BMods0(SDValue In, SDValue &Src,
2720                                           SDValue &SrcMods, SDValue &Clamp,
2721                                           SDValue &Omod) const {
2722   SDLoc DL(In);
2723   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2724   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2725 
2726   return SelectVOP3BMods(In, Src, SrcMods);
2727 }
2728 
2729 bool AMDGPUDAGToDAGISel::SelectVOP3OMods(SDValue In, SDValue &Src,
2730                                          SDValue &Clamp, SDValue &Omod) const {
2731   Src = In;
2732 
2733   SDLoc DL(In);
2734   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2735   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2736 
2737   return true;
2738 }
2739 
2740 bool AMDGPUDAGToDAGISel::SelectVOP3PMods(SDValue In, SDValue &Src,
2741                                          SDValue &SrcMods) const {
2742   unsigned Mods = 0;
2743   Src = In;
2744 
2745   if (Src.getOpcode() == ISD::FNEG) {
2746     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
2747     Src = Src.getOperand(0);
2748   }
2749 
2750   if (Src.getOpcode() == ISD::BUILD_VECTOR) {
2751     unsigned VecMods = Mods;
2752 
2753     SDValue Lo = stripBitcast(Src.getOperand(0));
2754     SDValue Hi = stripBitcast(Src.getOperand(1));
2755 
2756     if (Lo.getOpcode() == ISD::FNEG) {
2757       Lo = stripBitcast(Lo.getOperand(0));
2758       Mods ^= SISrcMods::NEG;
2759     }
2760 
2761     if (Hi.getOpcode() == ISD::FNEG) {
2762       Hi = stripBitcast(Hi.getOperand(0));
2763       Mods ^= SISrcMods::NEG_HI;
2764     }
2765 
2766     if (isExtractHiElt(Lo, Lo))
2767       Mods |= SISrcMods::OP_SEL_0;
2768 
2769     if (isExtractHiElt(Hi, Hi))
2770       Mods |= SISrcMods::OP_SEL_1;
2771 
2772     Lo = stripExtractLoElt(Lo);
2773     Hi = stripExtractLoElt(Hi);
2774 
2775     if (Lo == Hi && !isInlineImmediate(Lo.getNode())) {
2776       // Really a scalar input. Just select from the low half of the register to
2777       // avoid packing.
2778 
2779       Src = Lo;
2780       SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2781       return true;
2782     }
2783 
2784     Mods = VecMods;
2785   }
2786 
2787   // Packed instructions do not have abs modifiers.
2788   Mods |= SISrcMods::OP_SEL_1;
2789 
2790   SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2791   return true;
2792 }
2793 
2794 bool AMDGPUDAGToDAGISel::SelectVOP3OpSel(SDValue In, SDValue &Src,
2795                                          SDValue &SrcMods) const {
2796   Src = In;
2797   // FIXME: Handle op_sel
2798   SrcMods = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
2799   return true;
2800 }
2801 
2802 bool AMDGPUDAGToDAGISel::SelectVOP3OpSelMods(SDValue In, SDValue &Src,
2803                                              SDValue &SrcMods) const {
2804   // FIXME: Handle op_sel
2805   return SelectVOP3Mods(In, Src, SrcMods);
2806 }
2807 
2808 // The return value is not whether the match is possible (which it always is),
2809 // but whether or not it a conversion is really used.
2810 bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src,
2811                                                    unsigned &Mods) const {
2812   Mods = 0;
2813   SelectVOP3ModsImpl(In, Src, Mods);
2814 
2815   if (Src.getOpcode() == ISD::FP_EXTEND) {
2816     Src = Src.getOperand(0);
2817     assert(Src.getValueType() == MVT::f16);
2818     Src = stripBitcast(Src);
2819 
2820     // Be careful about folding modifiers if we already have an abs. fneg is
2821     // applied last, so we don't want to apply an earlier fneg.
2822     if ((Mods & SISrcMods::ABS) == 0) {
2823       unsigned ModsTmp;
2824       SelectVOP3ModsImpl(Src, Src, ModsTmp);
2825 
2826       if ((ModsTmp & SISrcMods::NEG) != 0)
2827         Mods ^= SISrcMods::NEG;
2828 
2829       if ((ModsTmp & SISrcMods::ABS) != 0)
2830         Mods |= SISrcMods::ABS;
2831     }
2832 
2833     // op_sel/op_sel_hi decide the source type and source.
2834     // If the source's op_sel_hi is set, it indicates to do a conversion from fp16.
2835     // If the sources's op_sel is set, it picks the high half of the source
2836     // register.
2837 
2838     Mods |= SISrcMods::OP_SEL_1;
2839     if (isExtractHiElt(Src, Src)) {
2840       Mods |= SISrcMods::OP_SEL_0;
2841 
2842       // TODO: Should we try to look for neg/abs here?
2843     }
2844 
2845     return true;
2846   }
2847 
2848   return false;
2849 }
2850 
2851 bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixMods(SDValue In, SDValue &Src,
2852                                                SDValue &SrcMods) const {
2853   unsigned Mods = 0;
2854   SelectVOP3PMadMixModsImpl(In, Src, Mods);
2855   SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2856   return true;
2857 }
2858 
2859 SDValue AMDGPUDAGToDAGISel::getHi16Elt(SDValue In) const {
2860   if (In.isUndef())
2861     return CurDAG->getUNDEF(MVT::i32);
2862 
2863   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(In)) {
2864     SDLoc SL(In);
2865     return CurDAG->getConstant(C->getZExtValue() << 16, SL, MVT::i32);
2866   }
2867 
2868   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(In)) {
2869     SDLoc SL(In);
2870     return CurDAG->getConstant(
2871       C->getValueAPF().bitcastToAPInt().getZExtValue() << 16, SL, MVT::i32);
2872   }
2873 
2874   SDValue Src;
2875   if (isExtractHiElt(In, Src))
2876     return Src;
2877 
2878   return SDValue();
2879 }
2880 
2881 bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode * N) const {
2882   assert(CurDAG->getTarget().getTargetTriple().getArch() == Triple::amdgcn);
2883 
2884   const SIRegisterInfo *SIRI =
2885     static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
2886   const SIInstrInfo * SII =
2887     static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
2888 
2889   unsigned Limit = 0;
2890   bool AllUsesAcceptSReg = true;
2891   for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
2892     Limit < 10 && U != E; ++U, ++Limit) {
2893     const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
2894 
2895     // If the register class is unknown, it could be an unknown
2896     // register class that needs to be an SGPR, e.g. an inline asm
2897     // constraint
2898     if (!RC || SIRI->isSGPRClass(RC))
2899       return false;
2900 
2901     if (RC != &AMDGPU::VS_32RegClass) {
2902       AllUsesAcceptSReg = false;
2903       SDNode * User = *U;
2904       if (User->isMachineOpcode()) {
2905         unsigned Opc = User->getMachineOpcode();
2906         MCInstrDesc Desc = SII->get(Opc);
2907         if (Desc.isCommutable()) {
2908           unsigned OpIdx = Desc.getNumDefs() + U.getOperandNo();
2909           unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
2910           if (SII->findCommutedOpIndices(Desc, OpIdx, CommuteIdx1)) {
2911             unsigned CommutedOpNo = CommuteIdx1 - Desc.getNumDefs();
2912             const TargetRegisterClass *CommutedRC = getOperandRegClass(*U, CommutedOpNo);
2913             if (CommutedRC == &AMDGPU::VS_32RegClass)
2914               AllUsesAcceptSReg = true;
2915           }
2916         }
2917       }
2918       // If "AllUsesAcceptSReg == false" so far we haven't suceeded
2919       // commuting current user. This means have at least one use
2920       // that strictly require VGPR. Thus, we will not attempt to commute
2921       // other user instructions.
2922       if (!AllUsesAcceptSReg)
2923         break;
2924     }
2925   }
2926   return !AllUsesAcceptSReg && (Limit < 10);
2927 }
2928 
2929 bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode * N) const {
2930   auto Ld = cast<LoadSDNode>(N);
2931 
2932   return Ld->getAlignment() >= 4 &&
2933         (
2934           (
2935             (
2936               Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS       ||
2937               Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT
2938             )
2939             &&
2940             !N->isDivergent()
2941           )
2942           ||
2943           (
2944             Subtarget->getScalarizeGlobalBehavior() &&
2945             Ld->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
2946             Ld->isSimple() &&
2947             !N->isDivergent() &&
2948             static_cast<const SITargetLowering *>(
2949               getTargetLowering())->isMemOpHasNoClobberedMemOperand(N)
2950           )
2951         );
2952 }
2953 
2954 void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
2955   const AMDGPUTargetLowering& Lowering =
2956     *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
2957   bool IsModified = false;
2958   do {
2959     IsModified = false;
2960 
2961     // Go over all selected nodes and try to fold them a bit more
2962     SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_begin();
2963     while (Position != CurDAG->allnodes_end()) {
2964       SDNode *Node = &*Position++;
2965       MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(Node);
2966       if (!MachineNode)
2967         continue;
2968 
2969       SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
2970       if (ResNode != Node) {
2971         if (ResNode)
2972           ReplaceUses(Node, ResNode);
2973         IsModified = true;
2974       }
2975     }
2976     CurDAG->RemoveDeadNodes();
2977   } while (IsModified);
2978 }
2979 
2980 bool R600DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
2981   Subtarget = &MF.getSubtarget<R600Subtarget>();
2982   return SelectionDAGISel::runOnMachineFunction(MF);
2983 }
2984 
2985 bool R600DAGToDAGISel::isConstantLoad(const MemSDNode *N, int CbId) const {
2986   if (!N->readMem())
2987     return false;
2988   if (CbId == -1)
2989     return N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
2990            N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
2991 
2992   return N->getAddressSpace() == AMDGPUAS::CONSTANT_BUFFER_0 + CbId;
2993 }
2994 
2995 bool R600DAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
2996                                                          SDValue& IntPtr) {
2997   if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
2998     IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, SDLoc(Addr),
2999                                        true);
3000     return true;
3001   }
3002   return false;
3003 }
3004 
3005 bool R600DAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
3006     SDValue& BaseReg, SDValue &Offset) {
3007   if (!isa<ConstantSDNode>(Addr)) {
3008     BaseReg = Addr;
3009     Offset = CurDAG->getIntPtrConstant(0, SDLoc(Addr), true);
3010     return true;
3011   }
3012   return false;
3013 }
3014 
3015 void R600DAGToDAGISel::Select(SDNode *N) {
3016   unsigned int Opc = N->getOpcode();
3017   if (N->isMachineOpcode()) {
3018     N->setNodeId(-1);
3019     return;   // Already selected.
3020   }
3021 
3022   switch (Opc) {
3023   default: break;
3024   case AMDGPUISD::BUILD_VERTICAL_VECTOR:
3025   case ISD::SCALAR_TO_VECTOR:
3026   case ISD::BUILD_VECTOR: {
3027     EVT VT = N->getValueType(0);
3028     unsigned NumVectorElts = VT.getVectorNumElements();
3029     unsigned RegClassID;
3030     // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
3031     // that adds a 128 bits reg copy when going through TwoAddressInstructions
3032     // pass. We want to avoid 128 bits copies as much as possible because they
3033     // can't be bundled by our scheduler.
3034     switch(NumVectorElts) {
3035     case 2: RegClassID = R600::R600_Reg64RegClassID; break;
3036     case 4:
3037       if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
3038         RegClassID = R600::R600_Reg128VerticalRegClassID;
3039       else
3040         RegClassID = R600::R600_Reg128RegClassID;
3041       break;
3042     default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
3043     }
3044     SelectBuildVector(N, RegClassID);
3045     return;
3046   }
3047   }
3048 
3049   SelectCode(N);
3050 }
3051 
3052 bool R600DAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
3053                                           SDValue &Offset) {
3054   ConstantSDNode *C;
3055   SDLoc DL(Addr);
3056 
3057   if ((C = dyn_cast<ConstantSDNode>(Addr))) {
3058     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
3059     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
3060   } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
3061              (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
3062     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
3063     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
3064   } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
3065             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
3066     Base = Addr.getOperand(0);
3067     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
3068   } else {
3069     Base = Addr;
3070     Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
3071   }
3072 
3073   return true;
3074 }
3075 
3076 bool R600DAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
3077                                           SDValue &Offset) {
3078   ConstantSDNode *IMMOffset;
3079 
3080   if (Addr.getOpcode() == ISD::ADD
3081       && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
3082       && isInt<16>(IMMOffset->getZExtValue())) {
3083 
3084       Base = Addr.getOperand(0);
3085       Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
3086                                          MVT::i32);
3087       return true;
3088   // If the pointer address is constant, we can move it to the offset field.
3089   } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
3090              && isInt<16>(IMMOffset->getZExtValue())) {
3091     Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
3092                                   SDLoc(CurDAG->getEntryNode()),
3093                                   R600::ZERO, MVT::i32);
3094     Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
3095                                        MVT::i32);
3096     return true;
3097   }
3098 
3099   // Default case, no offset
3100   Base = Addr;
3101   Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
3102   return true;
3103 }
3104