1 //===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //==-----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Defines an instruction selector for the AMDGPU target.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPU.h"
15 #include "AMDGPUArgumentUsageInfo.h"
16 #include "AMDGPUISelLowering.h" // For AMDGPUISD
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPUPerfHintAnalysis.h"
19 #include "AMDGPURegisterInfo.h"
20 #include "AMDGPUSubtarget.h"
21 #include "AMDGPUTargetMachine.h"
22 #include "SIDefines.h"
23 #include "SIISelLowering.h"
24 #include "SIInstrInfo.h"
25 #include "SIMachineFunctionInfo.h"
26 #include "SIRegisterInfo.h"
27 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
28 #include "llvm/ADT/APInt.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/StringRef.h"
31 #include "llvm/Analysis/LegacyDivergenceAnalysis.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/CodeGen/FunctionLoweringInfo.h"
34 #include "llvm/CodeGen/ISDOpcodes.h"
35 #include "llvm/CodeGen/MachineFunction.h"
36 #include "llvm/CodeGen/MachineRegisterInfo.h"
37 #include "llvm/CodeGen/SelectionDAG.h"
38 #include "llvm/CodeGen/SelectionDAGISel.h"
39 #include "llvm/CodeGen/SelectionDAGNodes.h"
40 #include "llvm/CodeGen/ValueTypes.h"
41 #include "llvm/IR/BasicBlock.h"
42 #ifdef EXPENSIVE_CHECKS
43 #include "llvm/IR/Dominators.h"
44 #endif
45 #include "llvm/IR/Instruction.h"
46 #include "llvm/MC/MCInstrDesc.h"
47 #include "llvm/Support/Casting.h"
48 #include "llvm/Support/CodeGen.h"
49 #include "llvm/Support/ErrorHandling.h"
50 #include "llvm/Support/MachineValueType.h"
51 #include "llvm/Support/MathExtras.h"
52 #include <cassert>
53 #include <cstdint>
54 #include <new>
55 #include <vector>
56 
57 #define DEBUG_TYPE "isel"
58 
59 using namespace llvm;
60 
61 namespace llvm {
62 
63 class R600InstrInfo;
64 
65 } // end namespace llvm
66 
67 //===----------------------------------------------------------------------===//
68 // Instruction Selector Implementation
69 //===----------------------------------------------------------------------===//
70 
71 namespace {
72 
73 static bool isNullConstantOrUndef(SDValue V) {
74   if (V.isUndef())
75     return true;
76 
77   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
78   return Const != nullptr && Const->isNullValue();
79 }
80 
81 static bool getConstantValue(SDValue N, uint32_t &Out) {
82   // This is only used for packed vectors, where ussing 0 for undef should
83   // always be good.
84   if (N.isUndef()) {
85     Out = 0;
86     return true;
87   }
88 
89   if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
90     Out = C->getAPIntValue().getSExtValue();
91     return true;
92   }
93 
94   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N)) {
95     Out = C->getValueAPF().bitcastToAPInt().getSExtValue();
96     return true;
97   }
98 
99   return false;
100 }
101 
102 // TODO: Handle undef as zero
103 static SDNode *packConstantV2I16(const SDNode *N, SelectionDAG &DAG,
104                                  bool Negate = false) {
105   assert(N->getOpcode() == ISD::BUILD_VECTOR && N->getNumOperands() == 2);
106   uint32_t LHSVal, RHSVal;
107   if (getConstantValue(N->getOperand(0), LHSVal) &&
108       getConstantValue(N->getOperand(1), RHSVal)) {
109     SDLoc SL(N);
110     uint32_t K = Negate ?
111       (-LHSVal & 0xffff) | (-RHSVal << 16) :
112       (LHSVal & 0xffff) | (RHSVal << 16);
113     return DAG.getMachineNode(AMDGPU::S_MOV_B32, SL, N->getValueType(0),
114                               DAG.getTargetConstant(K, SL, MVT::i32));
115   }
116 
117   return nullptr;
118 }
119 
120 static SDNode *packNegConstantV2I16(const SDNode *N, SelectionDAG &DAG) {
121   return packConstantV2I16(N, DAG, true);
122 }
123 
124 /// AMDGPU specific code to select AMDGPU machine instructions for
125 /// SelectionDAG operations.
126 class AMDGPUDAGToDAGISel : public SelectionDAGISel {
127   // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
128   // make the right decision when generating code for different targets.
129   const GCNSubtarget *Subtarget;
130   bool EnableLateStructurizeCFG;
131 
132 public:
133   explicit AMDGPUDAGToDAGISel(TargetMachine *TM = nullptr,
134                               CodeGenOpt::Level OptLevel = CodeGenOpt::Default)
135     : SelectionDAGISel(*TM, OptLevel) {
136     EnableLateStructurizeCFG = AMDGPUTargetMachine::EnableLateStructurizeCFG;
137   }
138   ~AMDGPUDAGToDAGISel() override = default;
139 
140   void getAnalysisUsage(AnalysisUsage &AU) const override {
141     AU.addRequired<AMDGPUArgumentUsageInfo>();
142     AU.addRequired<AMDGPUPerfHintAnalysis>();
143     AU.addRequired<LegacyDivergenceAnalysis>();
144 #ifdef EXPENSIVE_CHECKS
145     AU.addRequired<DominatorTreeWrapperPass>();
146     AU.addRequired<LoopInfoWrapperPass>();
147 #endif
148     SelectionDAGISel::getAnalysisUsage(AU);
149   }
150 
151   bool matchLoadD16FromBuildVector(SDNode *N) const;
152 
153   bool runOnMachineFunction(MachineFunction &MF) override;
154   void PreprocessISelDAG() override;
155   void Select(SDNode *N) override;
156   StringRef getPassName() const override;
157   void PostprocessISelDAG() override;
158 
159 protected:
160   void SelectBuildVector(SDNode *N, unsigned RegClassID);
161 
162 private:
163   std::pair<SDValue, SDValue> foldFrameIndex(SDValue N) const;
164   bool isNoNanSrc(SDValue N) const;
165   bool isInlineImmediate(const SDNode *N, bool Negated = false) const;
166   bool isNegInlineImmediate(const SDNode *N) const {
167     return isInlineImmediate(N, true);
168   }
169 
170   bool isVGPRImm(const SDNode *N) const;
171   bool isUniformLoad(const SDNode *N) const;
172   bool isUniformBr(const SDNode *N) const;
173 
174   MachineSDNode *buildSMovImm64(SDLoc &DL, uint64_t Val, EVT VT) const;
175 
176   SDNode *glueCopyToM0LDSInit(SDNode *N) const;
177   SDNode *glueCopyToM0(SDNode *N, SDValue Val) const;
178 
179   const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
180   virtual bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
181   virtual bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
182   bool isDSOffsetLegal(SDValue Base, unsigned Offset,
183                        unsigned OffsetBits) const;
184   bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
185   bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
186                                  SDValue &Offset1) const;
187   bool SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
188                    SDValue &SOffset, SDValue &Offset, SDValue &Offen,
189                    SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
190                    SDValue &TFE, SDValue &DLC) const;
191   bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
192                          SDValue &SOffset, SDValue &Offset, SDValue &GLC,
193                          SDValue &SLC, SDValue &TFE, SDValue &DLC) const;
194   bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
195                          SDValue &VAddr, SDValue &SOffset, SDValue &Offset,
196                          SDValue &SLC) const;
197   bool SelectMUBUFScratchOffen(SDNode *Parent,
198                                SDValue Addr, SDValue &RSrc, SDValue &VAddr,
199                                SDValue &SOffset, SDValue &ImmOffset) const;
200   bool SelectMUBUFScratchOffset(SDNode *Parent,
201                                 SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
202                                 SDValue &Offset) const;
203 
204   bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
205                          SDValue &Offset, SDValue &GLC, SDValue &SLC,
206                          SDValue &TFE, SDValue &DLC) const;
207   bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
208                          SDValue &Offset, SDValue &SLC) const;
209   bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
210                          SDValue &Offset) const;
211 
212   bool SelectFlatAtomic(SDNode *N, SDValue Addr, SDValue &VAddr,
213                         SDValue &Offset, SDValue &SLC) const;
214   bool SelectFlatAtomicSigned(SDNode *N, SDValue Addr, SDValue &VAddr,
215                               SDValue &Offset, SDValue &SLC) const;
216 
217   template <bool IsSigned>
218   bool SelectFlatOffset(SDNode *N, SDValue Addr, SDValue &VAddr,
219                         SDValue &Offset, SDValue &SLC) const;
220 
221   bool SelectSMRDOffset(SDValue ByteOffsetNode, SDValue &Offset,
222                         bool &Imm) const;
223   SDValue Expand32BitAddress(SDValue Addr) const;
224   bool SelectSMRD(SDValue Addr, SDValue &SBase, SDValue &Offset,
225                   bool &Imm) const;
226   bool SelectSMRDImm(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
227   bool SelectSMRDImm32(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
228   bool SelectSMRDSgpr(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
229   bool SelectSMRDBufferImm(SDValue Addr, SDValue &Offset) const;
230   bool SelectSMRDBufferImm32(SDValue Addr, SDValue &Offset) const;
231   bool SelectMOVRELOffset(SDValue Index, SDValue &Base, SDValue &Offset) const;
232 
233   bool SelectVOP3Mods_NNaN(SDValue In, SDValue &Src, SDValue &SrcMods) const;
234   bool SelectVOP3ModsImpl(SDValue In, SDValue &Src, unsigned &SrcMods) const;
235   bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
236   bool SelectVOP3NoMods(SDValue In, SDValue &Src) const;
237   bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
238                        SDValue &Clamp, SDValue &Omod) const;
239   bool SelectVOP3NoMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
240                          SDValue &Clamp, SDValue &Omod) const;
241 
242   bool SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src, SDValue &SrcMods,
243                                  SDValue &Clamp,
244                                  SDValue &Omod) const;
245 
246   bool SelectVOP3OMods(SDValue In, SDValue &Src,
247                        SDValue &Clamp, SDValue &Omod) const;
248 
249   bool SelectVOP3PMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
250   bool SelectVOP3PMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
251                         SDValue &Clamp) const;
252 
253   bool SelectVOP3OpSel(SDValue In, SDValue &Src, SDValue &SrcMods) const;
254   bool SelectVOP3OpSel0(SDValue In, SDValue &Src, SDValue &SrcMods,
255                         SDValue &Clamp) const;
256 
257   bool SelectVOP3OpSelMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
258   bool SelectVOP3OpSelMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
259                             SDValue &Clamp) const;
260   bool SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src, unsigned &Mods) const;
261   bool SelectVOP3PMadMixMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
262 
263   SDValue getHi16Elt(SDValue In) const;
264 
265   void SelectADD_SUB_I64(SDNode *N);
266   void SelectAddcSubb(SDNode *N);
267   void SelectUADDO_USUBO(SDNode *N);
268   void SelectDIV_SCALE(SDNode *N);
269   void SelectDIV_FMAS(SDNode *N);
270   void SelectMAD_64_32(SDNode *N);
271   void SelectFMA_W_CHAIN(SDNode *N);
272   void SelectFMUL_W_CHAIN(SDNode *N);
273 
274   SDNode *getS_BFE(unsigned Opcode, const SDLoc &DL, SDValue Val,
275                    uint32_t Offset, uint32_t Width);
276   void SelectS_BFEFromShifts(SDNode *N);
277   void SelectS_BFE(SDNode *N);
278   bool isCBranchSCC(const SDNode *N) const;
279   void SelectBRCOND(SDNode *N);
280   void SelectFMAD_FMA(SDNode *N);
281   void SelectATOMIC_CMP_SWAP(SDNode *N);
282   void SelectDSAppendConsume(SDNode *N, unsigned IntrID);
283   void SelectDS_GWS(SDNode *N, unsigned IntrID);
284   void SelectINTRINSIC_W_CHAIN(SDNode *N);
285   void SelectINTRINSIC_VOID(SDNode *N);
286 
287 protected:
288   // Include the pieces autogenerated from the target description.
289 #include "AMDGPUGenDAGISel.inc"
290 };
291 
292 class R600DAGToDAGISel : public AMDGPUDAGToDAGISel {
293   const R600Subtarget *Subtarget;
294 
295   bool isConstantLoad(const MemSDNode *N, int cbID) const;
296   bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
297   bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
298                                        SDValue& Offset);
299 public:
300   explicit R600DAGToDAGISel(TargetMachine *TM, CodeGenOpt::Level OptLevel) :
301       AMDGPUDAGToDAGISel(TM, OptLevel) {}
302 
303   void Select(SDNode *N) override;
304 
305   bool SelectADDRIndirect(SDValue Addr, SDValue &Base,
306                           SDValue &Offset) override;
307   bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
308                           SDValue &Offset) override;
309 
310   bool runOnMachineFunction(MachineFunction &MF) override;
311 
312   void PreprocessISelDAG() override {}
313 
314 protected:
315   // Include the pieces autogenerated from the target description.
316 #include "R600GenDAGISel.inc"
317 };
318 
319 static SDValue stripBitcast(SDValue Val) {
320   return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
321 }
322 
323 // Figure out if this is really an extract of the high 16-bits of a dword.
324 static bool isExtractHiElt(SDValue In, SDValue &Out) {
325   In = stripBitcast(In);
326   if (In.getOpcode() != ISD::TRUNCATE)
327     return false;
328 
329   SDValue Srl = In.getOperand(0);
330   if (Srl.getOpcode() == ISD::SRL) {
331     if (ConstantSDNode *ShiftAmt = dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
332       if (ShiftAmt->getZExtValue() == 16) {
333         Out = stripBitcast(Srl.getOperand(0));
334         return true;
335       }
336     }
337   }
338 
339   return false;
340 }
341 
342 // Look through operations that obscure just looking at the low 16-bits of the
343 // same register.
344 static SDValue stripExtractLoElt(SDValue In) {
345   if (In.getOpcode() == ISD::TRUNCATE) {
346     SDValue Src = In.getOperand(0);
347     if (Src.getValueType().getSizeInBits() == 32)
348       return stripBitcast(Src);
349   }
350 
351   return In;
352 }
353 
354 }  // end anonymous namespace
355 
356 INITIALIZE_PASS_BEGIN(AMDGPUDAGToDAGISel, "amdgpu-isel",
357                       "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)
358 INITIALIZE_PASS_DEPENDENCY(AMDGPUArgumentUsageInfo)
359 INITIALIZE_PASS_DEPENDENCY(AMDGPUPerfHintAnalysis)
360 INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
361 #ifdef EXPENSIVE_CHECKS
362 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
363 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
364 #endif
365 INITIALIZE_PASS_END(AMDGPUDAGToDAGISel, "amdgpu-isel",
366                     "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)
367 
368 /// This pass converts a legalized DAG into a AMDGPU-specific
369 // DAG, ready for instruction scheduling.
370 FunctionPass *llvm::createAMDGPUISelDag(TargetMachine *TM,
371                                         CodeGenOpt::Level OptLevel) {
372   return new AMDGPUDAGToDAGISel(TM, OptLevel);
373 }
374 
375 /// This pass converts a legalized DAG into a R600-specific
376 // DAG, ready for instruction scheduling.
377 FunctionPass *llvm::createR600ISelDag(TargetMachine *TM,
378                                       CodeGenOpt::Level OptLevel) {
379   return new R600DAGToDAGISel(TM, OptLevel);
380 }
381 
382 bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
383 #ifdef EXPENSIVE_CHECKS
384   DominatorTree & DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
385   LoopInfo * LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
386   for (auto &L : LI->getLoopsInPreorder()) {
387     assert(L->isLCSSAForm(DT));
388   }
389 #endif
390   Subtarget = &MF.getSubtarget<GCNSubtarget>();
391   return SelectionDAGISel::runOnMachineFunction(MF);
392 }
393 
394 bool AMDGPUDAGToDAGISel::matchLoadD16FromBuildVector(SDNode *N) const {
395   assert(Subtarget->d16PreservesUnusedBits());
396   MVT VT = N->getValueType(0).getSimpleVT();
397   if (VT != MVT::v2i16 && VT != MVT::v2f16)
398     return false;
399 
400   SDValue Lo = N->getOperand(0);
401   SDValue Hi = N->getOperand(1);
402 
403   LoadSDNode *LdHi = dyn_cast<LoadSDNode>(stripBitcast(Hi));
404 
405   // build_vector lo, (load ptr) -> load_d16_hi ptr, lo
406   // build_vector lo, (zextload ptr from i8) -> load_d16_hi_u8 ptr, lo
407   // build_vector lo, (sextload ptr from i8) -> load_d16_hi_i8 ptr, lo
408 
409   // Need to check for possible indirect dependencies on the other half of the
410   // vector to avoid introducing a cycle.
411   if (LdHi && Hi.hasOneUse() && !LdHi->isPredecessorOf(Lo.getNode())) {
412     SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);
413 
414     SDValue TiedIn = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Lo);
415     SDValue Ops[] = {
416       LdHi->getChain(), LdHi->getBasePtr(), TiedIn
417     };
418 
419     unsigned LoadOp = AMDGPUISD::LOAD_D16_HI;
420     if (LdHi->getMemoryVT() == MVT::i8) {
421       LoadOp = LdHi->getExtensionType() == ISD::SEXTLOAD ?
422         AMDGPUISD::LOAD_D16_HI_I8 : AMDGPUISD::LOAD_D16_HI_U8;
423     } else {
424       assert(LdHi->getMemoryVT() == MVT::i16);
425     }
426 
427     SDValue NewLoadHi =
428       CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdHi), VTList,
429                                   Ops, LdHi->getMemoryVT(),
430                                   LdHi->getMemOperand());
431 
432     CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadHi);
433     CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdHi, 1), NewLoadHi.getValue(1));
434     return true;
435   }
436 
437   // build_vector (load ptr), hi -> load_d16_lo ptr, hi
438   // build_vector (zextload ptr from i8), hi -> load_d16_lo_u8 ptr, hi
439   // build_vector (sextload ptr from i8), hi -> load_d16_lo_i8 ptr, hi
440   LoadSDNode *LdLo = dyn_cast<LoadSDNode>(stripBitcast(Lo));
441   if (LdLo && Lo.hasOneUse()) {
442     SDValue TiedIn = getHi16Elt(Hi);
443     if (!TiedIn || LdLo->isPredecessorOf(TiedIn.getNode()))
444       return false;
445 
446     SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);
447     unsigned LoadOp = AMDGPUISD::LOAD_D16_LO;
448     if (LdLo->getMemoryVT() == MVT::i8) {
449       LoadOp = LdLo->getExtensionType() == ISD::SEXTLOAD ?
450         AMDGPUISD::LOAD_D16_LO_I8 : AMDGPUISD::LOAD_D16_LO_U8;
451     } else {
452       assert(LdLo->getMemoryVT() == MVT::i16);
453     }
454 
455     TiedIn = CurDAG->getNode(ISD::BITCAST, SDLoc(N), VT, TiedIn);
456 
457     SDValue Ops[] = {
458       LdLo->getChain(), LdLo->getBasePtr(), TiedIn
459     };
460 
461     SDValue NewLoadLo =
462       CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdLo), VTList,
463                                   Ops, LdLo->getMemoryVT(),
464                                   LdLo->getMemOperand());
465 
466     CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadLo);
467     CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdLo, 1), NewLoadLo.getValue(1));
468     return true;
469   }
470 
471   return false;
472 }
473 
474 void AMDGPUDAGToDAGISel::PreprocessISelDAG() {
475   if (!Subtarget->d16PreservesUnusedBits())
476     return;
477 
478   SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
479 
480   bool MadeChange = false;
481   while (Position != CurDAG->allnodes_begin()) {
482     SDNode *N = &*--Position;
483     if (N->use_empty())
484       continue;
485 
486     switch (N->getOpcode()) {
487     case ISD::BUILD_VECTOR:
488       MadeChange |= matchLoadD16FromBuildVector(N);
489       break;
490     default:
491       break;
492     }
493   }
494 
495   if (MadeChange) {
496     CurDAG->RemoveDeadNodes();
497     LLVM_DEBUG(dbgs() << "After PreProcess:\n";
498                CurDAG->dump(););
499   }
500 }
501 
502 bool AMDGPUDAGToDAGISel::isNoNanSrc(SDValue N) const {
503   if (TM.Options.NoNaNsFPMath)
504     return true;
505 
506   // TODO: Move into isKnownNeverNaN
507   if (N->getFlags().isDefined())
508     return N->getFlags().hasNoNaNs();
509 
510   return CurDAG->isKnownNeverNaN(N);
511 }
512 
513 bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N,
514                                            bool Negated) const {
515   if (N->isUndef())
516     return true;
517 
518   const SIInstrInfo *TII = Subtarget->getInstrInfo();
519   if (Negated) {
520     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
521       return TII->isInlineConstant(-C->getAPIntValue());
522 
523     if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
524       return TII->isInlineConstant(-C->getValueAPF().bitcastToAPInt());
525 
526   } else {
527     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
528       return TII->isInlineConstant(C->getAPIntValue());
529 
530     if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
531       return TII->isInlineConstant(C->getValueAPF().bitcastToAPInt());
532   }
533 
534   return false;
535 }
536 
537 /// Determine the register class for \p OpNo
538 /// \returns The register class of the virtual register that will be used for
539 /// the given operand number \OpNo or NULL if the register class cannot be
540 /// determined.
541 const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
542                                                           unsigned OpNo) const {
543   if (!N->isMachineOpcode()) {
544     if (N->getOpcode() == ISD::CopyToReg) {
545       unsigned Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
546       if (TargetRegisterInfo::isVirtualRegister(Reg)) {
547         MachineRegisterInfo &MRI = CurDAG->getMachineFunction().getRegInfo();
548         return MRI.getRegClass(Reg);
549       }
550 
551       const SIRegisterInfo *TRI
552         = static_cast<const GCNSubtarget *>(Subtarget)->getRegisterInfo();
553       return TRI->getPhysRegClass(Reg);
554     }
555 
556     return nullptr;
557   }
558 
559   switch (N->getMachineOpcode()) {
560   default: {
561     const MCInstrDesc &Desc =
562         Subtarget->getInstrInfo()->get(N->getMachineOpcode());
563     unsigned OpIdx = Desc.getNumDefs() + OpNo;
564     if (OpIdx >= Desc.getNumOperands())
565       return nullptr;
566     int RegClass = Desc.OpInfo[OpIdx].RegClass;
567     if (RegClass == -1)
568       return nullptr;
569 
570     return Subtarget->getRegisterInfo()->getRegClass(RegClass);
571   }
572   case AMDGPU::REG_SEQUENCE: {
573     unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
574     const TargetRegisterClass *SuperRC =
575         Subtarget->getRegisterInfo()->getRegClass(RCID);
576 
577     SDValue SubRegOp = N->getOperand(OpNo + 1);
578     unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
579     return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
580                                                               SubRegIdx);
581   }
582   }
583 }
584 
585 SDNode *AMDGPUDAGToDAGISel::glueCopyToM0(SDNode *N, SDValue Val) const {
586   const SITargetLowering& Lowering =
587     *static_cast<const SITargetLowering*>(getTargetLowering());
588 
589   assert(N->getOperand(0).getValueType() == MVT::Other && "Expected chain");
590 
591   SDValue M0 = Lowering.copyToM0(*CurDAG, N->getOperand(0), SDLoc(N),
592                                  Val);
593 
594   SDValue Glue = M0.getValue(1);
595 
596   SmallVector <SDValue, 8> Ops;
597   Ops.push_back(M0); // Replace the chain.
598   for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
599     Ops.push_back(N->getOperand(i));
600 
601   Ops.push_back(Glue);
602   return CurDAG->MorphNodeTo(N, N->getOpcode(), N->getVTList(), Ops);
603 }
604 
605 SDNode *AMDGPUDAGToDAGISel::glueCopyToM0LDSInit(SDNode *N) const {
606   unsigned AS = cast<MemSDNode>(N)->getAddressSpace();
607   if (AS == AMDGPUAS::LOCAL_ADDRESS) {
608     if (Subtarget->ldsRequiresM0Init())
609       return glueCopyToM0(N, CurDAG->getTargetConstant(-1, SDLoc(N), MVT::i32));
610   } else if (AS == AMDGPUAS::REGION_ADDRESS) {
611     MachineFunction &MF = CurDAG->getMachineFunction();
612     unsigned Value = MF.getInfo<SIMachineFunctionInfo>()->getGDSSize();
613     return
614         glueCopyToM0(N, CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i32));
615   }
616   return N;
617 }
618 
619 MachineSDNode *AMDGPUDAGToDAGISel::buildSMovImm64(SDLoc &DL, uint64_t Imm,
620                                                   EVT VT) const {
621   SDNode *Lo = CurDAG->getMachineNode(
622       AMDGPU::S_MOV_B32, DL, MVT::i32,
623       CurDAG->getConstant(Imm & 0xFFFFFFFF, DL, MVT::i32));
624   SDNode *Hi =
625       CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
626                              CurDAG->getConstant(Imm >> 32, DL, MVT::i32));
627   const SDValue Ops[] = {
628       CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
629       SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
630       SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)};
631 
632   return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, VT, Ops);
633 }
634 
635 static unsigned selectSGPRVectorRegClassID(unsigned NumVectorElts) {
636   switch (NumVectorElts) {
637   case 1:
638     return AMDGPU::SReg_32_XM0RegClassID;
639   case 2:
640     return AMDGPU::SReg_64RegClassID;
641   case 3:
642     return AMDGPU::SGPR_96RegClassID;
643   case 4:
644     return AMDGPU::SReg_128RegClassID;
645   case 5:
646     return AMDGPU::SGPR_160RegClassID;
647   case 8:
648     return AMDGPU::SReg_256RegClassID;
649   case 16:
650     return AMDGPU::SReg_512RegClassID;
651   }
652 
653   llvm_unreachable("invalid vector size");
654 }
655 
656 void AMDGPUDAGToDAGISel::SelectBuildVector(SDNode *N, unsigned RegClassID) {
657   EVT VT = N->getValueType(0);
658   unsigned NumVectorElts = VT.getVectorNumElements();
659   EVT EltVT = VT.getVectorElementType();
660   SDLoc DL(N);
661   SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
662 
663   if (NumVectorElts == 1) {
664     CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT, N->getOperand(0),
665                          RegClass);
666     return;
667   }
668 
669   assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
670                                   "supported yet");
671   // 16 = Max Num Vector Elements
672   // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
673   // 1 = Vector Register Class
674   SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);
675 
676   RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
677   bool IsRegSeq = true;
678   unsigned NOps = N->getNumOperands();
679   for (unsigned i = 0; i < NOps; i++) {
680     // XXX: Why is this here?
681     if (isa<RegisterSDNode>(N->getOperand(i))) {
682       IsRegSeq = false;
683       break;
684     }
685     unsigned Sub = AMDGPURegisterInfo::getSubRegFromChannel(i);
686     RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
687     RegSeqArgs[1 + (2 * i) + 1] = CurDAG->getTargetConstant(Sub, DL, MVT::i32);
688   }
689   if (NOps != NumVectorElts) {
690     // Fill in the missing undef elements if this was a scalar_to_vector.
691     assert(N->getOpcode() == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
692     MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
693                                                    DL, EltVT);
694     for (unsigned i = NOps; i < NumVectorElts; ++i) {
695       unsigned Sub = AMDGPURegisterInfo::getSubRegFromChannel(i);
696       RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
697       RegSeqArgs[1 + (2 * i) + 1] =
698           CurDAG->getTargetConstant(Sub, DL, MVT::i32);
699     }
700   }
701 
702   if (!IsRegSeq)
703     SelectCode(N);
704   CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(), RegSeqArgs);
705 }
706 
707 void AMDGPUDAGToDAGISel::Select(SDNode *N) {
708   unsigned int Opc = N->getOpcode();
709   if (N->isMachineOpcode()) {
710     N->setNodeId(-1);
711     return;   // Already selected.
712   }
713 
714   if (isa<AtomicSDNode>(N) ||
715       (Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC ||
716        Opc == ISD::ATOMIC_LOAD_FADD ||
717        Opc == AMDGPUISD::ATOMIC_LOAD_FMIN ||
718        Opc == AMDGPUISD::ATOMIC_LOAD_FMAX))
719     N = glueCopyToM0LDSInit(N);
720 
721   switch (Opc) {
722   default:
723     break;
724   // We are selecting i64 ADD here instead of custom lower it during
725   // DAG legalization, so we can fold some i64 ADDs used for address
726   // calculation into the LOAD and STORE instructions.
727   case ISD::ADDC:
728   case ISD::ADDE:
729   case ISD::SUBC:
730   case ISD::SUBE: {
731     if (N->getValueType(0) != MVT::i64)
732       break;
733 
734     SelectADD_SUB_I64(N);
735     return;
736   }
737   case ISD::ADDCARRY:
738   case ISD::SUBCARRY:
739     if (N->getValueType(0) != MVT::i32)
740       break;
741 
742     SelectAddcSubb(N);
743     return;
744   case ISD::UADDO:
745   case ISD::USUBO: {
746     SelectUADDO_USUBO(N);
747     return;
748   }
749   case AMDGPUISD::FMUL_W_CHAIN: {
750     SelectFMUL_W_CHAIN(N);
751     return;
752   }
753   case AMDGPUISD::FMA_W_CHAIN: {
754     SelectFMA_W_CHAIN(N);
755     return;
756   }
757 
758   case ISD::SCALAR_TO_VECTOR:
759   case ISD::BUILD_VECTOR: {
760     EVT VT = N->getValueType(0);
761     unsigned NumVectorElts = VT.getVectorNumElements();
762     if (VT.getScalarSizeInBits() == 16) {
763       if (Opc == ISD::BUILD_VECTOR && NumVectorElts == 2) {
764         if (SDNode *Packed = packConstantV2I16(N, *CurDAG)) {
765           ReplaceNode(N, Packed);
766           return;
767         }
768       }
769 
770       break;
771     }
772 
773     assert(VT.getVectorElementType().bitsEq(MVT::i32));
774     unsigned RegClassID = selectSGPRVectorRegClassID(NumVectorElts);
775     SelectBuildVector(N, RegClassID);
776     return;
777   }
778   case ISD::BUILD_PAIR: {
779     SDValue RC, SubReg0, SubReg1;
780     SDLoc DL(N);
781     if (N->getValueType(0) == MVT::i128) {
782       RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32);
783       SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32);
784       SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32);
785     } else if (N->getValueType(0) == MVT::i64) {
786       RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32);
787       SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
788       SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
789     } else {
790       llvm_unreachable("Unhandled value type for BUILD_PAIR");
791     }
792     const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
793                             N->getOperand(1), SubReg1 };
794     ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
795                                           N->getValueType(0), Ops));
796     return;
797   }
798 
799   case ISD::Constant:
800   case ISD::ConstantFP: {
801     if (N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
802       break;
803 
804     uint64_t Imm;
805     if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
806       Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
807     else {
808       ConstantSDNode *C = cast<ConstantSDNode>(N);
809       Imm = C->getZExtValue();
810     }
811 
812     SDLoc DL(N);
813     ReplaceNode(N, buildSMovImm64(DL, Imm, N->getValueType(0)));
814     return;
815   }
816   case ISD::LOAD:
817   case ISD::STORE:
818   case ISD::ATOMIC_LOAD:
819   case ISD::ATOMIC_STORE: {
820     N = glueCopyToM0LDSInit(N);
821     break;
822   }
823 
824   case AMDGPUISD::BFE_I32:
825   case AMDGPUISD::BFE_U32: {
826     // There is a scalar version available, but unlike the vector version which
827     // has a separate operand for the offset and width, the scalar version packs
828     // the width and offset into a single operand. Try to move to the scalar
829     // version if the offsets are constant, so that we can try to keep extended
830     // loads of kernel arguments in SGPRs.
831 
832     // TODO: Technically we could try to pattern match scalar bitshifts of
833     // dynamic values, but it's probably not useful.
834     ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
835     if (!Offset)
836       break;
837 
838     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
839     if (!Width)
840       break;
841 
842     bool Signed = Opc == AMDGPUISD::BFE_I32;
843 
844     uint32_t OffsetVal = Offset->getZExtValue();
845     uint32_t WidthVal = Width->getZExtValue();
846 
847     ReplaceNode(N, getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
848                             SDLoc(N), N->getOperand(0), OffsetVal, WidthVal));
849     return;
850   }
851   case AMDGPUISD::DIV_SCALE: {
852     SelectDIV_SCALE(N);
853     return;
854   }
855   case AMDGPUISD::DIV_FMAS: {
856     SelectDIV_FMAS(N);
857     return;
858   }
859   case AMDGPUISD::MAD_I64_I32:
860   case AMDGPUISD::MAD_U64_U32: {
861     SelectMAD_64_32(N);
862     return;
863   }
864   case ISD::CopyToReg: {
865     const SITargetLowering& Lowering =
866       *static_cast<const SITargetLowering*>(getTargetLowering());
867     N = Lowering.legalizeTargetIndependentNode(N, *CurDAG);
868     break;
869   }
870   case ISD::AND:
871   case ISD::SRL:
872   case ISD::SRA:
873   case ISD::SIGN_EXTEND_INREG:
874     if (N->getValueType(0) != MVT::i32)
875       break;
876 
877     SelectS_BFE(N);
878     return;
879   case ISD::BRCOND:
880     SelectBRCOND(N);
881     return;
882   case ISD::FMAD:
883   case ISD::FMA:
884     SelectFMAD_FMA(N);
885     return;
886   case AMDGPUISD::ATOMIC_CMP_SWAP:
887     SelectATOMIC_CMP_SWAP(N);
888     return;
889   case AMDGPUISD::CVT_PKRTZ_F16_F32:
890   case AMDGPUISD::CVT_PKNORM_I16_F32:
891   case AMDGPUISD::CVT_PKNORM_U16_F32:
892   case AMDGPUISD::CVT_PK_U16_U32:
893   case AMDGPUISD::CVT_PK_I16_I32: {
894     // Hack around using a legal type if f16 is illegal.
895     if (N->getValueType(0) == MVT::i32) {
896       MVT NewVT = Opc == AMDGPUISD::CVT_PKRTZ_F16_F32 ? MVT::v2f16 : MVT::v2i16;
897       N = CurDAG->MorphNodeTo(N, N->getOpcode(), CurDAG->getVTList(NewVT),
898                               { N->getOperand(0), N->getOperand(1) });
899       SelectCode(N);
900       return;
901     }
902 
903     break;
904   }
905   case ISD::INTRINSIC_W_CHAIN: {
906     SelectINTRINSIC_W_CHAIN(N);
907     return;
908   }
909   case ISD::INTRINSIC_VOID: {
910     SelectINTRINSIC_VOID(N);
911     return;
912   }
913   }
914 
915   SelectCode(N);
916 }
917 
918 bool AMDGPUDAGToDAGISel::isUniformBr(const SDNode *N) const {
919   const BasicBlock *BB = FuncInfo->MBB->getBasicBlock();
920   const Instruction *Term = BB->getTerminator();
921   return Term->getMetadata("amdgpu.uniform") ||
922          Term->getMetadata("structurizecfg.uniform");
923 }
924 
925 StringRef AMDGPUDAGToDAGISel::getPassName() const {
926   return "AMDGPU DAG->DAG Pattern Instruction Selection";
927 }
928 
929 //===----------------------------------------------------------------------===//
930 // Complex Patterns
931 //===----------------------------------------------------------------------===//
932 
933 bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
934                                             SDValue &Offset) {
935   return false;
936 }
937 
938 bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
939                                             SDValue &Offset) {
940   ConstantSDNode *C;
941   SDLoc DL(Addr);
942 
943   if ((C = dyn_cast<ConstantSDNode>(Addr))) {
944     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
945     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
946   } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
947              (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
948     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
949     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
950   } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
951             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
952     Base = Addr.getOperand(0);
953     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
954   } else {
955     Base = Addr;
956     Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
957   }
958 
959   return true;
960 }
961 
962 // FIXME: Should only handle addcarry/subcarry
963 void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
964   SDLoc DL(N);
965   SDValue LHS = N->getOperand(0);
966   SDValue RHS = N->getOperand(1);
967 
968   unsigned Opcode = N->getOpcode();
969   bool ConsumeCarry = (Opcode == ISD::ADDE || Opcode == ISD::SUBE);
970   bool ProduceCarry =
971       ConsumeCarry || Opcode == ISD::ADDC || Opcode == ISD::SUBC;
972   bool IsAdd = Opcode == ISD::ADD || Opcode == ISD::ADDC || Opcode == ISD::ADDE;
973 
974   SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
975   SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
976 
977   SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
978                                        DL, MVT::i32, LHS, Sub0);
979   SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
980                                        DL, MVT::i32, LHS, Sub1);
981 
982   SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
983                                        DL, MVT::i32, RHS, Sub0);
984   SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
985                                        DL, MVT::i32, RHS, Sub1);
986 
987   SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
988 
989   unsigned Opc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
990   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
991 
992   SDNode *AddLo;
993   if (!ConsumeCarry) {
994     SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };
995     AddLo = CurDAG->getMachineNode(Opc, DL, VTList, Args);
996   } else {
997     SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0), N->getOperand(2) };
998     AddLo = CurDAG->getMachineNode(CarryOpc, DL, VTList, Args);
999   }
1000   SDValue AddHiArgs[] = {
1001     SDValue(Hi0, 0),
1002     SDValue(Hi1, 0),
1003     SDValue(AddLo, 1)
1004   };
1005   SDNode *AddHi = CurDAG->getMachineNode(CarryOpc, DL, VTList, AddHiArgs);
1006 
1007   SDValue RegSequenceArgs[] = {
1008     CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
1009     SDValue(AddLo,0),
1010     Sub0,
1011     SDValue(AddHi,0),
1012     Sub1,
1013   };
1014   SDNode *RegSequence = CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
1015                                                MVT::i64, RegSequenceArgs);
1016 
1017   if (ProduceCarry) {
1018     // Replace the carry-use
1019     ReplaceUses(SDValue(N, 1), SDValue(AddHi, 1));
1020   }
1021 
1022   // Replace the remaining uses.
1023   ReplaceNode(N, RegSequence);
1024 }
1025 
1026 void AMDGPUDAGToDAGISel::SelectAddcSubb(SDNode *N) {
1027   SDLoc DL(N);
1028   SDValue LHS = N->getOperand(0);
1029   SDValue RHS = N->getOperand(1);
1030   SDValue CI = N->getOperand(2);
1031 
1032   unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::V_ADDC_U32_e64
1033                                                  : AMDGPU::V_SUBB_U32_e64;
1034   CurDAG->SelectNodeTo(
1035       N, Opc, N->getVTList(),
1036       {LHS, RHS, CI, CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
1037 }
1038 
1039 void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) {
1040   // The name of the opcodes are misleading. v_add_i32/v_sub_i32 have unsigned
1041   // carry out despite the _i32 name. These were renamed in VI to _U32.
1042   // FIXME: We should probably rename the opcodes here.
1043   unsigned Opc = N->getOpcode() == ISD::UADDO ?
1044     AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;
1045 
1046   CurDAG->SelectNodeTo(
1047       N, Opc, N->getVTList(),
1048       {N->getOperand(0), N->getOperand(1),
1049        CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
1050 }
1051 
1052 void AMDGPUDAGToDAGISel::SelectFMA_W_CHAIN(SDNode *N) {
1053   SDLoc SL(N);
1054   //  src0_modifiers, src0,  src1_modifiers, src1, src2_modifiers, src2, clamp, omod
1055   SDValue Ops[10];
1056 
1057   SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[6], Ops[7]);
1058   SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
1059   SelectVOP3Mods(N->getOperand(3), Ops[5], Ops[4]);
1060   Ops[8] = N->getOperand(0);
1061   Ops[9] = N->getOperand(4);
1062 
1063   CurDAG->SelectNodeTo(N, AMDGPU::V_FMA_F32, N->getVTList(), Ops);
1064 }
1065 
1066 void AMDGPUDAGToDAGISel::SelectFMUL_W_CHAIN(SDNode *N) {
1067   SDLoc SL(N);
1068   //    src0_modifiers, src0,  src1_modifiers, src1, clamp, omod
1069   SDValue Ops[8];
1070 
1071   SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[4], Ops[5]);
1072   SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
1073   Ops[6] = N->getOperand(0);
1074   Ops[7] = N->getOperand(3);
1075 
1076   CurDAG->SelectNodeTo(N, AMDGPU::V_MUL_F32_e64, N->getVTList(), Ops);
1077 }
1078 
1079 // We need to handle this here because tablegen doesn't support matching
1080 // instructions with multiple outputs.
1081 void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
1082   SDLoc SL(N);
1083   EVT VT = N->getValueType(0);
1084 
1085   assert(VT == MVT::f32 || VT == MVT::f64);
1086 
1087   unsigned Opc
1088     = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;
1089 
1090   SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2) };
1091   CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
1092 }
1093 
1094 void AMDGPUDAGToDAGISel::SelectDIV_FMAS(SDNode *N) {
1095   const GCNSubtarget *ST = static_cast<const GCNSubtarget *>(Subtarget);
1096   const SIRegisterInfo *TRI = ST->getRegisterInfo();
1097 
1098   SDLoc SL(N);
1099   EVT VT = N->getValueType(0);
1100 
1101   assert(VT == MVT::f32 || VT == MVT::f64);
1102 
1103   unsigned Opc
1104     = (VT == MVT::f64) ? AMDGPU::V_DIV_FMAS_F64 : AMDGPU::V_DIV_FMAS_F32;
1105 
1106   SDValue CarryIn = N->getOperand(3);
1107   // V_DIV_FMAS implicitly reads VCC.
1108   SDValue VCC = CurDAG->getCopyToReg(CurDAG->getEntryNode(), SL,
1109                                      TRI->getVCC(), CarryIn, SDValue());
1110 
1111   SDValue Ops[10];
1112 
1113   SelectVOP3Mods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
1114   SelectVOP3Mods(N->getOperand(1), Ops[3], Ops[2]);
1115   SelectVOP3Mods(N->getOperand(2), Ops[5], Ops[4]);
1116 
1117   Ops[8] = VCC;
1118   Ops[9] = VCC.getValue(1);
1119 
1120   CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
1121 }
1122 
1123 // We need to handle this here because tablegen doesn't support matching
1124 // instructions with multiple outputs.
1125 void AMDGPUDAGToDAGISel::SelectMAD_64_32(SDNode *N) {
1126   SDLoc SL(N);
1127   bool Signed = N->getOpcode() == AMDGPUISD::MAD_I64_I32;
1128   unsigned Opc = Signed ? AMDGPU::V_MAD_I64_I32 : AMDGPU::V_MAD_U64_U32;
1129 
1130   SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
1131   SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
1132                     Clamp };
1133   CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
1134 }
1135 
1136 bool AMDGPUDAGToDAGISel::isDSOffsetLegal(SDValue Base, unsigned Offset,
1137                                          unsigned OffsetBits) const {
1138   if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
1139       (OffsetBits == 8 && !isUInt<8>(Offset)))
1140     return false;
1141 
1142   if (Subtarget->hasUsableDSOffset() ||
1143       Subtarget->unsafeDSOffsetFoldingEnabled())
1144     return true;
1145 
1146   // On Southern Islands instruction with a negative base value and an offset
1147   // don't seem to work.
1148   return CurDAG->SignBitIsZero(Base);
1149 }
1150 
1151 bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
1152                                               SDValue &Offset) const {
1153   SDLoc DL(Addr);
1154   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1155     SDValue N0 = Addr.getOperand(0);
1156     SDValue N1 = Addr.getOperand(1);
1157     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
1158     if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
1159       // (add n0, c0)
1160       Base = N0;
1161       Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
1162       return true;
1163     }
1164   } else if (Addr.getOpcode() == ISD::SUB) {
1165     // sub C, x -> add (sub 0, x), C
1166     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
1167       int64_t ByteOffset = C->getSExtValue();
1168       if (isUInt<16>(ByteOffset)) {
1169         SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
1170 
1171         // XXX - This is kind of hacky. Create a dummy sub node so we can check
1172         // the known bits in isDSOffsetLegal. We need to emit the selected node
1173         // here, so this is thrown away.
1174         SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
1175                                       Zero, Addr.getOperand(1));
1176 
1177         if (isDSOffsetLegal(Sub, ByteOffset, 16)) {
1178           SmallVector<SDValue, 3> Opnds;
1179           Opnds.push_back(Zero);
1180           Opnds.push_back(Addr.getOperand(1));
1181 
1182           // FIXME: Select to VOP3 version for with-carry.
1183           unsigned SubOp = AMDGPU::V_SUB_I32_e32;
1184           if (Subtarget->hasAddNoCarry()) {
1185             SubOp = AMDGPU::V_SUB_U32_e64;
1186             Opnds.push_back(
1187                 CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
1188           }
1189 
1190           MachineSDNode *MachineSub =
1191               CurDAG->getMachineNode(SubOp, DL, MVT::i32, Opnds);
1192 
1193           Base = SDValue(MachineSub, 0);
1194           Offset = CurDAG->getTargetConstant(ByteOffset, DL, MVT::i16);
1195           return true;
1196         }
1197       }
1198     }
1199   } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
1200     // If we have a constant address, prefer to put the constant into the
1201     // offset. This can save moves to load the constant address since multiple
1202     // operations can share the zero base address register, and enables merging
1203     // into read2 / write2 instructions.
1204 
1205     SDLoc DL(Addr);
1206 
1207     if (isUInt<16>(CAddr->getZExtValue())) {
1208       SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
1209       MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
1210                                  DL, MVT::i32, Zero);
1211       Base = SDValue(MovZero, 0);
1212       Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
1213       return true;
1214     }
1215   }
1216 
1217   // default case
1218   Base = Addr;
1219   Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i16);
1220   return true;
1221 }
1222 
1223 // TODO: If offset is too big, put low 16-bit into offset.
1224 bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
1225                                                    SDValue &Offset0,
1226                                                    SDValue &Offset1) const {
1227   SDLoc DL(Addr);
1228 
1229   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1230     SDValue N0 = Addr.getOperand(0);
1231     SDValue N1 = Addr.getOperand(1);
1232     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
1233     unsigned DWordOffset0 = C1->getZExtValue() / 4;
1234     unsigned DWordOffset1 = DWordOffset0 + 1;
1235     // (add n0, c0)
1236     if (isDSOffsetLegal(N0, DWordOffset1, 8)) {
1237       Base = N0;
1238       Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
1239       Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
1240       return true;
1241     }
1242   } else if (Addr.getOpcode() == ISD::SUB) {
1243     // sub C, x -> add (sub 0, x), C
1244     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
1245       unsigned DWordOffset0 = C->getZExtValue() / 4;
1246       unsigned DWordOffset1 = DWordOffset0 + 1;
1247 
1248       if (isUInt<8>(DWordOffset0)) {
1249         SDLoc DL(Addr);
1250         SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
1251 
1252         // XXX - This is kind of hacky. Create a dummy sub node so we can check
1253         // the known bits in isDSOffsetLegal. We need to emit the selected node
1254         // here, so this is thrown away.
1255         SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
1256                                       Zero, Addr.getOperand(1));
1257 
1258         if (isDSOffsetLegal(Sub, DWordOffset1, 8)) {
1259           SmallVector<SDValue, 3> Opnds;
1260           Opnds.push_back(Zero);
1261           Opnds.push_back(Addr.getOperand(1));
1262           unsigned SubOp = AMDGPU::V_SUB_I32_e32;
1263           if (Subtarget->hasAddNoCarry()) {
1264             SubOp = AMDGPU::V_SUB_U32_e64;
1265             Opnds.push_back(
1266                 CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
1267           }
1268 
1269           MachineSDNode *MachineSub
1270             = CurDAG->getMachineNode(SubOp, DL, MVT::i32, Opnds);
1271 
1272           Base = SDValue(MachineSub, 0);
1273           Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
1274           Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
1275           return true;
1276         }
1277       }
1278     }
1279   } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
1280     unsigned DWordOffset0 = CAddr->getZExtValue() / 4;
1281     unsigned DWordOffset1 = DWordOffset0 + 1;
1282     assert(4 * DWordOffset0 == CAddr->getZExtValue());
1283 
1284     if (isUInt<8>(DWordOffset0) && isUInt<8>(DWordOffset1)) {
1285       SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
1286       MachineSDNode *MovZero
1287         = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
1288                                  DL, MVT::i32, Zero);
1289       Base = SDValue(MovZero, 0);
1290       Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
1291       Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
1292       return true;
1293     }
1294   }
1295 
1296   // default case
1297 
1298   Base = Addr;
1299   Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
1300   Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
1301   return true;
1302 }
1303 
1304 bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
1305                                      SDValue &VAddr, SDValue &SOffset,
1306                                      SDValue &Offset, SDValue &Offen,
1307                                      SDValue &Idxen, SDValue &Addr64,
1308                                      SDValue &GLC, SDValue &SLC,
1309                                      SDValue &TFE, SDValue &DLC) const {
1310   // Subtarget prefers to use flat instruction
1311   if (Subtarget->useFlatForGlobal())
1312     return false;
1313 
1314   SDLoc DL(Addr);
1315 
1316   if (!GLC.getNode())
1317     GLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
1318   if (!SLC.getNode())
1319     SLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
1320   TFE = CurDAG->getTargetConstant(0, DL, MVT::i1);
1321   DLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
1322 
1323   Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
1324   Offen = CurDAG->getTargetConstant(0, DL, MVT::i1);
1325   Addr64 = CurDAG->getTargetConstant(0, DL, MVT::i1);
1326   SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
1327 
1328   ConstantSDNode *C1 = nullptr;
1329   SDValue N0 = Addr;
1330   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1331     C1 = cast<ConstantSDNode>(Addr.getOperand(1));
1332     if (isUInt<32>(C1->getZExtValue()))
1333       N0 = Addr.getOperand(0);
1334     else
1335       C1 = nullptr;
1336   }
1337 
1338   if (N0.getOpcode() == ISD::ADD) {
1339     // (add N2, N3) -> addr64, or
1340     // (add (add N2, N3), C1) -> addr64
1341     SDValue N2 = N0.getOperand(0);
1342     SDValue N3 = N0.getOperand(1);
1343     Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
1344 
1345     if (N2->isDivergent()) {
1346       if (N3->isDivergent()) {
1347         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
1348         // addr64, and construct the resource from a 0 address.
1349         Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
1350         VAddr = N0;
1351       } else {
1352         // N2 is divergent, N3 is not.
1353         Ptr = N3;
1354         VAddr = N2;
1355       }
1356     } else {
1357       // N2 is not divergent.
1358       Ptr = N2;
1359       VAddr = N3;
1360     }
1361     Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1362   } else if (N0->isDivergent()) {
1363     // N0 is divergent. Use it as the addr64, and construct the resource from a
1364     // 0 address.
1365     Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
1366     VAddr = N0;
1367     Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
1368   } else {
1369     // N0 -> offset, or
1370     // (N0 + C1) -> offset
1371     VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
1372     Ptr = N0;
1373   }
1374 
1375   if (!C1) {
1376     // No offset.
1377     Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1378     return true;
1379   }
1380 
1381   if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue())) {
1382     // Legal offset for instruction.
1383     Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
1384     return true;
1385   }
1386 
1387   // Illegal offset, store it in soffset.
1388   Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1389   SOffset =
1390       SDValue(CurDAG->getMachineNode(
1391                   AMDGPU::S_MOV_B32, DL, MVT::i32,
1392                   CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32)),
1393               0);
1394   return true;
1395 }
1396 
1397 bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
1398                                            SDValue &VAddr, SDValue &SOffset,
1399                                            SDValue &Offset, SDValue &GLC,
1400                                            SDValue &SLC, SDValue &TFE,
1401                                            SDValue &DLC) const {
1402   SDValue Ptr, Offen, Idxen, Addr64;
1403 
1404   // addr64 bit was removed for volcanic islands.
1405   if (!Subtarget->hasAddr64())
1406     return false;
1407 
1408   if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
1409               GLC, SLC, TFE, DLC))
1410     return false;
1411 
1412   ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
1413   if (C->getSExtValue()) {
1414     SDLoc DL(Addr);
1415 
1416     const SITargetLowering& Lowering =
1417       *static_cast<const SITargetLowering*>(getTargetLowering());
1418 
1419     SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
1420     return true;
1421   }
1422 
1423   return false;
1424 }
1425 
1426 bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
1427                                            SDValue &VAddr, SDValue &SOffset,
1428                                            SDValue &Offset,
1429                                            SDValue &SLC) const {
1430   SLC = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i1);
1431   SDValue GLC, TFE, DLC;
1432 
1433   return SelectMUBUFAddr64(Addr, SRsrc, VAddr, SOffset, Offset, GLC, SLC, TFE, DLC);
1434 }
1435 
1436 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
1437   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
1438   return PSV && PSV->isStack();
1439 }
1440 
1441 std::pair<SDValue, SDValue> AMDGPUDAGToDAGISel::foldFrameIndex(SDValue N) const {
1442   const MachineFunction &MF = CurDAG->getMachineFunction();
1443   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1444 
1445   if (auto FI = dyn_cast<FrameIndexSDNode>(N)) {
1446     SDValue TFI = CurDAG->getTargetFrameIndex(FI->getIndex(),
1447                                               FI->getValueType(0));
1448 
1449     // If we can resolve this to a frame index access, this will be relative to
1450     // either the stack or frame pointer SGPR.
1451     return std::make_pair(
1452         TFI, CurDAG->getRegister(Info->getStackPtrOffsetReg(), MVT::i32));
1453   }
1454 
1455   // If we don't know this private access is a local stack object, it needs to
1456   // be relative to the entry point's scratch wave offset register.
1457   return std::make_pair(N, CurDAG->getRegister(Info->getScratchWaveOffsetReg(),
1458                                                MVT::i32));
1459 }
1460 
1461 bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffen(SDNode *Parent,
1462                                                  SDValue Addr, SDValue &Rsrc,
1463                                                  SDValue &VAddr, SDValue &SOffset,
1464                                                  SDValue &ImmOffset) const {
1465 
1466   SDLoc DL(Addr);
1467   MachineFunction &MF = CurDAG->getMachineFunction();
1468   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1469 
1470   Rsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
1471 
1472   if (ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
1473     unsigned Imm = CAddr->getZExtValue();
1474 
1475     SDValue HighBits = CurDAG->getTargetConstant(Imm & ~4095, DL, MVT::i32);
1476     MachineSDNode *MovHighBits = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
1477                                                         DL, MVT::i32, HighBits);
1478     VAddr = SDValue(MovHighBits, 0);
1479 
1480     // In a call sequence, stores to the argument stack area are relative to the
1481     // stack pointer.
1482     const MachinePointerInfo &PtrInfo = cast<MemSDNode>(Parent)->getPointerInfo();
1483     unsigned SOffsetReg = isStackPtrRelative(PtrInfo) ?
1484       Info->getStackPtrOffsetReg() : Info->getScratchWaveOffsetReg();
1485 
1486     SOffset = CurDAG->getRegister(SOffsetReg, MVT::i32);
1487     ImmOffset = CurDAG->getTargetConstant(Imm & 4095, DL, MVT::i16);
1488     return true;
1489   }
1490 
1491   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1492     // (add n0, c1)
1493 
1494     SDValue N0 = Addr.getOperand(0);
1495     SDValue N1 = Addr.getOperand(1);
1496 
1497     // Offsets in vaddr must be positive if range checking is enabled.
1498     //
1499     // The total computation of vaddr + soffset + offset must not overflow.  If
1500     // vaddr is negative, even if offset is 0 the sgpr offset add will end up
1501     // overflowing.
1502     //
1503     // Prior to gfx9, MUBUF instructions with the vaddr offset enabled would
1504     // always perform a range check. If a negative vaddr base index was used,
1505     // this would fail the range check. The overall address computation would
1506     // compute a valid address, but this doesn't happen due to the range
1507     // check. For out-of-bounds MUBUF loads, a 0 is returned.
1508     //
1509     // Therefore it should be safe to fold any VGPR offset on gfx9 into the
1510     // MUBUF vaddr, but not on older subtargets which can only do this if the
1511     // sign bit is known 0.
1512     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
1513     if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue()) &&
1514         (!Subtarget->privateMemoryResourceIsRangeChecked() ||
1515          CurDAG->SignBitIsZero(N0))) {
1516       std::tie(VAddr, SOffset) = foldFrameIndex(N0);
1517       ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
1518       return true;
1519     }
1520   }
1521 
1522   // (node)
1523   std::tie(VAddr, SOffset) = foldFrameIndex(Addr);
1524   ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1525   return true;
1526 }
1527 
1528 bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffset(SDNode *Parent,
1529                                                   SDValue Addr,
1530                                                   SDValue &SRsrc,
1531                                                   SDValue &SOffset,
1532                                                   SDValue &Offset) const {
1533   ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr);
1534   if (!CAddr || !SIInstrInfo::isLegalMUBUFImmOffset(CAddr->getZExtValue()))
1535     return false;
1536 
1537   SDLoc DL(Addr);
1538   MachineFunction &MF = CurDAG->getMachineFunction();
1539   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1540 
1541   SRsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
1542 
1543   const MachinePointerInfo &PtrInfo = cast<MemSDNode>(Parent)->getPointerInfo();
1544   unsigned SOffsetReg = isStackPtrRelative(PtrInfo) ?
1545     Info->getStackPtrOffsetReg() : Info->getScratchWaveOffsetReg();
1546 
1547   // FIXME: Get from MachinePointerInfo? We should only be using the frame
1548   // offset if we know this is in a call sequence.
1549   SOffset = CurDAG->getRegister(SOffsetReg, MVT::i32);
1550 
1551   Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
1552   return true;
1553 }
1554 
1555 bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
1556                                            SDValue &SOffset, SDValue &Offset,
1557                                            SDValue &GLC, SDValue &SLC,
1558                                            SDValue &TFE, SDValue &DLC) const {
1559   SDValue Ptr, VAddr, Offen, Idxen, Addr64;
1560   const SIInstrInfo *TII =
1561     static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
1562 
1563   if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
1564               GLC, SLC, TFE, DLC))
1565     return false;
1566 
1567   if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
1568       !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
1569       !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
1570     uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
1571                     APInt::getAllOnesValue(32).getZExtValue(); // Size
1572     SDLoc DL(Addr);
1573 
1574     const SITargetLowering& Lowering =
1575       *static_cast<const SITargetLowering*>(getTargetLowering());
1576 
1577     SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
1578     return true;
1579   }
1580   return false;
1581 }
1582 
1583 bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
1584                                            SDValue &Soffset, SDValue &Offset
1585                                            ) const {
1586   SDValue GLC, SLC, TFE, DLC;
1587 
1588   return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE, DLC);
1589 }
1590 bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
1591                                            SDValue &Soffset, SDValue &Offset,
1592                                            SDValue &SLC) const {
1593   SDValue GLC, TFE, DLC;
1594 
1595   return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE, DLC);
1596 }
1597 
1598 template <bool IsSigned>
1599 bool AMDGPUDAGToDAGISel::SelectFlatOffset(SDNode *N,
1600                                           SDValue Addr,
1601                                           SDValue &VAddr,
1602                                           SDValue &Offset,
1603                                           SDValue &SLC) const {
1604   return static_cast<const SITargetLowering*>(getTargetLowering())->
1605     SelectFlatOffset(IsSigned, *CurDAG, N, Addr, VAddr, Offset, SLC);
1606 }
1607 
1608 bool AMDGPUDAGToDAGISel::SelectFlatAtomic(SDNode *N,
1609                                           SDValue Addr,
1610                                           SDValue &VAddr,
1611                                           SDValue &Offset,
1612                                           SDValue &SLC) const {
1613   return SelectFlatOffset<false>(N, Addr, VAddr, Offset, SLC);
1614 }
1615 
1616 bool AMDGPUDAGToDAGISel::SelectFlatAtomicSigned(SDNode *N,
1617                                           SDValue Addr,
1618                                           SDValue &VAddr,
1619                                           SDValue &Offset,
1620                                           SDValue &SLC) const {
1621   return SelectFlatOffset<true>(N, Addr, VAddr, Offset, SLC);
1622 }
1623 
1624 bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode,
1625                                           SDValue &Offset, bool &Imm) const {
1626 
1627   // FIXME: Handle non-constant offsets.
1628   ConstantSDNode *C = dyn_cast<ConstantSDNode>(ByteOffsetNode);
1629   if (!C)
1630     return false;
1631 
1632   SDLoc SL(ByteOffsetNode);
1633   GCNSubtarget::Generation Gen = Subtarget->getGeneration();
1634   int64_t ByteOffset = C->getSExtValue();
1635   int64_t EncodedOffset = AMDGPU::getSMRDEncodedOffset(*Subtarget, ByteOffset);
1636 
1637   if (AMDGPU::isLegalSMRDImmOffset(*Subtarget, ByteOffset)) {
1638     Offset = CurDAG->getTargetConstant(EncodedOffset, SL, MVT::i32);
1639     Imm = true;
1640     return true;
1641   }
1642 
1643   if (!isUInt<32>(EncodedOffset) || !isUInt<32>(ByteOffset))
1644     return false;
1645 
1646   if (Gen == AMDGPUSubtarget::SEA_ISLANDS && isUInt<32>(EncodedOffset)) {
1647     // 32-bit Immediates are supported on Sea Islands.
1648     Offset = CurDAG->getTargetConstant(EncodedOffset, SL, MVT::i32);
1649   } else {
1650     SDValue C32Bit = CurDAG->getTargetConstant(ByteOffset, SL, MVT::i32);
1651     Offset = SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32,
1652                                             C32Bit), 0);
1653   }
1654   Imm = false;
1655   return true;
1656 }
1657 
1658 SDValue AMDGPUDAGToDAGISel::Expand32BitAddress(SDValue Addr) const {
1659   if (Addr.getValueType() != MVT::i32)
1660     return Addr;
1661 
1662   // Zero-extend a 32-bit address.
1663   SDLoc SL(Addr);
1664 
1665   const MachineFunction &MF = CurDAG->getMachineFunction();
1666   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1667   unsigned AddrHiVal = Info->get32BitAddressHighBits();
1668   SDValue AddrHi = CurDAG->getTargetConstant(AddrHiVal, SL, MVT::i32);
1669 
1670   const SDValue Ops[] = {
1671     CurDAG->getTargetConstant(AMDGPU::SReg_64_XEXECRegClassID, SL, MVT::i32),
1672     Addr,
1673     CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32),
1674     SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, AddrHi),
1675             0),
1676     CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32),
1677   };
1678 
1679   return SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, SL, MVT::i64,
1680                                         Ops), 0);
1681 }
1682 
1683 bool AMDGPUDAGToDAGISel::SelectSMRD(SDValue Addr, SDValue &SBase,
1684                                      SDValue &Offset, bool &Imm) const {
1685   SDLoc SL(Addr);
1686 
1687   // A 32-bit (address + offset) should not cause unsigned 32-bit integer
1688   // wraparound, because s_load instructions perform the addition in 64 bits.
1689   if ((Addr.getValueType() != MVT::i32 ||
1690        Addr->getFlags().hasNoUnsignedWrap()) &&
1691       CurDAG->isBaseWithConstantOffset(Addr)) {
1692     SDValue N0 = Addr.getOperand(0);
1693     SDValue N1 = Addr.getOperand(1);
1694 
1695     if (SelectSMRDOffset(N1, Offset, Imm)) {
1696       SBase = Expand32BitAddress(N0);
1697       return true;
1698     }
1699   }
1700   SBase = Expand32BitAddress(Addr);
1701   Offset = CurDAG->getTargetConstant(0, SL, MVT::i32);
1702   Imm = true;
1703   return true;
1704 }
1705 
1706 bool AMDGPUDAGToDAGISel::SelectSMRDImm(SDValue Addr, SDValue &SBase,
1707                                        SDValue &Offset) const {
1708   bool Imm;
1709   return SelectSMRD(Addr, SBase, Offset, Imm) && Imm;
1710 }
1711 
1712 bool AMDGPUDAGToDAGISel::SelectSMRDImm32(SDValue Addr, SDValue &SBase,
1713                                          SDValue &Offset) const {
1714 
1715   if (Subtarget->getGeneration() != AMDGPUSubtarget::SEA_ISLANDS)
1716     return false;
1717 
1718   bool Imm;
1719   if (!SelectSMRD(Addr, SBase, Offset, Imm))
1720     return false;
1721 
1722   return !Imm && isa<ConstantSDNode>(Offset);
1723 }
1724 
1725 bool AMDGPUDAGToDAGISel::SelectSMRDSgpr(SDValue Addr, SDValue &SBase,
1726                                         SDValue &Offset) const {
1727   bool Imm;
1728   return SelectSMRD(Addr, SBase, Offset, Imm) && !Imm &&
1729          !isa<ConstantSDNode>(Offset);
1730 }
1731 
1732 bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm(SDValue Addr,
1733                                              SDValue &Offset) const {
1734   bool Imm;
1735   return SelectSMRDOffset(Addr, Offset, Imm) && Imm;
1736 }
1737 
1738 bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm32(SDValue Addr,
1739                                                SDValue &Offset) const {
1740   if (Subtarget->getGeneration() != AMDGPUSubtarget::SEA_ISLANDS)
1741     return false;
1742 
1743   bool Imm;
1744   if (!SelectSMRDOffset(Addr, Offset, Imm))
1745     return false;
1746 
1747   return !Imm && isa<ConstantSDNode>(Offset);
1748 }
1749 
1750 bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index,
1751                                             SDValue &Base,
1752                                             SDValue &Offset) const {
1753   SDLoc DL(Index);
1754 
1755   if (CurDAG->isBaseWithConstantOffset(Index)) {
1756     SDValue N0 = Index.getOperand(0);
1757     SDValue N1 = Index.getOperand(1);
1758     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
1759 
1760     // (add n0, c0)
1761     // Don't peel off the offset (c0) if doing so could possibly lead
1762     // the base (n0) to be negative.
1763     if (C1->getSExtValue() <= 0 || CurDAG->SignBitIsZero(N0)) {
1764       Base = N0;
1765       Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32);
1766       return true;
1767     }
1768   }
1769 
1770   if (isa<ConstantSDNode>(Index))
1771     return false;
1772 
1773   Base = Index;
1774   Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
1775   return true;
1776 }
1777 
1778 SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, const SDLoc &DL,
1779                                      SDValue Val, uint32_t Offset,
1780                                      uint32_t Width) {
1781   // Transformation function, pack the offset and width of a BFE into
1782   // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
1783   // source, bits [5:0] contain the offset and bits [22:16] the width.
1784   uint32_t PackedVal = Offset | (Width << 16);
1785   SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32);
1786 
1787   return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
1788 }
1789 
1790 void AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
1791   // "(a << b) srl c)" ---> "BFE_U32 a, (c-b), (32-c)
1792   // "(a << b) sra c)" ---> "BFE_I32 a, (c-b), (32-c)
1793   // Predicate: 0 < b <= c < 32
1794 
1795   const SDValue &Shl = N->getOperand(0);
1796   ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
1797   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
1798 
1799   if (B && C) {
1800     uint32_t BVal = B->getZExtValue();
1801     uint32_t CVal = C->getZExtValue();
1802 
1803     if (0 < BVal && BVal <= CVal && CVal < 32) {
1804       bool Signed = N->getOpcode() == ISD::SRA;
1805       unsigned Opcode = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
1806 
1807       ReplaceNode(N, getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0), CVal - BVal,
1808                               32 - CVal));
1809       return;
1810     }
1811   }
1812   SelectCode(N);
1813 }
1814 
1815 void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
1816   switch (N->getOpcode()) {
1817   case ISD::AND:
1818     if (N->getOperand(0).getOpcode() == ISD::SRL) {
1819       // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
1820       // Predicate: isMask(mask)
1821       const SDValue &Srl = N->getOperand(0);
1822       ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
1823       ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));
1824 
1825       if (Shift && Mask) {
1826         uint32_t ShiftVal = Shift->getZExtValue();
1827         uint32_t MaskVal = Mask->getZExtValue();
1828 
1829         if (isMask_32(MaskVal)) {
1830           uint32_t WidthVal = countPopulation(MaskVal);
1831 
1832           ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
1833                                   Srl.getOperand(0), ShiftVal, WidthVal));
1834           return;
1835         }
1836       }
1837     }
1838     break;
1839   case ISD::SRL:
1840     if (N->getOperand(0).getOpcode() == ISD::AND) {
1841       // "(a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)"
1842       // Predicate: isMask(mask >> b)
1843       const SDValue &And = N->getOperand(0);
1844       ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
1845       ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));
1846 
1847       if (Shift && Mask) {
1848         uint32_t ShiftVal = Shift->getZExtValue();
1849         uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;
1850 
1851         if (isMask_32(MaskVal)) {
1852           uint32_t WidthVal = countPopulation(MaskVal);
1853 
1854           ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
1855                                   And.getOperand(0), ShiftVal, WidthVal));
1856           return;
1857         }
1858       }
1859     } else if (N->getOperand(0).getOpcode() == ISD::SHL) {
1860       SelectS_BFEFromShifts(N);
1861       return;
1862     }
1863     break;
1864   case ISD::SRA:
1865     if (N->getOperand(0).getOpcode() == ISD::SHL) {
1866       SelectS_BFEFromShifts(N);
1867       return;
1868     }
1869     break;
1870 
1871   case ISD::SIGN_EXTEND_INREG: {
1872     // sext_inreg (srl x, 16), i8 -> bfe_i32 x, 16, 8
1873     SDValue Src = N->getOperand(0);
1874     if (Src.getOpcode() != ISD::SRL)
1875       break;
1876 
1877     const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
1878     if (!Amt)
1879       break;
1880 
1881     unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
1882     ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_I32, SDLoc(N), Src.getOperand(0),
1883                             Amt->getZExtValue(), Width));
1884     return;
1885   }
1886   }
1887 
1888   SelectCode(N);
1889 }
1890 
1891 bool AMDGPUDAGToDAGISel::isCBranchSCC(const SDNode *N) const {
1892   assert(N->getOpcode() == ISD::BRCOND);
1893   if (!N->hasOneUse())
1894     return false;
1895 
1896   SDValue Cond = N->getOperand(1);
1897   if (Cond.getOpcode() == ISD::CopyToReg)
1898     Cond = Cond.getOperand(2);
1899 
1900   if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
1901     return false;
1902 
1903   MVT VT = Cond.getOperand(0).getSimpleValueType();
1904   if (VT == MVT::i32)
1905     return true;
1906 
1907   if (VT == MVT::i64) {
1908     auto ST = static_cast<const GCNSubtarget *>(Subtarget);
1909 
1910     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
1911     return (CC == ISD::SETEQ || CC == ISD::SETNE) && ST->hasScalarCompareEq64();
1912   }
1913 
1914   return false;
1915 }
1916 
1917 void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
1918   SDValue Cond = N->getOperand(1);
1919 
1920   if (Cond.isUndef()) {
1921     CurDAG->SelectNodeTo(N, AMDGPU::SI_BR_UNDEF, MVT::Other,
1922                          N->getOperand(2), N->getOperand(0));
1923     return;
1924   }
1925 
1926   const GCNSubtarget *ST = static_cast<const GCNSubtarget *>(Subtarget);
1927   const SIRegisterInfo *TRI = ST->getRegisterInfo();
1928 
1929   bool UseSCCBr = isCBranchSCC(N) && isUniformBr(N);
1930   unsigned BrOp = UseSCCBr ? AMDGPU::S_CBRANCH_SCC1 : AMDGPU::S_CBRANCH_VCCNZ;
1931   unsigned CondReg = UseSCCBr ? (unsigned)AMDGPU::SCC : TRI->getVCC();
1932   SDLoc SL(N);
1933 
1934   if (!UseSCCBr) {
1935     // This is the case that we are selecting to S_CBRANCH_VCCNZ.  We have not
1936     // analyzed what generates the vcc value, so we do not know whether vcc
1937     // bits for disabled lanes are 0.  Thus we need to mask out bits for
1938     // disabled lanes.
1939     //
1940     // For the case that we select S_CBRANCH_SCC1 and it gets
1941     // changed to S_CBRANCH_VCCNZ in SIFixSGPRCopies, SIFixSGPRCopies calls
1942     // SIInstrInfo::moveToVALU which inserts the S_AND).
1943     //
1944     // We could add an analysis of what generates the vcc value here and omit
1945     // the S_AND when is unnecessary. But it would be better to add a separate
1946     // pass after SIFixSGPRCopies to do the unnecessary S_AND removal, so it
1947     // catches both cases.
1948     Cond = SDValue(CurDAG->getMachineNode(ST->isWave32() ? AMDGPU::S_AND_B32
1949                                                          : AMDGPU::S_AND_B64,
1950                      SL, MVT::i1,
1951                      CurDAG->getRegister(ST->isWave32() ? AMDGPU::EXEC_LO
1952                                                         : AMDGPU::EXEC,
1953                                          MVT::i1),
1954                     Cond),
1955                    0);
1956   }
1957 
1958   SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, CondReg, Cond);
1959   CurDAG->SelectNodeTo(N, BrOp, MVT::Other,
1960                        N->getOperand(2), // Basic Block
1961                        VCC.getValue(0));
1962 }
1963 
1964 void AMDGPUDAGToDAGISel::SelectFMAD_FMA(SDNode *N) {
1965   MVT VT = N->getSimpleValueType(0);
1966   bool IsFMA = N->getOpcode() == ISD::FMA;
1967   if (VT != MVT::f32 || (!Subtarget->hasMadMixInsts() &&
1968                          !Subtarget->hasFmaMixInsts()) ||
1969       ((IsFMA && Subtarget->hasMadMixInsts()) ||
1970        (!IsFMA && Subtarget->hasFmaMixInsts()))) {
1971     SelectCode(N);
1972     return;
1973   }
1974 
1975   SDValue Src0 = N->getOperand(0);
1976   SDValue Src1 = N->getOperand(1);
1977   SDValue Src2 = N->getOperand(2);
1978   unsigned Src0Mods, Src1Mods, Src2Mods;
1979 
1980   // Avoid using v_mad_mix_f32/v_fma_mix_f32 unless there is actually an operand
1981   // using the conversion from f16.
1982   bool Sel0 = SelectVOP3PMadMixModsImpl(Src0, Src0, Src0Mods);
1983   bool Sel1 = SelectVOP3PMadMixModsImpl(Src1, Src1, Src1Mods);
1984   bool Sel2 = SelectVOP3PMadMixModsImpl(Src2, Src2, Src2Mods);
1985 
1986   assert((IsFMA || !Subtarget->hasFP32Denormals()) &&
1987          "fmad selected with denormals enabled");
1988   // TODO: We can select this with f32 denormals enabled if all the sources are
1989   // converted from f16 (in which case fmad isn't legal).
1990 
1991   if (Sel0 || Sel1 || Sel2) {
1992     // For dummy operands.
1993     SDValue Zero = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
1994     SDValue Ops[] = {
1995       CurDAG->getTargetConstant(Src0Mods, SDLoc(), MVT::i32), Src0,
1996       CurDAG->getTargetConstant(Src1Mods, SDLoc(), MVT::i32), Src1,
1997       CurDAG->getTargetConstant(Src2Mods, SDLoc(), MVT::i32), Src2,
1998       CurDAG->getTargetConstant(0, SDLoc(), MVT::i1),
1999       Zero, Zero
2000     };
2001 
2002     CurDAG->SelectNodeTo(N,
2003                          IsFMA ? AMDGPU::V_FMA_MIX_F32 : AMDGPU::V_MAD_MIX_F32,
2004                          MVT::f32, Ops);
2005   } else {
2006     SelectCode(N);
2007   }
2008 }
2009 
2010 // This is here because there isn't a way to use the generated sub0_sub1 as the
2011 // subreg index to EXTRACT_SUBREG in tablegen.
2012 void AMDGPUDAGToDAGISel::SelectATOMIC_CMP_SWAP(SDNode *N) {
2013   MemSDNode *Mem = cast<MemSDNode>(N);
2014   unsigned AS = Mem->getAddressSpace();
2015   if (AS == AMDGPUAS::FLAT_ADDRESS) {
2016     SelectCode(N);
2017     return;
2018   }
2019 
2020   MVT VT = N->getSimpleValueType(0);
2021   bool Is32 = (VT == MVT::i32);
2022   SDLoc SL(N);
2023 
2024   MachineSDNode *CmpSwap = nullptr;
2025   if (Subtarget->hasAddr64()) {
2026     SDValue SRsrc, VAddr, SOffset, Offset, SLC;
2027 
2028     if (SelectMUBUFAddr64(Mem->getBasePtr(), SRsrc, VAddr, SOffset, Offset, SLC)) {
2029       unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN :
2030         AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN;
2031       SDValue CmpVal = Mem->getOperand(2);
2032 
2033       // XXX - Do we care about glue operands?
2034 
2035       SDValue Ops[] = {
2036         CmpVal, VAddr, SRsrc, SOffset, Offset, SLC, Mem->getChain()
2037       };
2038 
2039       CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
2040     }
2041   }
2042 
2043   if (!CmpSwap) {
2044     SDValue SRsrc, SOffset, Offset, SLC;
2045     if (SelectMUBUFOffset(Mem->getBasePtr(), SRsrc, SOffset, Offset, SLC)) {
2046       unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN :
2047         AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN;
2048 
2049       SDValue CmpVal = Mem->getOperand(2);
2050       SDValue Ops[] = {
2051         CmpVal, SRsrc, SOffset, Offset, SLC, Mem->getChain()
2052       };
2053 
2054       CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
2055     }
2056   }
2057 
2058   if (!CmpSwap) {
2059     SelectCode(N);
2060     return;
2061   }
2062 
2063   MachineMemOperand *MMO = Mem->getMemOperand();
2064   CurDAG->setNodeMemRefs(CmpSwap, {MMO});
2065 
2066   unsigned SubReg = Is32 ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
2067   SDValue Extract
2068     = CurDAG->getTargetExtractSubreg(SubReg, SL, VT, SDValue(CmpSwap, 0));
2069 
2070   ReplaceUses(SDValue(N, 0), Extract);
2071   ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 1));
2072   CurDAG->RemoveDeadNode(N);
2073 }
2074 
2075 void AMDGPUDAGToDAGISel::SelectDSAppendConsume(SDNode *N, unsigned IntrID) {
2076   // The address is assumed to be uniform, so if it ends up in a VGPR, it will
2077   // be copied to an SGPR with readfirstlane.
2078   unsigned Opc = IntrID == Intrinsic::amdgcn_ds_append ?
2079     AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
2080 
2081   SDValue Chain = N->getOperand(0);
2082   SDValue Ptr = N->getOperand(2);
2083   MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
2084   MachineMemOperand *MMO = M->getMemOperand();
2085   bool IsGDS = M->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
2086 
2087   SDValue Offset;
2088   if (CurDAG->isBaseWithConstantOffset(Ptr)) {
2089     SDValue PtrBase = Ptr.getOperand(0);
2090     SDValue PtrOffset = Ptr.getOperand(1);
2091 
2092     const APInt &OffsetVal = cast<ConstantSDNode>(PtrOffset)->getAPIntValue();
2093     if (isDSOffsetLegal(PtrBase, OffsetVal.getZExtValue(), 16)) {
2094       N = glueCopyToM0(N, PtrBase);
2095       Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i32);
2096     }
2097   }
2098 
2099   if (!Offset) {
2100     N = glueCopyToM0(N, Ptr);
2101     Offset = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
2102   }
2103 
2104   SDValue Ops[] = {
2105     Offset,
2106     CurDAG->getTargetConstant(IsGDS, SDLoc(), MVT::i32),
2107     Chain,
2108     N->getOperand(N->getNumOperands() - 1) // New glue
2109   };
2110 
2111   SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
2112   CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
2113 }
2114 
2115 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
2116   switch (IntrID) {
2117   case Intrinsic::amdgcn_ds_gws_init:
2118     return AMDGPU::DS_GWS_INIT;
2119   case Intrinsic::amdgcn_ds_gws_barrier:
2120     return AMDGPU::DS_GWS_BARRIER;
2121   case Intrinsic::amdgcn_ds_gws_sema_v:
2122     return AMDGPU::DS_GWS_SEMA_V;
2123   case Intrinsic::amdgcn_ds_gws_sema_br:
2124     return AMDGPU::DS_GWS_SEMA_BR;
2125   case Intrinsic::amdgcn_ds_gws_sema_p:
2126     return AMDGPU::DS_GWS_SEMA_P;
2127   case Intrinsic::amdgcn_ds_gws_sema_release_all:
2128     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
2129   default:
2130     llvm_unreachable("not a gws intrinsic");
2131   }
2132 }
2133 
2134 void AMDGPUDAGToDAGISel::SelectDS_GWS(SDNode *N, unsigned IntrID) {
2135   if (IntrID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
2136       !Subtarget->hasGWSSemaReleaseAll()) {
2137     // Let this error.
2138     SelectCode(N);
2139     return;
2140   }
2141 
2142   // Chain, intrinsic ID, vsrc, offset
2143   const bool HasVSrc = N->getNumOperands() == 4;
2144   assert(HasVSrc || N->getNumOperands() == 3);
2145 
2146   SDLoc SL(N);
2147   SDValue BaseOffset = N->getOperand(HasVSrc ? 3 : 2);
2148   int ImmOffset = 0;
2149   MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
2150   MachineMemOperand *MMO = M->getMemOperand();
2151 
2152   // Don't worry if the offset ends up in a VGPR. Only one lane will have
2153   // effect, so SIFixSGPRCopies will validly insert readfirstlane.
2154 
2155   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
2156   // offset field) % 64. Some versions of the programming guide omit the m0
2157   // part, or claim it's from offset 0.
2158   if (ConstantSDNode *ConstOffset = dyn_cast<ConstantSDNode>(BaseOffset)) {
2159     // If we have a constant offset, try to use the default value for m0 as a
2160     // base to possibly avoid setting it up.
2161     glueCopyToM0(N, CurDAG->getTargetConstant(-1, SL, MVT::i32));
2162     ImmOffset = ConstOffset->getZExtValue() + 1;
2163   } else {
2164     if (CurDAG->isBaseWithConstantOffset(BaseOffset)) {
2165       ImmOffset = BaseOffset.getConstantOperandVal(1);
2166       BaseOffset = BaseOffset.getOperand(0);
2167     }
2168 
2169     // Prefer to do the shift in an SGPR since it should be possible to use m0
2170     // as the result directly. If it's already an SGPR, it will be eliminated
2171     // later.
2172     SDNode *SGPROffset
2173       = CurDAG->getMachineNode(AMDGPU::V_READFIRSTLANE_B32, SL, MVT::i32,
2174                                BaseOffset);
2175     // Shift to offset in m0
2176     SDNode *M0Base
2177       = CurDAG->getMachineNode(AMDGPU::S_LSHL_B32, SL, MVT::i32,
2178                                SDValue(SGPROffset, 0),
2179                                CurDAG->getTargetConstant(16, SL, MVT::i32));
2180     glueCopyToM0(N, SDValue(M0Base, 0));
2181   }
2182 
2183   SDValue V0;
2184   SDValue Chain = N->getOperand(0);
2185   SDValue Glue;
2186   if (HasVSrc) {
2187     SDValue VSrc0 = N->getOperand(2);
2188 
2189     // The manual doesn't mention this, but it seems only v0 works.
2190     V0 = CurDAG->getRegister(AMDGPU::VGPR0, MVT::i32);
2191 
2192     SDValue CopyToV0 = CurDAG->getCopyToReg(
2193       N->getOperand(0), SL, V0, VSrc0,
2194       N->getOperand(N->getNumOperands() - 1));
2195     Chain = CopyToV0;
2196     Glue = CopyToV0.getValue(1);
2197   }
2198 
2199   SDValue OffsetField = CurDAG->getTargetConstant(ImmOffset, SL, MVT::i32);
2200 
2201   // TODO: Can this just be removed from the instruction?
2202   SDValue GDS = CurDAG->getTargetConstant(1, SL, MVT::i1);
2203 
2204   const unsigned Opc = gwsIntrinToOpcode(IntrID);
2205   SmallVector<SDValue, 5> Ops;
2206   if (HasVSrc)
2207     Ops.push_back(V0);
2208   Ops.push_back(OffsetField);
2209   Ops.push_back(GDS);
2210   Ops.push_back(Chain);
2211 
2212   if (HasVSrc)
2213     Ops.push_back(Glue);
2214 
2215   SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
2216   CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
2217 }
2218 
2219 void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) {
2220   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2221   switch (IntrID) {
2222   case Intrinsic::amdgcn_ds_append:
2223   case Intrinsic::amdgcn_ds_consume: {
2224     if (N->getValueType(0) != MVT::i32)
2225       break;
2226     SelectDSAppendConsume(N, IntrID);
2227     return;
2228   }
2229   }
2230 
2231   SelectCode(N);
2232 }
2233 
2234 void AMDGPUDAGToDAGISel::SelectINTRINSIC_VOID(SDNode *N) {
2235   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2236   switch (IntrID) {
2237   case Intrinsic::amdgcn_ds_gws_init:
2238   case Intrinsic::amdgcn_ds_gws_barrier:
2239   case Intrinsic::amdgcn_ds_gws_sema_v:
2240   case Intrinsic::amdgcn_ds_gws_sema_br:
2241   case Intrinsic::amdgcn_ds_gws_sema_p:
2242   case Intrinsic::amdgcn_ds_gws_sema_release_all:
2243     SelectDS_GWS(N, IntrID);
2244     return;
2245   default:
2246     break;
2247   }
2248 
2249   SelectCode(N);
2250 }
2251 
2252 bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
2253                                             unsigned &Mods) const {
2254   Mods = 0;
2255   Src = In;
2256 
2257   if (Src.getOpcode() == ISD::FNEG) {
2258     Mods |= SISrcMods::NEG;
2259     Src = Src.getOperand(0);
2260   }
2261 
2262   if (Src.getOpcode() == ISD::FABS) {
2263     Mods |= SISrcMods::ABS;
2264     Src = Src.getOperand(0);
2265   }
2266 
2267   return true;
2268 }
2269 
2270 bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
2271                                         SDValue &SrcMods) const {
2272   unsigned Mods;
2273   if (SelectVOP3ModsImpl(In, Src, Mods)) {
2274     SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2275     return true;
2276   }
2277 
2278   return false;
2279 }
2280 
2281 bool AMDGPUDAGToDAGISel::SelectVOP3Mods_NNaN(SDValue In, SDValue &Src,
2282                                              SDValue &SrcMods) const {
2283   SelectVOP3Mods(In, Src, SrcMods);
2284   return isNoNanSrc(Src);
2285 }
2286 
2287 bool AMDGPUDAGToDAGISel::SelectVOP3NoMods(SDValue In, SDValue &Src) const {
2288   if (In.getOpcode() == ISD::FABS || In.getOpcode() == ISD::FNEG)
2289     return false;
2290 
2291   Src = In;
2292   return true;
2293 }
2294 
2295 bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
2296                                          SDValue &SrcMods, SDValue &Clamp,
2297                                          SDValue &Omod) const {
2298   SDLoc DL(In);
2299   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2300   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2301 
2302   return SelectVOP3Mods(In, Src, SrcMods);
2303 }
2304 
2305 bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src,
2306                                                    SDValue &SrcMods,
2307                                                    SDValue &Clamp,
2308                                                    SDValue &Omod) const {
2309   Clamp = Omod = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
2310   return SelectVOP3Mods(In, Src, SrcMods);
2311 }
2312 
2313 bool AMDGPUDAGToDAGISel::SelectVOP3OMods(SDValue In, SDValue &Src,
2314                                          SDValue &Clamp, SDValue &Omod) const {
2315   Src = In;
2316 
2317   SDLoc DL(In);
2318   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2319   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2320 
2321   return true;
2322 }
2323 
2324 bool AMDGPUDAGToDAGISel::SelectVOP3PMods(SDValue In, SDValue &Src,
2325                                          SDValue &SrcMods) const {
2326   unsigned Mods = 0;
2327   Src = In;
2328 
2329   if (Src.getOpcode() == ISD::FNEG) {
2330     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
2331     Src = Src.getOperand(0);
2332   }
2333 
2334   if (Src.getOpcode() == ISD::BUILD_VECTOR) {
2335     unsigned VecMods = Mods;
2336 
2337     SDValue Lo = stripBitcast(Src.getOperand(0));
2338     SDValue Hi = stripBitcast(Src.getOperand(1));
2339 
2340     if (Lo.getOpcode() == ISD::FNEG) {
2341       Lo = stripBitcast(Lo.getOperand(0));
2342       Mods ^= SISrcMods::NEG;
2343     }
2344 
2345     if (Hi.getOpcode() == ISD::FNEG) {
2346       Hi = stripBitcast(Hi.getOperand(0));
2347       Mods ^= SISrcMods::NEG_HI;
2348     }
2349 
2350     if (isExtractHiElt(Lo, Lo))
2351       Mods |= SISrcMods::OP_SEL_0;
2352 
2353     if (isExtractHiElt(Hi, Hi))
2354       Mods |= SISrcMods::OP_SEL_1;
2355 
2356     Lo = stripExtractLoElt(Lo);
2357     Hi = stripExtractLoElt(Hi);
2358 
2359     if (Lo == Hi && !isInlineImmediate(Lo.getNode())) {
2360       // Really a scalar input. Just select from the low half of the register to
2361       // avoid packing.
2362 
2363       Src = Lo;
2364       SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2365       return true;
2366     }
2367 
2368     Mods = VecMods;
2369   }
2370 
2371   // Packed instructions do not have abs modifiers.
2372   Mods |= SISrcMods::OP_SEL_1;
2373 
2374   SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2375   return true;
2376 }
2377 
2378 bool AMDGPUDAGToDAGISel::SelectVOP3PMods0(SDValue In, SDValue &Src,
2379                                           SDValue &SrcMods,
2380                                           SDValue &Clamp) const {
2381   SDLoc SL(In);
2382 
2383   // FIXME: Handle clamp and op_sel
2384   Clamp = CurDAG->getTargetConstant(0, SL, MVT::i32);
2385 
2386   return SelectVOP3PMods(In, Src, SrcMods);
2387 }
2388 
2389 bool AMDGPUDAGToDAGISel::SelectVOP3OpSel(SDValue In, SDValue &Src,
2390                                          SDValue &SrcMods) const {
2391   Src = In;
2392   // FIXME: Handle op_sel
2393   SrcMods = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
2394   return true;
2395 }
2396 
2397 bool AMDGPUDAGToDAGISel::SelectVOP3OpSel0(SDValue In, SDValue &Src,
2398                                           SDValue &SrcMods,
2399                                           SDValue &Clamp) const {
2400   SDLoc SL(In);
2401 
2402   // FIXME: Handle clamp
2403   Clamp = CurDAG->getTargetConstant(0, SL, MVT::i32);
2404 
2405   return SelectVOP3OpSel(In, Src, SrcMods);
2406 }
2407 
2408 bool AMDGPUDAGToDAGISel::SelectVOP3OpSelMods(SDValue In, SDValue &Src,
2409                                              SDValue &SrcMods) const {
2410   // FIXME: Handle op_sel
2411   return SelectVOP3Mods(In, Src, SrcMods);
2412 }
2413 
2414 bool AMDGPUDAGToDAGISel::SelectVOP3OpSelMods0(SDValue In, SDValue &Src,
2415                                               SDValue &SrcMods,
2416                                               SDValue &Clamp) const {
2417   SDLoc SL(In);
2418 
2419   // FIXME: Handle clamp
2420   Clamp = CurDAG->getTargetConstant(0, SL, MVT::i32);
2421 
2422   return SelectVOP3OpSelMods(In, Src, SrcMods);
2423 }
2424 
2425 // The return value is not whether the match is possible (which it always is),
2426 // but whether or not it a conversion is really used.
2427 bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src,
2428                                                    unsigned &Mods) const {
2429   Mods = 0;
2430   SelectVOP3ModsImpl(In, Src, Mods);
2431 
2432   if (Src.getOpcode() == ISD::FP_EXTEND) {
2433     Src = Src.getOperand(0);
2434     assert(Src.getValueType() == MVT::f16);
2435     Src = stripBitcast(Src);
2436 
2437     // Be careful about folding modifiers if we already have an abs. fneg is
2438     // applied last, so we don't want to apply an earlier fneg.
2439     if ((Mods & SISrcMods::ABS) == 0) {
2440       unsigned ModsTmp;
2441       SelectVOP3ModsImpl(Src, Src, ModsTmp);
2442 
2443       if ((ModsTmp & SISrcMods::NEG) != 0)
2444         Mods ^= SISrcMods::NEG;
2445 
2446       if ((ModsTmp & SISrcMods::ABS) != 0)
2447         Mods |= SISrcMods::ABS;
2448     }
2449 
2450     // op_sel/op_sel_hi decide the source type and source.
2451     // If the source's op_sel_hi is set, it indicates to do a conversion from fp16.
2452     // If the sources's op_sel is set, it picks the high half of the source
2453     // register.
2454 
2455     Mods |= SISrcMods::OP_SEL_1;
2456     if (isExtractHiElt(Src, Src)) {
2457       Mods |= SISrcMods::OP_SEL_0;
2458 
2459       // TODO: Should we try to look for neg/abs here?
2460     }
2461 
2462     return true;
2463   }
2464 
2465   return false;
2466 }
2467 
2468 bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixMods(SDValue In, SDValue &Src,
2469                                                SDValue &SrcMods) const {
2470   unsigned Mods = 0;
2471   SelectVOP3PMadMixModsImpl(In, Src, Mods);
2472   SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2473   return true;
2474 }
2475 
2476 SDValue AMDGPUDAGToDAGISel::getHi16Elt(SDValue In) const {
2477   if (In.isUndef())
2478     return CurDAG->getUNDEF(MVT::i32);
2479 
2480   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(In)) {
2481     SDLoc SL(In);
2482     return CurDAG->getConstant(C->getZExtValue() << 16, SL, MVT::i32);
2483   }
2484 
2485   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(In)) {
2486     SDLoc SL(In);
2487     return CurDAG->getConstant(
2488       C->getValueAPF().bitcastToAPInt().getZExtValue() << 16, SL, MVT::i32);
2489   }
2490 
2491   SDValue Src;
2492   if (isExtractHiElt(In, Src))
2493     return Src;
2494 
2495   return SDValue();
2496 }
2497 
2498 bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode * N) const {
2499   assert(CurDAG->getTarget().getTargetTriple().getArch() == Triple::amdgcn);
2500 
2501   const SIRegisterInfo *SIRI =
2502     static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
2503   const SIInstrInfo * SII =
2504     static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
2505 
2506   unsigned Limit = 0;
2507   bool AllUsesAcceptSReg = true;
2508   for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
2509     Limit < 10 && U != E; ++U, ++Limit) {
2510     const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
2511 
2512     // If the register class is unknown, it could be an unknown
2513     // register class that needs to be an SGPR, e.g. an inline asm
2514     // constraint
2515     if (!RC || SIRI->isSGPRClass(RC))
2516       return false;
2517 
2518     if (RC != &AMDGPU::VS_32RegClass) {
2519       AllUsesAcceptSReg = false;
2520       SDNode * User = *U;
2521       if (User->isMachineOpcode()) {
2522         unsigned Opc = User->getMachineOpcode();
2523         MCInstrDesc Desc = SII->get(Opc);
2524         if (Desc.isCommutable()) {
2525           unsigned OpIdx = Desc.getNumDefs() + U.getOperandNo();
2526           unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
2527           if (SII->findCommutedOpIndices(Desc, OpIdx, CommuteIdx1)) {
2528             unsigned CommutedOpNo = CommuteIdx1 - Desc.getNumDefs();
2529             const TargetRegisterClass *CommutedRC = getOperandRegClass(*U, CommutedOpNo);
2530             if (CommutedRC == &AMDGPU::VS_32RegClass)
2531               AllUsesAcceptSReg = true;
2532           }
2533         }
2534       }
2535       // If "AllUsesAcceptSReg == false" so far we haven't suceeded
2536       // commuting current user. This means have at least one use
2537       // that strictly require VGPR. Thus, we will not attempt to commute
2538       // other user instructions.
2539       if (!AllUsesAcceptSReg)
2540         break;
2541     }
2542   }
2543   return !AllUsesAcceptSReg && (Limit < 10);
2544 }
2545 
2546 bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode * N) const {
2547   auto Ld = cast<LoadSDNode>(N);
2548 
2549   return Ld->getAlignment() >= 4 &&
2550         (
2551           (
2552             (
2553               Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS       ||
2554               Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT
2555             )
2556             &&
2557             !N->isDivergent()
2558           )
2559           ||
2560           (
2561             Subtarget->getScalarizeGlobalBehavior() &&
2562             Ld->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
2563             !Ld->isVolatile() &&
2564             !N->isDivergent() &&
2565             static_cast<const SITargetLowering *>(
2566               getTargetLowering())->isMemOpHasNoClobberedMemOperand(N)
2567           )
2568         );
2569 }
2570 
2571 void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
2572   const AMDGPUTargetLowering& Lowering =
2573     *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
2574   bool IsModified = false;
2575   do {
2576     IsModified = false;
2577 
2578     // Go over all selected nodes and try to fold them a bit more
2579     SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_begin();
2580     while (Position != CurDAG->allnodes_end()) {
2581       SDNode *Node = &*Position++;
2582       MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(Node);
2583       if (!MachineNode)
2584         continue;
2585 
2586       SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
2587       if (ResNode != Node) {
2588         if (ResNode)
2589           ReplaceUses(Node, ResNode);
2590         IsModified = true;
2591       }
2592     }
2593     CurDAG->RemoveDeadNodes();
2594   } while (IsModified);
2595 }
2596 
2597 bool R600DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
2598   Subtarget = &MF.getSubtarget<R600Subtarget>();
2599   return SelectionDAGISel::runOnMachineFunction(MF);
2600 }
2601 
2602 bool R600DAGToDAGISel::isConstantLoad(const MemSDNode *N, int CbId) const {
2603   if (!N->readMem())
2604     return false;
2605   if (CbId == -1)
2606     return N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
2607            N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
2608 
2609   return N->getAddressSpace() == AMDGPUAS::CONSTANT_BUFFER_0 + CbId;
2610 }
2611 
2612 bool R600DAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
2613                                                          SDValue& IntPtr) {
2614   if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
2615     IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, SDLoc(Addr),
2616                                        true);
2617     return true;
2618   }
2619   return false;
2620 }
2621 
2622 bool R600DAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
2623     SDValue& BaseReg, SDValue &Offset) {
2624   if (!isa<ConstantSDNode>(Addr)) {
2625     BaseReg = Addr;
2626     Offset = CurDAG->getIntPtrConstant(0, SDLoc(Addr), true);
2627     return true;
2628   }
2629   return false;
2630 }
2631 
2632 void R600DAGToDAGISel::Select(SDNode *N) {
2633   unsigned int Opc = N->getOpcode();
2634   if (N->isMachineOpcode()) {
2635     N->setNodeId(-1);
2636     return;   // Already selected.
2637   }
2638 
2639   switch (Opc) {
2640   default: break;
2641   case AMDGPUISD::BUILD_VERTICAL_VECTOR:
2642   case ISD::SCALAR_TO_VECTOR:
2643   case ISD::BUILD_VECTOR: {
2644     EVT VT = N->getValueType(0);
2645     unsigned NumVectorElts = VT.getVectorNumElements();
2646     unsigned RegClassID;
2647     // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
2648     // that adds a 128 bits reg copy when going through TwoAddressInstructions
2649     // pass. We want to avoid 128 bits copies as much as possible because they
2650     // can't be bundled by our scheduler.
2651     switch(NumVectorElts) {
2652     case 2: RegClassID = R600::R600_Reg64RegClassID; break;
2653     case 4:
2654       if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
2655         RegClassID = R600::R600_Reg128VerticalRegClassID;
2656       else
2657         RegClassID = R600::R600_Reg128RegClassID;
2658       break;
2659     default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
2660     }
2661     SelectBuildVector(N, RegClassID);
2662     return;
2663   }
2664   }
2665 
2666   SelectCode(N);
2667 }
2668 
2669 bool R600DAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
2670                                           SDValue &Offset) {
2671   ConstantSDNode *C;
2672   SDLoc DL(Addr);
2673 
2674   if ((C = dyn_cast<ConstantSDNode>(Addr))) {
2675     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
2676     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
2677   } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
2678              (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
2679     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
2680     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
2681   } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
2682             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
2683     Base = Addr.getOperand(0);
2684     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
2685   } else {
2686     Base = Addr;
2687     Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
2688   }
2689 
2690   return true;
2691 }
2692 
2693 bool R600DAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
2694                                           SDValue &Offset) {
2695   ConstantSDNode *IMMOffset;
2696 
2697   if (Addr.getOpcode() == ISD::ADD
2698       && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
2699       && isInt<16>(IMMOffset->getZExtValue())) {
2700 
2701       Base = Addr.getOperand(0);
2702       Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
2703                                          MVT::i32);
2704       return true;
2705   // If the pointer address is constant, we can move it to the offset field.
2706   } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
2707              && isInt<16>(IMMOffset->getZExtValue())) {
2708     Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
2709                                   SDLoc(CurDAG->getEntryNode()),
2710                                   R600::ZERO, MVT::i32);
2711     Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
2712                                        MVT::i32);
2713     return true;
2714   }
2715 
2716   // Default case, no offset
2717   Base = Addr;
2718   Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
2719   return true;
2720 }
2721