1 //===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //==-----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Defines an instruction selector for the AMDGPU target.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPU.h"
15 #include "AMDGPUArgumentUsageInfo.h"
16 #include "AMDGPUISelLowering.h" // For AMDGPUISD
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPUPerfHintAnalysis.h"
19 #include "AMDGPUSubtarget.h"
20 #include "AMDGPUTargetMachine.h"
21 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
22 #include "SIDefines.h"
23 #include "SIISelLowering.h"
24 #include "SIInstrInfo.h"
25 #include "SIMachineFunctionInfo.h"
26 #include "SIRegisterInfo.h"
27 #include "llvm/ADT/APInt.h"
28 #include "llvm/ADT/SmallVector.h"
29 #include "llvm/ADT/StringRef.h"
30 #include "llvm/Analysis/LegacyDivergenceAnalysis.h"
31 #include "llvm/Analysis/LoopInfo.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/CodeGen/FunctionLoweringInfo.h"
34 #include "llvm/CodeGen/ISDOpcodes.h"
35 #include "llvm/CodeGen/MachineFunction.h"
36 #include "llvm/CodeGen/MachineRegisterInfo.h"
37 #include "llvm/CodeGen/SelectionDAG.h"
38 #include "llvm/CodeGen/SelectionDAGISel.h"
39 #include "llvm/CodeGen/SelectionDAGNodes.h"
40 #include "llvm/CodeGen/ValueTypes.h"
41 #include "llvm/IR/BasicBlock.h"
42 #include "llvm/InitializePasses.h"
43 #ifdef EXPENSIVE_CHECKS
44 #include "llvm/IR/Dominators.h"
45 #endif
46 #include "llvm/IR/Instruction.h"
47 #include "llvm/MC/MCInstrDesc.h"
48 #include "llvm/Support/Casting.h"
49 #include "llvm/Support/CodeGen.h"
50 #include "llvm/Support/ErrorHandling.h"
51 #include "llvm/Support/MachineValueType.h"
52 #include "llvm/Support/MathExtras.h"
53 #include <cassert>
54 #include <cstdint>
55 #include <new>
56 #include <vector>
57 
58 #define DEBUG_TYPE "isel"
59 
60 using namespace llvm;
61 
62 namespace llvm {
63 
64 class R600InstrInfo;
65 
66 } // end namespace llvm
67 
68 //===----------------------------------------------------------------------===//
69 // Instruction Selector Implementation
70 //===----------------------------------------------------------------------===//
71 
72 namespace {
73 
74 static bool isNullConstantOrUndef(SDValue V) {
75   if (V.isUndef())
76     return true;
77 
78   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
79   return Const != nullptr && Const->isNullValue();
80 }
81 
82 static bool getConstantValue(SDValue N, uint32_t &Out) {
83   // This is only used for packed vectors, where ussing 0 for undef should
84   // always be good.
85   if (N.isUndef()) {
86     Out = 0;
87     return true;
88   }
89 
90   if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
91     Out = C->getAPIntValue().getSExtValue();
92     return true;
93   }
94 
95   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N)) {
96     Out = C->getValueAPF().bitcastToAPInt().getSExtValue();
97     return true;
98   }
99 
100   return false;
101 }
102 
103 // TODO: Handle undef as zero
104 static SDNode *packConstantV2I16(const SDNode *N, SelectionDAG &DAG,
105                                  bool Negate = false) {
106   assert(N->getOpcode() == ISD::BUILD_VECTOR && N->getNumOperands() == 2);
107   uint32_t LHSVal, RHSVal;
108   if (getConstantValue(N->getOperand(0), LHSVal) &&
109       getConstantValue(N->getOperand(1), RHSVal)) {
110     SDLoc SL(N);
111     uint32_t K = Negate ?
112       (-LHSVal & 0xffff) | (-RHSVal << 16) :
113       (LHSVal & 0xffff) | (RHSVal << 16);
114     return DAG.getMachineNode(AMDGPU::S_MOV_B32, SL, N->getValueType(0),
115                               DAG.getTargetConstant(K, SL, MVT::i32));
116   }
117 
118   return nullptr;
119 }
120 
// Convenience wrapper around packConstantV2I16 that negates both elements
// before packing.
static SDNode *packNegConstantV2I16(const SDNode *N, SelectionDAG &DAG) {
  return packConstantV2I16(N, DAG, true);
}
124 
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const GCNSubtarget *Subtarget;

  // Default FP mode for the current function.
  AMDGPU::SIModeRegisterDefaults Mode;

  // Cached copy of AMDGPUTargetMachine::EnableLateStructurizeCFG, taken at
  // construction time.
  bool EnableLateStructurizeCFG;

public:
  // NOTE(review): TM defaults to nullptr but is dereferenced unconditionally
  // in the init list — presumably every caller passes a valid TM; confirm.
  explicit AMDGPUDAGToDAGISel(TargetMachine *TM = nullptr,
                              CodeGenOpt::Level OptLevel = CodeGenOpt::Default)
    : SelectionDAGISel(*TM, OptLevel) {
    EnableLateStructurizeCFG = AMDGPUTargetMachine::EnableLateStructurizeCFG;
  }
  ~AMDGPUDAGToDAGISel() override = default;

  // Declare the analyses this pass needs on top of the common
  // SelectionDAGISel requirements.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AMDGPUArgumentUsageInfo>();
    AU.addRequired<LegacyDivergenceAnalysis>();
#ifdef EXPENSIVE_CHECKS
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
#endif
    SelectionDAGISel::getAnalysisUsage(AU);
  }

  // Attempt the build_vector -> d16 load fold; see the definition for the
  // patterns handled. Returns true if the DAG was changed.
  bool matchLoadD16FromBuildVector(SDNode *N) const;

  bool runOnMachineFunction(MachineFunction &MF) override;
  void PreprocessISelDAG() override;
  void Select(SDNode *N) override;
  StringRef getPassName() const override;
  void PostprocessISelDAG() override;

protected:
  // Select a BUILD_VECTOR/SCALAR_TO_VECTOR into a REG_SEQUENCE of the given
  // register class.
  void SelectBuildVector(SDNode *N, unsigned RegClassID);

private:
  std::pair<SDValue, SDValue> foldFrameIndex(SDValue N) const;
  bool isNoNanSrc(SDValue N) const;
  bool isInlineImmediate(const SDNode *N, bool Negated = false) const;
  bool isNegInlineImmediate(const SDNode *N) const {
    return isInlineImmediate(N, true);
  }

  // Inline-immediate checks for specific operand widths; all honor the
  // subtarget's 1/(2*pi) inline-constant support.
  bool isInlineImmediate16(int64_t Imm) const {
    return AMDGPU::isInlinableLiteral16(Imm, Subtarget->hasInv2PiInlineImm());
  }

  bool isInlineImmediate32(int64_t Imm) const {
    return AMDGPU::isInlinableLiteral32(Imm, Subtarget->hasInv2PiInlineImm());
  }

  bool isInlineImmediate64(int64_t Imm) const {
    return AMDGPU::isInlinableLiteral64(Imm, Subtarget->hasInv2PiInlineImm());
  }

  bool isInlineImmediate(const APFloat &Imm) const {
    return Subtarget->getInstrInfo()->isInlineConstant(Imm);
  }

  bool isVGPRImm(const SDNode *N) const;
  bool isUniformLoad(const SDNode *N) const;
  bool isUniformBr(const SDNode *N) const;

  // Materialize a 64-bit scalar immediate; see the definition.
  MachineSDNode *buildSMovImm64(SDLoc &DL, uint64_t Val, EVT VT) const;

  // Helpers for gluing an M0 initialization onto a memory node's chain.
  SDNode *glueCopyToOp(SDNode *N, SDValue NewChain, SDValue Glue) const;
  SDNode *glueCopyToM0(SDNode *N, SDValue Val) const;
  SDNode *glueCopyToM0LDSInit(SDNode *N) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  // Addressing-mode selection hooks referenced by the generated matcher
  // tables (ComplexPatterns).
  virtual bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  virtual bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool isDSOffsetLegal(SDValue Base, unsigned Offset,
                       unsigned OffsetBits) const;
  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                 SDValue &Offset1) const;
  bool SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                   SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
                   SDValue &TFE, SDValue &DLC, SDValue &SWZ) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &SOffset, SDValue &Offset, SDValue &GLC,
                         SDValue &SLC, SDValue &TFE, SDValue &DLC,
                         SDValue &SWZ) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                         SDValue &VAddr, SDValue &SOffset, SDValue &Offset,
                         SDValue &SLC) const;
  bool SelectMUBUFScratchOffen(SDNode *Parent,
                               SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                               SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFScratchOffset(SDNode *Parent,
                                SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                                SDValue &Offset) const;

  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
                         SDValue &Offset, SDValue &GLC, SDValue &SLC,
                         SDValue &TFE, SDValue &DLC, SDValue &SWZ) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset, SDValue &SLC) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset) const;

  template <bool IsSigned>
  bool SelectFlatOffset(SDNode *N, SDValue Addr, SDValue &VAddr,
                        SDValue &Offset, SDValue &SLC) const;
  bool SelectFlatAtomic(SDNode *N, SDValue Addr, SDValue &VAddr,
                        SDValue &Offset, SDValue &SLC) const;
  bool SelectFlatAtomicSigned(SDNode *N, SDValue Addr, SDValue &VAddr,
                              SDValue &Offset, SDValue &SLC) const;

  bool SelectSMRDOffset(SDValue ByteOffsetNode, SDValue &Offset,
                        bool &Imm) const;
  SDValue Expand32BitAddress(SDValue Addr) const;
  bool SelectSMRD(SDValue Addr, SDValue &SBase, SDValue &Offset,
                  bool &Imm) const;
  bool SelectSMRDImm(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDImm32(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDSgpr(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDBufferImm(SDValue Addr, SDValue &Offset) const;
  bool SelectSMRDBufferImm32(SDValue Addr, SDValue &Offset) const;
  bool SelectMOVRELOffset(SDValue Index, SDValue &Base, SDValue &Offset) const;

  // VOP3 / VOP3P source-modifier selection helpers.
  bool SelectVOP3Mods_NNaN(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3ModsImpl(SDValue In, SDValue &Src, unsigned &SrcMods) const;
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3NoMods(SDValue In, SDValue &Src) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3NoMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                         SDValue &Clamp, SDValue &Omod) const;

  bool SelectVOP3OMods(SDValue In, SDValue &Src,
                       SDValue &Clamp, SDValue &Omod) const;

  bool SelectVOP3PMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  bool SelectVOP3OpSel(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  bool SelectVOP3OpSelMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src, unsigned &Mods) const;
  bool SelectVOP3PMadMixMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  SDValue getHi16Elt(SDValue In) const;

  SDValue getMaterializedScalarImm32(int64_t Val, const SDLoc &DL) const;

  // Custom selection routines for nodes the generated matcher does not
  // handle; dispatched from Select().
  void SelectADD_SUB_I64(SDNode *N);
  void SelectAddcSubb(SDNode *N);
  void SelectUADDO_USUBO(SDNode *N);
  void SelectDIV_SCALE(SDNode *N);
  void SelectMAD_64_32(SDNode *N);
  void SelectFMA_W_CHAIN(SDNode *N);
  void SelectFMUL_W_CHAIN(SDNode *N);

  SDNode *getS_BFE(unsigned Opcode, const SDLoc &DL, SDValue Val,
                   uint32_t Offset, uint32_t Width);
  void SelectS_BFEFromShifts(SDNode *N);
  void SelectS_BFE(SDNode *N);
  bool isCBranchSCC(const SDNode *N) const;
  void SelectBRCOND(SDNode *N);
  void SelectFMAD_FMA(SDNode *N);
  void SelectATOMIC_CMP_SWAP(SDNode *N);
  void SelectDSAppendConsume(SDNode *N, unsigned IntrID);
  void SelectDS_GWS(SDNode *N, unsigned IntrID);
  void SelectInterpP1F16(SDNode *N);
  void SelectINTRINSIC_W_CHAIN(SDNode *N);
  void SelectINTRINSIC_WO_CHAIN(SDNode *N);
  void SelectINTRINSIC_VOID(SDNode *N);

protected:
  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
305 
/// Instruction selector for the R600 family. Reuses the AMDGPU selector
/// machinery but overrides the addressing-mode hooks and top-level Select.
class R600DAGToDAGISel : public AMDGPUDAGToDAGISel {
  // Note: same identifier as the (private) base-class member; this one is
  // independent of it.
  const R600Subtarget *Subtarget;

  // Presumably tests whether \p N is a load from constant buffer \p cbID —
  // declaration only here; confirm against the definition.
  bool isConstantLoad(const MemSDNode *N, int cbID) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue& Offset);
public:
  explicit R600DAGToDAGISel(TargetMachine *TM, CodeGenOpt::Level OptLevel) :
      AMDGPUDAGToDAGISel(TM, OptLevel) {}

  void Select(SDNode *N) override;

  bool SelectADDRIndirect(SDValue Addr, SDValue &Base,
                          SDValue &Offset) override;
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                          SDValue &Offset) override;

  bool runOnMachineFunction(MachineFunction &MF) override;

  // R600 does not use the base class's d16-load preprocessing.
  void PreprocessISelDAG() override {}

protected:
  // Include the pieces autogenerated from the target description.
#include "R600GenDAGISel.inc"
};
332 
333 static SDValue stripBitcast(SDValue Val) {
334   return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
335 }
336 
337 // Figure out if this is really an extract of the high 16-bits of a dword.
338 static bool isExtractHiElt(SDValue In, SDValue &Out) {
339   In = stripBitcast(In);
340   if (In.getOpcode() != ISD::TRUNCATE)
341     return false;
342 
343   SDValue Srl = In.getOperand(0);
344   if (Srl.getOpcode() == ISD::SRL) {
345     if (ConstantSDNode *ShiftAmt = dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
346       if (ShiftAmt->getZExtValue() == 16) {
347         Out = stripBitcast(Srl.getOperand(0));
348         return true;
349       }
350     }
351   }
352 
353   return false;
354 }
355 
356 // Look through operations that obscure just looking at the low 16-bits of the
357 // same register.
358 static SDValue stripExtractLoElt(SDValue In) {
359   if (In.getOpcode() == ISD::TRUNCATE) {
360     SDValue Src = In.getOperand(0);
361     if (Src.getValueType().getSizeInBits() == 32)
362       return stripBitcast(Src);
363   }
364 
365   return In;
366 }
367 
368 }  // end anonymous namespace
369 
// Register the pass with the legacy pass manager, declaring the analyses it
// depends on (mirrors getAnalysisUsage above).
INITIALIZE_PASS_BEGIN(AMDGPUDAGToDAGISel, "amdgpu-isel",
                      "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)
INITIALIZE_PASS_DEPENDENCY(AMDGPUArgumentUsageInfo)
INITIALIZE_PASS_DEPENDENCY(AMDGPUPerfHintAnalysis)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
#ifdef EXPENSIVE_CHECKS
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
#endif
INITIALIZE_PASS_END(AMDGPUDAGToDAGISel, "amdgpu-isel",
                    "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)
381 
/// This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine *TM,
                                        CodeGenOpt::Level OptLevel) {
  return new AMDGPUDAGToDAGISel(TM, OptLevel);
}
388 
/// This pass converts a legalized DAG into an R600-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createR600ISelDag(TargetMachine *TM,
                                      CodeGenOpt::Level OptLevel) {
  return new R600DAGToDAGISel(TM, OptLevel);
}
395 
bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
#ifdef EXPENSIVE_CHECKS
  // Under expensive checks, assert that every loop is in LCSSA form before
  // running selection.
  DominatorTree & DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LoopInfo * LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  for (auto &L : LI->getLoopsInPreorder()) {
    assert(L->isLCSSAForm(DT));
  }
#endif
  // Cache the subtarget and the function's default FP mode before handing
  // off to the common SelectionDAGISel driver.
  Subtarget = &MF.getSubtarget<GCNSubtarget>();
  Mode = AMDGPU::SIModeRegisterDefaults(MF.getFunction());
  return SelectionDAGISel::runOnMachineFunction(MF);
}
408 
// Try to fold a v2i16/v2f16 build_vector whose high or low element comes
// directly from a 16-bit (or extending 8-bit) load into one of the
// LOAD_D16_* nodes, which write only that half of the destination while
// preserving the other half. Returns true if the DAG was changed.
bool AMDGPUDAGToDAGISel::matchLoadD16FromBuildVector(SDNode *N) const {
  assert(Subtarget->d16PreservesUnusedBits());
  MVT VT = N->getValueType(0).getSimpleVT();
  if (VT != MVT::v2i16 && VT != MVT::v2f16)
    return false;

  SDValue Lo = N->getOperand(0);
  SDValue Hi = N->getOperand(1);

  LoadSDNode *LdHi = dyn_cast<LoadSDNode>(stripBitcast(Hi));

  // build_vector lo, (load ptr) -> load_d16_hi ptr, lo
  // build_vector lo, (zextload ptr from i8) -> load_d16_hi_u8 ptr, lo
  // build_vector lo, (sextload ptr from i8) -> load_d16_hi_i8 ptr, lo

  // Need to check for possible indirect dependencies on the other half of the
  // vector to avoid introducing a cycle.
  if (LdHi && Hi.hasOneUse() && !LdHi->isPredecessorOf(Lo.getNode())) {
    SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);

    // The existing low element becomes the tied input of the d16 load.
    SDValue TiedIn = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Lo);
    SDValue Ops[] = {
      LdHi->getChain(), LdHi->getBasePtr(), TiedIn
    };

    // Pick the opcode based on memory width and extension kind.
    unsigned LoadOp = AMDGPUISD::LOAD_D16_HI;
    if (LdHi->getMemoryVT() == MVT::i8) {
      LoadOp = LdHi->getExtensionType() == ISD::SEXTLOAD ?
        AMDGPUISD::LOAD_D16_HI_I8 : AMDGPUISD::LOAD_D16_HI_U8;
    } else {
      assert(LdHi->getMemoryVT() == MVT::i16);
    }

    SDValue NewLoadHi =
      CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdHi), VTList,
                                  Ops, LdHi->getMemoryVT(),
                                  LdHi->getMemOperand());

    // Replace both the vector result and the old load's chain.
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadHi);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdHi, 1), NewLoadHi.getValue(1));
    return true;
  }

  // build_vector (load ptr), hi -> load_d16_lo ptr, hi
  // build_vector (zextload ptr from i8), hi -> load_d16_lo_u8 ptr, hi
  // build_vector (sextload ptr from i8), hi -> load_d16_lo_i8 ptr, hi
  LoadSDNode *LdLo = dyn_cast<LoadSDNode>(stripBitcast(Lo));
  if (LdLo && Lo.hasOneUse()) {
    SDValue TiedIn = getHi16Elt(Hi);
    // Same cycle check as above, this time against the high element.
    if (!TiedIn || LdLo->isPredecessorOf(TiedIn.getNode()))
      return false;

    SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);
    unsigned LoadOp = AMDGPUISD::LOAD_D16_LO;
    if (LdLo->getMemoryVT() == MVT::i8) {
      LoadOp = LdLo->getExtensionType() == ISD::SEXTLOAD ?
        AMDGPUISD::LOAD_D16_LO_I8 : AMDGPUISD::LOAD_D16_LO_U8;
    } else {
      assert(LdLo->getMemoryVT() == MVT::i16);
    }

    TiedIn = CurDAG->getNode(ISD::BITCAST, SDLoc(N), VT, TiedIn);

    SDValue Ops[] = {
      LdLo->getChain(), LdLo->getBasePtr(), TiedIn
    };

    SDValue NewLoadLo =
      CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdLo), VTList,
                                  Ops, LdLo->getMemoryVT(),
                                  LdLo->getMemOperand());

    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadLo);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdLo, 1), NewLoadLo.getValue(1));
    return true;
  }

  return false;
}
488 
489 void AMDGPUDAGToDAGISel::PreprocessISelDAG() {
490   if (!Subtarget->d16PreservesUnusedBits())
491     return;
492 
493   SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
494 
495   bool MadeChange = false;
496   while (Position != CurDAG->allnodes_begin()) {
497     SDNode *N = &*--Position;
498     if (N->use_empty())
499       continue;
500 
501     switch (N->getOpcode()) {
502     case ISD::BUILD_VECTOR:
503       MadeChange |= matchLoadD16FromBuildVector(N);
504       break;
505     default:
506       break;
507     }
508   }
509 
510   if (MadeChange) {
511     CurDAG->RemoveDeadNodes();
512     LLVM_DEBUG(dbgs() << "After PreProcess:\n";
513                CurDAG->dump(););
514   }
515 }
516 
517 bool AMDGPUDAGToDAGISel::isNoNanSrc(SDValue N) const {
518   if (TM.Options.NoNaNsFPMath)
519     return true;
520 
521   // TODO: Move into isKnownNeverNaN
522   if (N->getFlags().isDefined())
523     return N->getFlags().hasNoNaNs();
524 
525   return CurDAG->isKnownNeverNaN(N);
526 }
527 
528 bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N,
529                                            bool Negated) const {
530   if (N->isUndef())
531     return true;
532 
533   const SIInstrInfo *TII = Subtarget->getInstrInfo();
534   if (Negated) {
535     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
536       return TII->isInlineConstant(-C->getAPIntValue());
537 
538     if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
539       return TII->isInlineConstant(-C->getValueAPF().bitcastToAPInt());
540 
541   } else {
542     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
543       return TII->isInlineConstant(C->getAPIntValue());
544 
545     if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
546       return TII->isInlineConstant(C->getValueAPF().bitcastToAPInt());
547   }
548 
549   return false;
550 }
551 
/// Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode()) {
    if (N->getOpcode() == ISD::CopyToReg) {
      // Virtual registers carry their class in MRI; physical registers are
      // resolved through the target register info below.
      unsigned Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
      if (Register::isVirtualRegister(Reg)) {
        MachineRegisterInfo &MRI = CurDAG->getMachineFunction().getRegInfo();
        return MRI.getRegClass(Reg);
      }

      const SIRegisterInfo *TRI
        = static_cast<const GCNSubtarget *>(Subtarget)->getRegisterInfo();
      return TRI->getPhysRegClass(Reg);
    }

    return nullptr;
  }

  switch (N->getMachineOpcode()) {
  default: {
    // Ordinary machine instruction: the operand's class is recorded in the
    // MCInstrDesc. OpNo counts only uses, so skip over the defs.
    const MCInstrDesc &Desc =
        Subtarget->getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return Subtarget->getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    // REG_SEQUENCE: operand 0 is the super-register class id, followed by
    // (value, subreg-index) pairs. Constrain the super class by the pair's
    // subreg index.
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
        Subtarget->getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
                                                              SubRegIdx);
  }
  }
}
599 
600 SDNode *AMDGPUDAGToDAGISel::glueCopyToOp(SDNode *N, SDValue NewChain,
601                                          SDValue Glue) const {
602   SmallVector <SDValue, 8> Ops;
603   Ops.push_back(NewChain); // Replace the chain.
604   for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
605     Ops.push_back(N->getOperand(i));
606 
607   Ops.push_back(Glue);
608   return CurDAG->MorphNodeTo(N, N->getOpcode(), N->getVTList(), Ops);
609 }
610 
611 SDNode *AMDGPUDAGToDAGISel::glueCopyToM0(SDNode *N, SDValue Val) const {
612   const SITargetLowering& Lowering =
613     *static_cast<const SITargetLowering*>(getTargetLowering());
614 
615   assert(N->getOperand(0).getValueType() == MVT::Other && "Expected chain");
616 
617   SDValue M0 = Lowering.copyToM0(*CurDAG, N->getOperand(0), SDLoc(N), Val);
618   return glueCopyToOp(N, M0, M0.getValue(1));
619 }
620 
621 SDNode *AMDGPUDAGToDAGISel::glueCopyToM0LDSInit(SDNode *N) const {
622   unsigned AS = cast<MemSDNode>(N)->getAddressSpace();
623   if (AS == AMDGPUAS::LOCAL_ADDRESS) {
624     if (Subtarget->ldsRequiresM0Init())
625       return glueCopyToM0(N, CurDAG->getTargetConstant(-1, SDLoc(N), MVT::i32));
626   } else if (AS == AMDGPUAS::REGION_ADDRESS) {
627     MachineFunction &MF = CurDAG->getMachineFunction();
628     unsigned Value = MF.getInfo<SIMachineFunctionInfo>()->getGDSSize();
629     return
630         glueCopyToM0(N, CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i32));
631   }
632   return N;
633 }
634 
635 MachineSDNode *AMDGPUDAGToDAGISel::buildSMovImm64(SDLoc &DL, uint64_t Imm,
636                                                   EVT VT) const {
637   SDNode *Lo = CurDAG->getMachineNode(
638       AMDGPU::S_MOV_B32, DL, MVT::i32,
639       CurDAG->getTargetConstant(Imm & 0xFFFFFFFF, DL, MVT::i32));
640   SDNode *Hi =
641       CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
642                              CurDAG->getTargetConstant(Imm >> 32, DL, MVT::i32));
643   const SDValue Ops[] = {
644       CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
645       SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
646       SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)};
647 
648   return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, VT, Ops);
649 }
650 
651 void AMDGPUDAGToDAGISel::SelectBuildVector(SDNode *N, unsigned RegClassID) {
652   EVT VT = N->getValueType(0);
653   unsigned NumVectorElts = VT.getVectorNumElements();
654   EVT EltVT = VT.getVectorElementType();
655   SDLoc DL(N);
656   SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
657 
658   if (NumVectorElts == 1) {
659     CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT, N->getOperand(0),
660                          RegClass);
661     return;
662   }
663 
664   assert(NumVectorElts <= 32 && "Vectors with more than 32 elements not "
665                                   "supported yet");
666   // 32 = Max Num Vector Elements
667   // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
668   // 1 = Vector Register Class
669   SmallVector<SDValue, 32 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);
670 
671   bool IsGCN = CurDAG->getSubtarget().getTargetTriple().getArch() ==
672                Triple::amdgcn;
673   RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
674   bool IsRegSeq = true;
675   unsigned NOps = N->getNumOperands();
676   for (unsigned i = 0; i < NOps; i++) {
677     // XXX: Why is this here?
678     if (isa<RegisterSDNode>(N->getOperand(i))) {
679       IsRegSeq = false;
680       break;
681     }
682     unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
683                          : R600RegisterInfo::getSubRegFromChannel(i);
684     RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
685     RegSeqArgs[1 + (2 * i) + 1] = CurDAG->getTargetConstant(Sub, DL, MVT::i32);
686   }
687   if (NOps != NumVectorElts) {
688     // Fill in the missing undef elements if this was a scalar_to_vector.
689     assert(N->getOpcode() == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
690     MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
691                                                    DL, EltVT);
692     for (unsigned i = NOps; i < NumVectorElts; ++i) {
693       unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
694                            : R600RegisterInfo::getSubRegFromChannel(i);
695       RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
696       RegSeqArgs[1 + (2 * i) + 1] =
697           CurDAG->getTargetConstant(Sub, DL, MVT::i32);
698     }
699   }
700 
701   if (!IsRegSeq)
702     SelectCode(N);
703   CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(), RegSeqArgs);
704 }
705 
706 void AMDGPUDAGToDAGISel::Select(SDNode *N) {
707   unsigned int Opc = N->getOpcode();
708   if (N->isMachineOpcode()) {
709     N->setNodeId(-1);
710     return;   // Already selected.
711   }
712 
713   // isa<MemSDNode> almost works but is slightly too permissive for some DS
714   // intrinsics.
715   if (Opc == ISD::LOAD || Opc == ISD::STORE || isa<AtomicSDNode>(N) ||
716       (Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC ||
717        Opc == ISD::ATOMIC_LOAD_FADD ||
718        Opc == AMDGPUISD::ATOMIC_LOAD_FMIN ||
719        Opc == AMDGPUISD::ATOMIC_LOAD_FMAX)) {
720     N = glueCopyToM0LDSInit(N);
721     SelectCode(N);
722     return;
723   }
724 
725   switch (Opc) {
726   default:
727     break;
728   // We are selecting i64 ADD here instead of custom lower it during
729   // DAG legalization, so we can fold some i64 ADDs used for address
730   // calculation into the LOAD and STORE instructions.
731   case ISD::ADDC:
732   case ISD::ADDE:
733   case ISD::SUBC:
734   case ISD::SUBE: {
735     if (N->getValueType(0) != MVT::i64)
736       break;
737 
738     SelectADD_SUB_I64(N);
739     return;
740   }
741   case ISD::ADDCARRY:
742   case ISD::SUBCARRY:
743     if (N->getValueType(0) != MVT::i32)
744       break;
745 
746     SelectAddcSubb(N);
747     return;
748   case ISD::UADDO:
749   case ISD::USUBO: {
750     SelectUADDO_USUBO(N);
751     return;
752   }
753   case AMDGPUISD::FMUL_W_CHAIN: {
754     SelectFMUL_W_CHAIN(N);
755     return;
756   }
757   case AMDGPUISD::FMA_W_CHAIN: {
758     SelectFMA_W_CHAIN(N);
759     return;
760   }
761 
762   case ISD::SCALAR_TO_VECTOR:
763   case ISD::BUILD_VECTOR: {
764     EVT VT = N->getValueType(0);
765     unsigned NumVectorElts = VT.getVectorNumElements();
766     if (VT.getScalarSizeInBits() == 16) {
767       if (Opc == ISD::BUILD_VECTOR && NumVectorElts == 2) {
768         if (SDNode *Packed = packConstantV2I16(N, *CurDAG)) {
769           ReplaceNode(N, Packed);
770           return;
771         }
772       }
773 
774       break;
775     }
776 
777     assert(VT.getVectorElementType().bitsEq(MVT::i32));
778     unsigned RegClassID =
779         SIRegisterInfo::getSGPRClassForBitWidth(NumVectorElts * 32)->getID();
780     SelectBuildVector(N, RegClassID);
781     return;
782   }
783   case ISD::BUILD_PAIR: {
784     SDValue RC, SubReg0, SubReg1;
785     SDLoc DL(N);
786     if (N->getValueType(0) == MVT::i128) {
787       RC = CurDAG->getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32);
788       SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32);
789       SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32);
790     } else if (N->getValueType(0) == MVT::i64) {
791       RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32);
792       SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
793       SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
794     } else {
795       llvm_unreachable("Unhandled value type for BUILD_PAIR");
796     }
797     const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
798                             N->getOperand(1), SubReg1 };
799     ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
800                                           N->getValueType(0), Ops));
801     return;
802   }
803 
804   case ISD::Constant:
805   case ISD::ConstantFP: {
806     if (N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
807       break;
808 
809     uint64_t Imm;
810     if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
811       Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
812     else {
813       ConstantSDNode *C = cast<ConstantSDNode>(N);
814       Imm = C->getZExtValue();
815     }
816 
817     SDLoc DL(N);
818     ReplaceNode(N, buildSMovImm64(DL, Imm, N->getValueType(0)));
819     return;
820   }
821   case AMDGPUISD::BFE_I32:
822   case AMDGPUISD::BFE_U32: {
823     // There is a scalar version available, but unlike the vector version which
824     // has a separate operand for the offset and width, the scalar version packs
825     // the width and offset into a single operand. Try to move to the scalar
826     // version if the offsets are constant, so that we can try to keep extended
827     // loads of kernel arguments in SGPRs.
828 
829     // TODO: Technically we could try to pattern match scalar bitshifts of
830     // dynamic values, but it's probably not useful.
831     ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
832     if (!Offset)
833       break;
834 
835     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
836     if (!Width)
837       break;
838 
839     bool Signed = Opc == AMDGPUISD::BFE_I32;
840 
841     uint32_t OffsetVal = Offset->getZExtValue();
842     uint32_t WidthVal = Width->getZExtValue();
843 
844     ReplaceNode(N, getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
845                             SDLoc(N), N->getOperand(0), OffsetVal, WidthVal));
846     return;
847   }
848   case AMDGPUISD::DIV_SCALE: {
849     SelectDIV_SCALE(N);
850     return;
851   }
852   case AMDGPUISD::MAD_I64_I32:
853   case AMDGPUISD::MAD_U64_U32: {
854     SelectMAD_64_32(N);
855     return;
856   }
857   case ISD::CopyToReg: {
858     const SITargetLowering& Lowering =
859       *static_cast<const SITargetLowering*>(getTargetLowering());
860     N = Lowering.legalizeTargetIndependentNode(N, *CurDAG);
861     break;
862   }
863   case ISD::AND:
864   case ISD::SRL:
865   case ISD::SRA:
866   case ISD::SIGN_EXTEND_INREG:
867     if (N->getValueType(0) != MVT::i32)
868       break;
869 
870     SelectS_BFE(N);
871     return;
872   case ISD::BRCOND:
873     SelectBRCOND(N);
874     return;
875   case ISD::FMAD:
876   case ISD::FMA:
877     SelectFMAD_FMA(N);
878     return;
879   case AMDGPUISD::ATOMIC_CMP_SWAP:
880     SelectATOMIC_CMP_SWAP(N);
881     return;
882   case AMDGPUISD::CVT_PKRTZ_F16_F32:
883   case AMDGPUISD::CVT_PKNORM_I16_F32:
884   case AMDGPUISD::CVT_PKNORM_U16_F32:
885   case AMDGPUISD::CVT_PK_U16_U32:
886   case AMDGPUISD::CVT_PK_I16_I32: {
887     // Hack around using a legal type if f16 is illegal.
888     if (N->getValueType(0) == MVT::i32) {
889       MVT NewVT = Opc == AMDGPUISD::CVT_PKRTZ_F16_F32 ? MVT::v2f16 : MVT::v2i16;
890       N = CurDAG->MorphNodeTo(N, N->getOpcode(), CurDAG->getVTList(NewVT),
891                               { N->getOperand(0), N->getOperand(1) });
892       SelectCode(N);
893       return;
894     }
895 
896     break;
897   }
898   case ISD::INTRINSIC_W_CHAIN: {
899     SelectINTRINSIC_W_CHAIN(N);
900     return;
901   }
902   case ISD::INTRINSIC_WO_CHAIN: {
903     SelectINTRINSIC_WO_CHAIN(N);
904     return;
905   }
906   case ISD::INTRINSIC_VOID: {
907     SelectINTRINSIC_VOID(N);
908     return;
909   }
910   }
911 
912   SelectCode(N);
913 }
914 
915 bool AMDGPUDAGToDAGISel::isUniformBr(const SDNode *N) const {
916   const BasicBlock *BB = FuncInfo->MBB->getBasicBlock();
917   const Instruction *Term = BB->getTerminator();
918   return Term->getMetadata("amdgpu.uniform") ||
919          Term->getMetadata("structurizecfg.uniform");
920 }
921 
922 StringRef AMDGPUDAGToDAGISel::getPassName() const {
923   return "AMDGPU DAG->DAG Pattern Instruction Selection";
924 }
925 
926 //===----------------------------------------------------------------------===//
927 // Complex Patterns
928 //===----------------------------------------------------------------------===//
929 
// Stub: this addressing mode is never matched by this selector. Base and
// Offset are left untouched and selection falls through to other patterns.
bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  return false;
}
934 
935 bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
936                                             SDValue &Offset) {
937   ConstantSDNode *C;
938   SDLoc DL(Addr);
939 
940   if ((C = dyn_cast<ConstantSDNode>(Addr))) {
941     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
942     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
943   } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
944              (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
945     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
946     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
947   } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
948             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
949     Base = Addr.getOperand(0);
950     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
951   } else {
952     Base = Addr;
953     Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
954   }
955 
956   return true;
957 }
958 
959 SDValue AMDGPUDAGToDAGISel::getMaterializedScalarImm32(int64_t Val,
960                                                        const SDLoc &DL) const {
961   SDNode *Mov = CurDAG->getMachineNode(
962     AMDGPU::S_MOV_B32, DL, MVT::i32,
963     CurDAG->getTargetConstant(Val, DL, MVT::i32));
964   return SDValue(Mov, 0);
965 }
966 
// Lower a 64-bit add/sub (optionally with carry in/out) into a pair of 32-bit
// operations on the sub0/sub1 halves, recombined with a REG_SEQUENCE.
// FIXME: Should only handle addcarry/subcarry
void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  unsigned Opcode = N->getOpcode();
  // ADDE/SUBE consume an incoming carry; those plus ADDC/SUBC also produce a
  // carry-out on result 1.
  bool ConsumeCarry = (Opcode == ISD::ADDE || Opcode == ISD::SUBE);
  bool ProduceCarry =
      ConsumeCarry || Opcode == ISD::ADDC || Opcode == ISD::SUBC;
  bool IsAdd = Opcode == ISD::ADD || Opcode == ISD::ADDC || Opcode == ISD::ADDE;

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);

  // Split both operands into their low (sub0) and high (sub1) 32-bit halves.
  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);

  // Opcode table indexed as [carry-in?][divergent?][add?]: scalar opcodes for
  // uniform nodes, VALU opcodes for divergent ones.
  static const unsigned OpcMap[2][2][2] = {
      {{AMDGPU::S_SUB_U32, AMDGPU::S_ADD_U32},
       {AMDGPU::V_SUB_CO_U32_e32, AMDGPU::V_ADD_CO_U32_e32}},
      {{AMDGPU::S_SUBB_U32, AMDGPU::S_ADDC_U32},
       {AMDGPU::V_SUBB_U32_e32, AMDGPU::V_ADDC_U32_e32}}};

  unsigned Opc = OpcMap[0][N->isDivergent()][IsAdd];
  unsigned CarryOpc = OpcMap[1][N->isDivergent()][IsAdd];

  // Low half: plain add/sub, or the carry-consuming variant when the node has
  // an incoming carry operand.
  SDNode *AddLo;
  if (!ConsumeCarry) {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };
    AddLo = CurDAG->getMachineNode(Opc, DL, VTList, Args);
  } else {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0), N->getOperand(2) };
    AddLo = CurDAG->getMachineNode(CarryOpc, DL, VTList, Args);
  }
  // High half always consumes the low half's carry (the glue result 1).
  SDValue AddHiArgs[] = {
    SDValue(Hi0, 0),
    SDValue(Hi1, 0),
    SDValue(AddLo, 1)
  };
  SDNode *AddHi = CurDAG->getMachineNode(CarryOpc, DL, VTList, AddHiArgs);

  // Reassemble the 64-bit result from the two 32-bit halves.
  SDValue RegSequenceArgs[] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
    SDValue(AddLo,0),
    Sub0,
    SDValue(AddHi,0),
    Sub1,
  };
  SDNode *RegSequence = CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                               MVT::i64, RegSequenceArgs);

  if (ProduceCarry) {
    // Replace the carry-use
    ReplaceUses(SDValue(N, 1), SDValue(AddHi, 1));
  }

  // Replace the remaining uses.
  ReplaceNode(N, RegSequence);
}
1036 
1037 void AMDGPUDAGToDAGISel::SelectAddcSubb(SDNode *N) {
1038   SDLoc DL(N);
1039   SDValue LHS = N->getOperand(0);
1040   SDValue RHS = N->getOperand(1);
1041   SDValue CI = N->getOperand(2);
1042 
1043   if (N->isDivergent()) {
1044     unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::V_ADDC_U32_e64
1045                                                    : AMDGPU::V_SUBB_U32_e64;
1046     CurDAG->SelectNodeTo(
1047         N, Opc, N->getVTList(),
1048         {LHS, RHS, CI,
1049          CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
1050   } else {
1051     unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::S_ADD_CO_PSEUDO
1052                                                    : AMDGPU::S_SUB_CO_PSEUDO;
1053     CurDAG->SelectNodeTo(N, Opc, N->getVTList(), {LHS, RHS, CI});
1054   }
1055 }
1056 
1057 void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) {
1058   // The name of the opcodes are misleading. v_add_i32/v_sub_i32 have unsigned
1059   // carry out despite the _i32 name. These were renamed in VI to _U32.
1060   // FIXME: We should probably rename the opcodes here.
1061   bool IsAdd = N->getOpcode() == ISD::UADDO;
1062   bool IsVALU = N->isDivergent();
1063 
1064   for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end(); UI != E;
1065        ++UI)
1066     if (UI.getUse().getResNo() == 1) {
1067       if ((IsAdd && (UI->getOpcode() != ISD::ADDCARRY)) ||
1068           (!IsAdd && (UI->getOpcode() != ISD::SUBCARRY))) {
1069         IsVALU = true;
1070         break;
1071       }
1072     }
1073 
1074   if (IsVALU) {
1075     unsigned Opc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
1076 
1077     CurDAG->SelectNodeTo(
1078         N, Opc, N->getVTList(),
1079         {N->getOperand(0), N->getOperand(1),
1080          CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
1081   } else {
1082     unsigned Opc = N->getOpcode() == ISD::UADDO ? AMDGPU::S_UADDO_PSEUDO
1083                                                 : AMDGPU::S_USUBO_PSEUDO;
1084 
1085     CurDAG->SelectNodeTo(N, Opc, N->getVTList(),
1086                          {N->getOperand(0), N->getOperand(1)});
1087   }
1088 }
1089 
void AMDGPUDAGToDAGISel::SelectFMA_W_CHAIN(SDNode *N) {
  SDLoc SL(N);
  // Operand layout for V_FMA_F32:
  //   [0] src0_modifiers, [1] src0, [2] src1_modifiers, [3] src1,
  //   [4] src2_modifiers, [5] src2, [6] clamp, [7] omod,
  //   [8] chain (node operand 0), [9] node operand 4 (presumably the glue
  //   input — TODO confirm).
  SDValue Ops[10];

  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[6], Ops[7]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  SelectVOP3Mods(N->getOperand(3), Ops[5], Ops[4]);
  Ops[8] = N->getOperand(0);
  Ops[9] = N->getOperand(4);

  CurDAG->SelectNodeTo(N, AMDGPU::V_FMA_F32, N->getVTList(), Ops);
}
1103 
void AMDGPUDAGToDAGISel::SelectFMUL_W_CHAIN(SDNode *N) {
  SDLoc SL(N);
  // Operand layout for V_MUL_F32_e64:
  //   [0] src0_modifiers, [1] src0, [2] src1_modifiers, [3] src1,
  //   [4] clamp, [5] omod,
  //   [6] chain (node operand 0), [7] node operand 3 (presumably the glue
  //   input — TODO confirm).
  SDValue Ops[8];

  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[4], Ops[5]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  Ops[6] = N->getOperand(0);
  Ops[7] = N->getOperand(3);

  CurDAG->SelectNodeTo(N, AMDGPU::V_MUL_F32_e64, N->getVTList(), Ops);
}
1116 
1117 // We need to handle this here because tablegen doesn't support matching
1118 // instructions with multiple outputs.
1119 void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
1120   SDLoc SL(N);
1121   EVT VT = N->getValueType(0);
1122 
1123   assert(VT == MVT::f32 || VT == MVT::f64);
1124 
1125   unsigned Opc
1126     = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;
1127 
1128   SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2) };
1129   CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
1130 }
1131 
1132 // We need to handle this here because tablegen doesn't support matching
1133 // instructions with multiple outputs.
1134 void AMDGPUDAGToDAGISel::SelectMAD_64_32(SDNode *N) {
1135   SDLoc SL(N);
1136   bool Signed = N->getOpcode() == AMDGPUISD::MAD_I64_I32;
1137   unsigned Opc = Signed ? AMDGPU::V_MAD_I64_I32 : AMDGPU::V_MAD_U64_U32;
1138 
1139   SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
1140   SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
1141                     Clamp };
1142   CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
1143 }
1144 
1145 bool AMDGPUDAGToDAGISel::isDSOffsetLegal(SDValue Base, unsigned Offset,
1146                                          unsigned OffsetBits) const {
1147   if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
1148       (OffsetBits == 8 && !isUInt<8>(Offset)))
1149     return false;
1150 
1151   if (Subtarget->hasUsableDSOffset() ||
1152       Subtarget->unsafeDSOffsetFoldingEnabled())
1153     return true;
1154 
1155   // On Southern Islands instruction with a negative base value and an offset
1156   // don't seem to work.
1157   return CurDAG->SignBitIsZero(Base);
1158 }
1159 
// Match a DS address as a base plus an unsigned 16-bit byte offset. Always
// succeeds; the fallback is the raw address with a zero offset.
bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
                                              SDValue &Offset) const {
  SDLoc DL(Addr);
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
      // (add n0, c0)
      Base = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      int64_t ByteOffset = C->getSExtValue();
      if (isUInt<16>(ByteOffset)) {
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffsetLegal(Sub, ByteOffset, 16)) {
          SmallVector<SDValue, 3> Opnds;
          Opnds.push_back(Zero);
          Opnds.push_back(Addr.getOperand(1));

          // FIXME: Select to VOP3 version for with-carry.
          unsigned SubOp = AMDGPU::V_SUB_CO_U32_e32;
          if (Subtarget->hasAddNoCarry()) {
            // The no-carry form takes an extra clamp-bit operand.
            SubOp = AMDGPU::V_SUB_U32_e64;
            Opnds.push_back(
                CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
          }

          // Emit the actual (0 - x) machine node used as the base.
          MachineSDNode *MachineSub =
              CurDAG->getMachineNode(SubOp, DL, MVT::i32, Opnds);

          Base = SDValue(MachineSub, 0);
          Offset = CurDAG->getTargetConstant(ByteOffset, DL, MVT::i16);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    // If we have a constant address, prefer to put the constant into the
    // offset. This can save moves to load the constant address since multiple
    // operations can share the zero base address register, and enables merging
    // into read2 / write2 instructions.

    SDLoc DL(Addr);

    if (isUInt<16>(CAddr->getZExtValue())) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                 DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // default case
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i16);
  return true;
}
1231 
// Match an address for a 64-bit, 4-byte-aligned DS access as a base plus two
// 8-bit offsets in dword (4-byte) units — the second offset is always the
// next dword. Always succeeds; the fallback is the raw address with offsets
// 0 and 1.
// TODO: If offset is too big, put low 16-bit into offset.
bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
                                                   SDValue &Offset0,
                                                   SDValue &Offset1) const {
  SDLoc DL(Addr);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    // Convert the byte offset to dword units; the pair covers two adjacent
    // dwords.
    unsigned DWordOffset0 = C1->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    // (add n0, c0)
    if (isDSOffsetLegal(N0, DWordOffset1, 8)) {
      Base = N0;
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      unsigned DWordOffset0 = C->getZExtValue() / 4;
      unsigned DWordOffset1 = DWordOffset0 + 1;

      if (isUInt<8>(DWordOffset0)) {
        SDLoc DL(Addr);
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffsetLegal(Sub, DWordOffset1, 8)) {
          SmallVector<SDValue, 3> Opnds;
          Opnds.push_back(Zero);
          Opnds.push_back(Addr.getOperand(1));
          unsigned SubOp = AMDGPU::V_SUB_CO_U32_e32;
          if (Subtarget->hasAddNoCarry()) {
            // The no-carry form takes an extra clamp-bit operand.
            SubOp = AMDGPU::V_SUB_U32_e64;
            Opnds.push_back(
                CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
          }

          // Emit the actual (0 - x) machine node used as the base.
          MachineSDNode *MachineSub
            = CurDAG->getMachineNode(SubOp, DL, MVT::i32, Opnds);

          Base = SDValue(MachineSub, 0);
          Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
          Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    // Constant address: fold it entirely into the offsets with a zero base.
    unsigned DWordOffset0 = CAddr->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    assert(4 * DWordOffset0 == CAddr->getZExtValue());

    if (isUInt<8>(DWordOffset0) && isUInt<8>(DWordOffset1)) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero
        = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                 DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
      return true;
    }
  }

  // default case

  Base = Addr;
  Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
  Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
  return true;
}
1312 
// Decompose Addr into the components of a MUBUF access: a uniform pointer
// (Ptr) used to build the resource, a VGPR address (VAddr), an SGPR offset
// (SOffset), a 16-bit immediate offset, the offen/idxen/addr64 addressing
// mode bits, and the cache-control bits. Returns false when flat
// instructions should be used instead.
bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
                                     SDValue &VAddr, SDValue &SOffset,
                                     SDValue &Offset, SDValue &Offen,
                                     SDValue &Idxen, SDValue &Addr64,
                                     SDValue &GLC, SDValue &SLC,
                                     SDValue &TFE, SDValue &DLC,
                                     SDValue &SWZ) const {
  // Subtarget prefers to use flat instruction
  // FIXME: This should be a pattern predicate and not reach here
  if (Subtarget->useFlatForGlobal())
    return false;

  SDLoc DL(Addr);

  // GLC/SLC may have been preset by the caller; only default them when unset.
  if (!GLC.getNode())
    GLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
  if (!SLC.getNode())
    SLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
  TFE = CurDAG->getTargetConstant(0, DL, MVT::i1);
  DLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
  SWZ = CurDAG->getTargetConstant(0, DL, MVT::i1);

  Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Offen = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Addr64 = CurDAG->getTargetConstant(0, DL, MVT::i1);
  SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);

  // Peel off a constant offset that fits in 32 bits, if present.
  ConstantSDNode *C1 = nullptr;
  SDValue N0 = Addr;
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    C1 = cast<ConstantSDNode>(Addr.getOperand(1));
    if (isUInt<32>(C1->getZExtValue()))
      N0 = Addr.getOperand(0);
    else
      C1 = nullptr;
  }

  if (N0.getOpcode() == ISD::ADD) {
    // (add N2, N3) -> addr64, or
    // (add (add N2, N3), C1) -> addr64
    SDValue N2 = N0.getOperand(0);
    SDValue N3 = N0.getOperand(1);
    Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);

    // Route the uniform add operand to the resource pointer and the divergent
    // one to the VGPR address.
    if (N2->isDivergent()) {
      if (N3->isDivergent()) {
        // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
        // addr64, and construct the resource from a 0 address.
        Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
        VAddr = N0;
      } else {
        // N2 is divergent, N3 is not.
        Ptr = N3;
        VAddr = N2;
      }
    } else {
      // N2 is not divergent.
      Ptr = N2;
      VAddr = N3;
    }
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  } else if (N0->isDivergent()) {
    // N0 is divergent. Use it as the addr64, and construct the resource from a
    // 0 address.
    Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
    VAddr = N0;
    Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
  } else {
    // N0 -> offset, or
    // (N0 + C1) -> offset
    VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
    Ptr = N0;
  }

  if (!C1) {
    // No offset.
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
    return true;
  }

  if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue())) {
    // Legal offset for instruction.
    Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
    return true;
  }

  // Illegal offset, store it in soffset.
  Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  SOffset =
      SDValue(CurDAG->getMachineNode(
                  AMDGPU::S_MOV_B32, DL, MVT::i32,
                  CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32)),
              0);
  return true;
}
1408 
1409 bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
1410                                            SDValue &VAddr, SDValue &SOffset,
1411                                            SDValue &Offset, SDValue &GLC,
1412                                            SDValue &SLC, SDValue &TFE,
1413                                            SDValue &DLC, SDValue &SWZ) const {
1414   SDValue Ptr, Offen, Idxen, Addr64;
1415 
1416   // addr64 bit was removed for volcanic islands.
1417   // FIXME: This should be a pattern predicate and not reach here
1418   if (!Subtarget->hasAddr64())
1419     return false;
1420 
1421   if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
1422               GLC, SLC, TFE, DLC, SWZ))
1423     return false;
1424 
1425   ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
1426   if (C->getSExtValue()) {
1427     SDLoc DL(Addr);
1428 
1429     const SITargetLowering& Lowering =
1430       *static_cast<const SITargetLowering*>(getTargetLowering());
1431 
1432     SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
1433     return true;
1434   }
1435 
1436   return false;
1437 }
1438 
1439 bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
1440                                            SDValue &VAddr, SDValue &SOffset,
1441                                            SDValue &Offset,
1442                                            SDValue &SLC) const {
1443   SLC = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i1);
1444   SDValue GLC, TFE, DLC, SWZ;
1445 
1446   return SelectMUBUFAddr64(Addr, SRsrc, VAddr, SOffset, Offset, GLC, SLC, TFE, DLC, SWZ);
1447 }
1448 
1449 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
1450   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
1451   return PSV && PSV->isStack();
1452 }
1453 
1454 std::pair<SDValue, SDValue> AMDGPUDAGToDAGISel::foldFrameIndex(SDValue N) const {
1455   SDLoc DL(N);
1456   const MachineFunction &MF = CurDAG->getMachineFunction();
1457   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1458 
1459   if (auto FI = dyn_cast<FrameIndexSDNode>(N)) {
1460     SDValue TFI = CurDAG->getTargetFrameIndex(FI->getIndex(),
1461                                               FI->getValueType(0));
1462 
1463     // If we can resolve this to a frame index access, this will be relative to
1464     // either the stack or frame pointer SGPR.
1465     return std::make_pair(
1466         TFI, CurDAG->getRegister(Info->getStackPtrOffsetReg(), MVT::i32));
1467   }
1468 
1469   // If we don't know this private access is a local stack object, it needs to
1470   // be relative to the entry point's scratch wave offset.
1471   return std::make_pair(N, CurDAG->getTargetConstant(0, DL, MVT::i32));
1472 }
1473 
// Match a private (scratch) access in the MUBUF "offen" form: scratch
// resource, VGPR address, SGPR offset, and a legal immediate offset. Always
// succeeds via the fallback at the end.
bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffen(SDNode *Parent,
                                                 SDValue Addr, SDValue &Rsrc,
                                                 SDValue &VAddr, SDValue &SOffset,
                                                 SDValue &ImmOffset) const {

  SDLoc DL(Addr);
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  Rsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);

  if (ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    int64_t Imm = CAddr->getSExtValue();
    const int64_t NullPtr =
        AMDGPUTargetMachine::getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS);
    // Don't fold null pointer.
    if (Imm != NullPtr) {
      // Materialize the bits above 4095 in a VGPR; the low 12 bits go into
      // the immediate offset field.
      SDValue HighBits = CurDAG->getTargetConstant(Imm & ~4095, DL, MVT::i32);
      MachineSDNode *MovHighBits = CurDAG->getMachineNode(
        AMDGPU::V_MOV_B32_e32, DL, MVT::i32, HighBits);
      VAddr = SDValue(MovHighBits, 0);

      // In a call sequence, stores to the argument stack area are relative to the
      // stack pointer.
      const MachinePointerInfo &PtrInfo
        = cast<MemSDNode>(Parent)->getPointerInfo();
      SOffset = isStackPtrRelative(PtrInfo)
        ? CurDAG->getRegister(Info->getStackPtrOffsetReg(), MVT::i32)
        : CurDAG->getTargetConstant(0, DL, MVT::i32);
      ImmOffset = CurDAG->getTargetConstant(Imm & 4095, DL, MVT::i16);
      return true;
    }
  }

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    // (add n0, c1)

    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);

    // Offsets in vaddr must be positive if range checking is enabled.
    //
    // The total computation of vaddr + soffset + offset must not overflow.  If
    // vaddr is negative, even if offset is 0 the sgpr offset add will end up
    // overflowing.
    //
    // Prior to gfx9, MUBUF instructions with the vaddr offset enabled would
    // always perform a range check. If a negative vaddr base index was used,
    // this would fail the range check. The overall address computation would
    // compute a valid address, but this doesn't happen due to the range
    // check. For out-of-bounds MUBUF loads, a 0 is returned.
    //
    // Therefore it should be safe to fold any VGPR offset on gfx9 into the
    // MUBUF vaddr, but not on older subtargets which can only do this if the
    // sign bit is known 0.
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue()) &&
        (!Subtarget->privateMemoryResourceIsRangeChecked() ||
         CurDAG->SignBitIsZero(N0))) {
      std::tie(VAddr, SOffset) = foldFrameIndex(N0);
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // (node)
  std::tie(VAddr, SOffset) = foldFrameIndex(Addr);
  ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  return true;
}
1544 
1545 bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffset(SDNode *Parent,
1546                                                   SDValue Addr,
1547                                                   SDValue &SRsrc,
1548                                                   SDValue &SOffset,
1549                                                   SDValue &Offset) const {
1550   ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr);
1551   if (!CAddr || !SIInstrInfo::isLegalMUBUFImmOffset(CAddr->getZExtValue()))
1552     return false;
1553 
1554   SDLoc DL(Addr);
1555   MachineFunction &MF = CurDAG->getMachineFunction();
1556   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1557 
1558   SRsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
1559 
1560   const MachinePointerInfo &PtrInfo = cast<MemSDNode>(Parent)->getPointerInfo();
1561 
1562   // FIXME: Get from MachinePointerInfo? We should only be using the frame
1563   // offset if we know this is in a call sequence.
1564   SOffset = isStackPtrRelative(PtrInfo)
1565                 ? CurDAG->getRegister(Info->getStackPtrOffsetReg(), MVT::i32)
1566                 : CurDAG->getTargetConstant(0, DL, MVT::i32);
1567 
1568   Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
1569   return true;
1570 }
1571 
// Match Addr in the offset-only MUBUF form (no VGPR address, no index, no
// addr64): build a default resource descriptor around the uniform pointer.
bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &SOffset, SDValue &Offset,
                                           SDValue &GLC, SDValue &SLC,
                                           SDValue &TFE, SDValue &DLC,
                                           SDValue &SWZ) const {
  SDValue Ptr, VAddr, Offen, Idxen, Addr64;
  const SIInstrInfo *TII =
    static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());

  if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
              GLC, SLC, TFE, DLC, SWZ))
    return false;

  // Only accept the plain offset addressing mode: all of offen, idxen, and
  // addr64 must be 0.
  if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
      !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
      !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
    // Default data format with an all-ones size field.
    uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
                    APInt::getAllOnesValue(32).getZExtValue(); // Size
    SDLoc DL(Addr);

    const SITargetLowering& Lowering =
      *static_cast<const SITargetLowering*>(getTargetLowering());

    SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
    return true;
  }
  return false;
}
1600 
1601 bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
1602                                            SDValue &Soffset, SDValue &Offset
1603                                            ) const {
1604   SDValue GLC, SLC, TFE, DLC, SWZ;
1605 
1606   return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE, DLC, SWZ);
1607 }
1608 bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
1609                                            SDValue &Soffset, SDValue &Offset,
1610                                            SDValue &SLC) const {
1611   SDValue GLC, TFE, DLC, SWZ;
1612 
1613   return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE, DLC, SWZ);
1614 }
1615 
1616 // Find a load or store from corresponding pattern root.
1617 // Roots may be build_vector, bitconvert or their combinations.
1618 static MemSDNode* findMemSDNode(SDNode *N) {
1619   N = AMDGPUTargetLowering::stripBitcast(SDValue(N,0)).getNode();
1620   if (MemSDNode *MN = dyn_cast<MemSDNode>(N))
1621     return MN;
1622   assert(isa<BuildVectorSDNode>(N));
1623   for (SDValue V : N->op_values())
1624     if (MemSDNode *MN =
1625           dyn_cast<MemSDNode>(AMDGPUTargetLowering::stripBitcast(V)))
1626       return MN;
1627   llvm_unreachable("cannot find MemSDNode in the pattern!");
1628 }
1629 
// Recognize a 64-bit base-plus-constant-offset address whose 64-bit `or` was
// split earlier into per-half operations. On success, N0 receives the node
// both split halves were extracted from (the base) and N1 the constant
// offset operand of the low-half `or`.
static bool getBaseWithOffsetUsingSplitOR(SelectionDAG &DAG, SDValue Addr,
                                          SDValue &N0, SDValue &N1) {
  if (Addr.getValueType() == MVT::i64 && Addr.getOpcode() == ISD::BITCAST &&
      Addr.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
    // As we split 64-bit `or` earlier, it's complicated pattern to match, i.e.
    // (i64 (bitcast (v2i32 (build_vector
    //                        (or (extract_vector_elt V, 0), OFFSET),
    //                        (extract_vector_elt V, 1)))))
    SDValue Lo = Addr.getOperand(0).getOperand(0);
    if (Lo.getOpcode() == ISD::OR && DAG.isBaseWithConstantOffset(Lo)) {
      SDValue BaseLo = Lo.getOperand(0);
      SDValue BaseHi = Addr.getOperand(0).getOperand(1);
      // Check that split base (Lo and Hi) are extracted from the same one.
      if (BaseLo.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          BaseHi.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          BaseLo.getOperand(0) == BaseHi.getOperand(0) &&
          // Lo is statically extracted from index 0.
          isa<ConstantSDNode>(BaseLo.getOperand(1)) &&
          BaseLo.getConstantOperandVal(1) == 0 &&
          // Hi is statically extracted from index 1.
          isa<ConstantSDNode>(BaseHi.getOperand(1)) &&
          BaseHi.getConstantOperandVal(1) == 1) {
        N0 = BaseLo.getOperand(0).getOperand(0);
        N1 = Lo.getOperand(1);
        return true;
      }
    }
  }
  return false;
}
1660 
// Match a FLAT-family address as a base register (VAddr) plus an immediate
// offset that fits the instruction's offset field. If the constant offset
// is too large, the part that fits stays in the offset field and the
// remainder is added back onto the base with a 64-bit VALU add sequence.
template <bool IsSigned>
bool AMDGPUDAGToDAGISel::SelectFlatOffset(SDNode *N,
                                          SDValue Addr,
                                          SDValue &VAddr,
                                          SDValue &Offset,
                                          SDValue &SLC) const {
  int64_t OffsetVal = 0;

  // Offsets can only be folded when the subtarget supports them, and (when
  // the segment-offset bug is present) not for true FLAT address space.
  if (Subtarget->hasFlatInstOffsets() &&
      (!Subtarget->hasFlatSegmentOffsetBug() ||
       findMemSDNode(N)->getAddressSpace() != AMDGPUAS::FLAT_ADDRESS)) {
    SDValue N0, N1;
    if (CurDAG->isBaseWithConstantOffset(Addr)) {
      N0 = Addr.getOperand(0);
      N1 = Addr.getOperand(1);
    } else if (getBaseWithOffsetUsingSplitOR(*CurDAG, Addr, N0, N1)) {
      assert(N0 && N1 && isa<ConstantSDNode>(N1));
    }
    if (N0 && N1) {
      uint64_t COffsetVal = cast<ConstantSDNode>(N1)->getSExtValue();

      const SIInstrInfo *TII = Subtarget->getInstrInfo();
      unsigned AS = findMemSDNode(N)->getAddressSpace();
      if (TII->isLegalFLATOffset(COffsetVal, AS, IsSigned)) {
        // The whole constant fits in the offset field.
        Addr = N0;
        OffsetVal = COffsetVal;
      } else {
        // If the offset doesn't fit, put the low bits into the offset field and
        // add the rest.
        //
        // For a FLAT instruction the hardware decides whether to access
        // global/scratch/shared memory based on the high bits of vaddr,
        // ignoring the offset field, so we have to ensure that when we add
        // remainder to vaddr it still points into the same underlying object.
        // The easiest way to do that is to make sure that we split the offset
        // into two pieces that are both >= 0 or both <= 0.

        SDLoc DL(N);
        uint64_t RemainderOffset = COffsetVal;
        uint64_t ImmField = 0;
        const unsigned NumBits = TII->getNumFlatOffsetBits(AS, IsSigned);
        if (IsSigned) {
          // Use signed division by a power of two to truncate towards 0.
          int64_t D = 1LL << (NumBits - 1);
          RemainderOffset = (static_cast<int64_t>(COffsetVal) / D) * D;
          ImmField = COffsetVal - RemainderOffset;
        } else if (static_cast<int64_t>(COffsetVal) >= 0) {
          ImmField = COffsetVal & maskTrailingOnes<uint64_t>(NumBits);
          RemainderOffset = COffsetVal - ImmField;
        }
        assert(TII->isLegalFLATOffset(ImmField, AS, IsSigned));
        assert(RemainderOffset + ImmField == COffsetVal);

        OffsetVal = ImmField;

        // TODO: Should this try to use a scalar add pseudo if the base address
        // is uniform and saddr is usable?
        SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
        SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);

        // Split the 64-bit base into halves, add the remainder with a
        // 32-bit add + add-with-carry, then reassemble a 64-bit vaddr.
        SDNode *N0Lo = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                              MVT::i32, N0, Sub0);
        SDNode *N0Hi = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                              MVT::i32, N0, Sub1);

        SDValue AddOffsetLo =
            getMaterializedScalarImm32(Lo_32(RemainderOffset), DL);
        SDValue AddOffsetHi =
            getMaterializedScalarImm32(Hi_32(RemainderOffset), DL);

        SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i1);
        SDValue Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);

        SDNode *Add =
            CurDAG->getMachineNode(AMDGPU::V_ADD_CO_U32_e64, DL, VTs,
                                   {AddOffsetLo, SDValue(N0Lo, 0), Clamp});

        SDNode *Addc = CurDAG->getMachineNode(
            AMDGPU::V_ADDC_U32_e64, DL, VTs,
            {AddOffsetHi, SDValue(N0Hi, 0), SDValue(Add, 1), Clamp});

        SDValue RegSequenceArgs[] = {
            CurDAG->getTargetConstant(AMDGPU::VReg_64RegClassID, DL, MVT::i32),
            SDValue(Add, 0), Sub0, SDValue(Addc, 0), Sub1};

        Addr = SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                              MVT::i64, RegSequenceArgs),
                       0);
      }
    }
  }

  VAddr = Addr;
  Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i16);
  SLC = CurDAG->getTargetConstant(0, SDLoc(), MVT::i1);
  return true;
}
1758 
1759 bool AMDGPUDAGToDAGISel::SelectFlatAtomic(SDNode *N,
1760                                           SDValue Addr,
1761                                           SDValue &VAddr,
1762                                           SDValue &Offset,
1763                                           SDValue &SLC) const {
1764   return SelectFlatOffset<false>(N, Addr, VAddr, Offset, SLC);
1765 }
1766 
1767 bool AMDGPUDAGToDAGISel::SelectFlatAtomicSigned(SDNode *N,
1768                                                 SDValue Addr,
1769                                                 SDValue &VAddr,
1770                                                 SDValue &Offset,
1771                                                 SDValue &SLC) const {
1772   return SelectFlatOffset<true>(N, Addr, VAddr, Offset, SLC);
1773 }
1774 
// Match the offset operand of an SMRD load. On success, Offset holds either
// an encoded immediate (Imm set to true) or a 32-bit SGPR value (Imm left
// false). Returns false if the node cannot be used as an SMRD offset at all.
bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode,
                                          SDValue &Offset, bool &Imm) const {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ByteOffsetNode);
  if (!C) {
    // Non-constant offsets must be 32-bit scalar integers, used directly as
    // an SGPR offset.
    if (ByteOffsetNode.getValueType().isScalarInteger() &&
        ByteOffsetNode.getValueType().getSizeInBits() == 32) {
      Offset = ByteOffsetNode;
      Imm = false;
      return true;
    }
    // A zero-extended 32-bit value can use its un-extended source directly.
    if (ByteOffsetNode.getOpcode() == ISD::ZERO_EXTEND) {
      if (ByteOffsetNode.getOperand(0).getValueType().getSizeInBits() == 32) {
        Offset = ByteOffsetNode.getOperand(0);
        Imm = false;
        return true;
      }
    }
    return false;
  }

  SDLoc SL(ByteOffsetNode);
  // GFX9 and GFX10 have signed byte immediate offsets.
  int64_t ByteOffset = C->getSExtValue();
  Optional<int64_t> EncodedOffset =
      AMDGPU::getSMRDEncodedOffset(*Subtarget, ByteOffset, false);
  if (EncodedOffset) {
    Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
    Imm = true;
    return true;
  }

  // SGPR and literal offsets are unsigned.
  if (ByteOffset < 0)
    return false;

  // Imm is deliberately left unchanged here (callers initialize it to
  // false): a 32-bit literal offset is not an encoded immediate.
  EncodedOffset = AMDGPU::getSMRDEncodedLiteralOffset32(*Subtarget, ByteOffset);
  if (EncodedOffset) {
    Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
    return true;
  }

  if (!isUInt<32>(ByteOffset) && !isInt<32>(ByteOffset))
    return false;

  // Fall back to materializing the constant into an SGPR.
  SDValue C32Bit = CurDAG->getTargetConstant(ByteOffset, SL, MVT::i32);
  Offset = SDValue(
      CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, C32Bit), 0);

  return true;
}
1825 
1826 SDValue AMDGPUDAGToDAGISel::Expand32BitAddress(SDValue Addr) const {
1827   if (Addr.getValueType() != MVT::i32)
1828     return Addr;
1829 
1830   // Zero-extend a 32-bit address.
1831   SDLoc SL(Addr);
1832 
1833   const MachineFunction &MF = CurDAG->getMachineFunction();
1834   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1835   unsigned AddrHiVal = Info->get32BitAddressHighBits();
1836   SDValue AddrHi = CurDAG->getTargetConstant(AddrHiVal, SL, MVT::i32);
1837 
1838   const SDValue Ops[] = {
1839     CurDAG->getTargetConstant(AMDGPU::SReg_64_XEXECRegClassID, SL, MVT::i32),
1840     Addr,
1841     CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32),
1842     SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, AddrHi),
1843             0),
1844     CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32),
1845   };
1846 
1847   return SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, SL, MVT::i64,
1848                                         Ops), 0);
1849 }
1850 
// Split an SMRD address into a (64-bit-expanded) base and an offset. Always
// succeeds: if no base+offset decomposition is possible, the whole address
// becomes the base with a zero immediate offset.
bool AMDGPUDAGToDAGISel::SelectSMRD(SDValue Addr, SDValue &SBase,
                                     SDValue &Offset, bool &Imm) const {
  SDLoc SL(Addr);

  // A 32-bit (address + offset) should not cause unsigned 32-bit integer
  // wraparound, because s_load instructions perform the addition in 64 bits.
  if ((Addr.getValueType() != MVT::i32 ||
       Addr->getFlags().hasNoUnsignedWrap())) {
    SDValue N0, N1;
    // Extract the base and offset if possible.
    if (CurDAG->isBaseWithConstantOffset(Addr) ||
        Addr.getOpcode() == ISD::ADD) {
      N0 = Addr.getOperand(0);
      N1 = Addr.getOperand(1);
    } else if (getBaseWithOffsetUsingSplitOR(*CurDAG, Addr, N0, N1)) {
      assert(N0 && N1 && isa<ConstantSDNode>(N1));
    }
    if (N0 && N1) {
      if (SelectSMRDOffset(N1, Offset, Imm)) {
        SBase = Expand32BitAddress(N0);
        return true;
      }
    }
  }
  // Fallback: use the whole address as the base with no offset.
  SBase = Expand32BitAddress(Addr);
  Offset = CurDAG->getTargetConstant(0, SL, MVT::i32);
  Imm = true;
  return true;
}
1880 
1881 bool AMDGPUDAGToDAGISel::SelectSMRDImm(SDValue Addr, SDValue &SBase,
1882                                        SDValue &Offset) const {
1883   bool Imm = false;
1884   return SelectSMRD(Addr, SBase, Offset, Imm) && Imm;
1885 }
1886 
1887 bool AMDGPUDAGToDAGISel::SelectSMRDImm32(SDValue Addr, SDValue &SBase,
1888                                          SDValue &Offset) const {
1889 
1890   assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
1891 
1892   bool Imm = false;
1893   if (!SelectSMRD(Addr, SBase, Offset, Imm))
1894     return false;
1895 
1896   return !Imm && isa<ConstantSDNode>(Offset);
1897 }
1898 
1899 bool AMDGPUDAGToDAGISel::SelectSMRDSgpr(SDValue Addr, SDValue &SBase,
1900                                         SDValue &Offset) const {
1901   bool Imm = false;
1902   return SelectSMRD(Addr, SBase, Offset, Imm) && !Imm &&
1903          !isa<ConstantSDNode>(Offset);
1904 }
1905 
1906 bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm(SDValue Addr,
1907                                              SDValue &Offset) const {
1908   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr)) {
1909     // The immediate offset for S_BUFFER instructions is unsigned.
1910     if (auto Imm =
1911             AMDGPU::getSMRDEncodedOffset(*Subtarget, C->getZExtValue(), true)) {
1912       Offset = CurDAG->getTargetConstant(*Imm, SDLoc(Addr), MVT::i32);
1913       return true;
1914     }
1915   }
1916 
1917   return false;
1918 }
1919 
1920 bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm32(SDValue Addr,
1921                                                SDValue &Offset) const {
1922   assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
1923 
1924   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr)) {
1925     if (auto Imm = AMDGPU::getSMRDEncodedLiteralOffset32(*Subtarget,
1926                                                          C->getZExtValue())) {
1927       Offset = CurDAG->getTargetConstant(*Imm, SDLoc(Addr), MVT::i32);
1928       return true;
1929     }
1930   }
1931 
1932   return false;
1933 }
1934 
// Split a MOVREL index into a variable base and a constant offset. Fails
// only when the index is entirely constant; otherwise the offset may be
// folded or left as zero.
bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index,
                                            SDValue &Base,
                                            SDValue &Offset) const {
  SDLoc DL(Index);

  if (CurDAG->isBaseWithConstantOffset(Index)) {
    SDValue N0 = Index.getOperand(0);
    SDValue N1 = Index.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    // (add n0, c0)
    // Don't peel off the offset (c0) if doing so could possibly lead
    // the base (n0) to be negative.
    // (or n0, |c0|) can never change a sign given isBaseWithConstantOffset.
    if (C1->getSExtValue() <= 0 || CurDAG->SignBitIsZero(N0) ||
        (Index->getOpcode() == ISD::OR && C1->getSExtValue() >= 0)) {
      Base = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32);
      return true;
    }
  }

  // A fully-constant index has no variable base to use.
  if (isa<ConstantSDNode>(Index))
    return false;

  Base = Index;
  Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  return true;
}
1964 
1965 SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, const SDLoc &DL,
1966                                      SDValue Val, uint32_t Offset,
1967                                      uint32_t Width) {
1968   // Transformation function, pack the offset and width of a BFE into
1969   // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
1970   // source, bits [5:0] contain the offset and bits [22:16] the width.
1971   uint32_t PackedVal = Offset | (Width << 16);
1972   SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32);
1973 
1974   return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
1975 }
1976 
1977 void AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
1978   // "(a << b) srl c)" ---> "BFE_U32 a, (c-b), (32-c)
1979   // "(a << b) sra c)" ---> "BFE_I32 a, (c-b), (32-c)
1980   // Predicate: 0 < b <= c < 32
1981 
1982   const SDValue &Shl = N->getOperand(0);
1983   ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
1984   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
1985 
1986   if (B && C) {
1987     uint32_t BVal = B->getZExtValue();
1988     uint32_t CVal = C->getZExtValue();
1989 
1990     if (0 < BVal && BVal <= CVal && CVal < 32) {
1991       bool Signed = N->getOpcode() == ISD::SRA;
1992       unsigned Opcode = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
1993 
1994       ReplaceNode(N, getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0), CVal - BVal,
1995                               32 - CVal));
1996       return;
1997     }
1998   }
1999   SelectCode(N);
2000 }
2001 
// Try to select N (and/srl/sra/sext_inreg) as a single S_BFE_U32/S_BFE_I32
// bitfield extract; fall through to normal tablegen selection otherwise.
void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::AND:
    if (N->getOperand(0).getOpcode() == ISD::SRL) {
      // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
      // Predicate: isMask(mask)
      const SDValue &Srl = N->getOperand(0);
      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));

      if (Shift && Mask) {
        uint32_t ShiftVal = Shift->getZExtValue();
        uint32_t MaskVal = Mask->getZExtValue();

        if (isMask_32(MaskVal)) {
          // A contiguous low mask: the popcount gives the field width.
          uint32_t WidthVal = countPopulation(MaskVal);

          ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
                                  Srl.getOperand(0), ShiftVal, WidthVal));
          return;
        }
      }
    }
    break;
  case ISD::SRL:
    if (N->getOperand(0).getOpcode() == ISD::AND) {
      // "(a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)"
      // Predicate: isMask(mask >> b)
      const SDValue &And = N->getOperand(0);
      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));

      if (Shift && Mask) {
        uint32_t ShiftVal = Shift->getZExtValue();
        // The shifted mask must still be contiguous for a BFE to apply.
        uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;

        if (isMask_32(MaskVal)) {
          uint32_t WidthVal = countPopulation(MaskVal);

          ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
                                  And.getOperand(0), ShiftVal, WidthVal));
          return;
        }
      }
    } else if (N->getOperand(0).getOpcode() == ISD::SHL) {
      // Handled by the shared shl+shr folding helper.
      SelectS_BFEFromShifts(N);
      return;
    }
    break;
  case ISD::SRA:
    if (N->getOperand(0).getOpcode() == ISD::SHL) {
      SelectS_BFEFromShifts(N);
      return;
    }
    break;

  case ISD::SIGN_EXTEND_INREG: {
    // sext_inreg (srl x, 16), i8 -> bfe_i32 x, 16, 8
    SDValue Src = N->getOperand(0);
    if (Src.getOpcode() != ISD::SRL)
      break;

    const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
    if (!Amt)
      break;

    // Field width comes from the sign-extension's source VT.
    unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
    ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_I32, SDLoc(N), Src.getOperand(0),
                            Amt->getZExtValue(), Width));
    return;
  }
  }

  SelectCode(N);
}
2077 
2078 bool AMDGPUDAGToDAGISel::isCBranchSCC(const SDNode *N) const {
2079   assert(N->getOpcode() == ISD::BRCOND);
2080   if (!N->hasOneUse())
2081     return false;
2082 
2083   SDValue Cond = N->getOperand(1);
2084   if (Cond.getOpcode() == ISD::CopyToReg)
2085     Cond = Cond.getOperand(2);
2086 
2087   if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
2088     return false;
2089 
2090   MVT VT = Cond.getOperand(0).getSimpleValueType();
2091   if (VT == MVT::i32)
2092     return true;
2093 
2094   if (VT == MVT::i64) {
2095     auto ST = static_cast<const GCNSubtarget *>(Subtarget);
2096 
2097     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
2098     return (CC == ISD::SETEQ || CC == ISD::SETNE) && ST->hasScalarCompareEq64();
2099   }
2100 
2101   return false;
2102 }
2103 
// Select a BRCOND either as a branch on SCC (uniform scalar condition) or as
// a branch on VCC, masking the condition with exec in the VCC case.
void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
  SDValue Cond = N->getOperand(1);

  // An undefined condition branches nowhere in particular; use the dedicated
  // pseudo so later passes can handle it.
  if (Cond.isUndef()) {
    CurDAG->SelectNodeTo(N, AMDGPU::SI_BR_UNDEF, MVT::Other,
                         N->getOperand(2), N->getOperand(0));
    return;
  }

  const GCNSubtarget *ST = static_cast<const GCNSubtarget *>(Subtarget);
  const SIRegisterInfo *TRI = ST->getRegisterInfo();

  bool UseSCCBr = isCBranchSCC(N) && isUniformBr(N);
  unsigned BrOp = UseSCCBr ? AMDGPU::S_CBRANCH_SCC1 : AMDGPU::S_CBRANCH_VCCNZ;
  Register CondReg = UseSCCBr ? AMDGPU::SCC : TRI->getVCC();
  SDLoc SL(N);

  if (!UseSCCBr) {
    // This is the case that we are selecting to S_CBRANCH_VCCNZ.  We have not
    // analyzed what generates the vcc value, so we do not know whether vcc
    // bits for disabled lanes are 0.  Thus we need to mask out bits for
    // disabled lanes.
    //
    // For the case that we select S_CBRANCH_SCC1 and it gets
    // changed to S_CBRANCH_VCCNZ in SIFixSGPRCopies, SIFixSGPRCopies calls
    // SIInstrInfo::moveToVALU which inserts the S_AND).
    //
    // We could add an analysis of what generates the vcc value here and omit
    // the S_AND when is unnecessary. But it would be better to add a separate
    // pass after SIFixSGPRCopies to do the unnecessary S_AND removal, so it
    // catches both cases.
    Cond = SDValue(CurDAG->getMachineNode(ST->isWave32() ? AMDGPU::S_AND_B32
                                                         : AMDGPU::S_AND_B64,
                     SL, MVT::i1,
                     CurDAG->getRegister(ST->isWave32() ? AMDGPU::EXEC_LO
                                                        : AMDGPU::EXEC,
                                         MVT::i1),
                    Cond),
                   0);
  }

  // Copy the (possibly masked) condition into SCC/VCC, then branch on it.
  SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, CondReg, Cond);
  CurDAG->SelectNodeTo(N, BrOp, MVT::Other,
                       N->getOperand(2), // Basic Block
                       VCC.getValue(0));
}
2150 
// Select an f32 FMA/FMAD as v_fma_mix_f32 / v_mad_mix_f32 when at least one
// source can absorb an f16->f32 conversion via the mix modifiers; otherwise
// fall back to normal selection.
void AMDGPUDAGToDAGISel::SelectFMAD_FMA(SDNode *N) {
  MVT VT = N->getSimpleValueType(0);
  bool IsFMA = N->getOpcode() == ISD::FMA;
  // Bail out unless the result is f32 and the subtarget has the mix
  // instruction matching this opcode (FMA pairs with fma_mix, FMAD with
  // mad_mix — the third clause rejects the mismatched combination).
  if (VT != MVT::f32 || (!Subtarget->hasMadMixInsts() &&
                         !Subtarget->hasFmaMixInsts()) ||
      ((IsFMA && Subtarget->hasMadMixInsts()) ||
       (!IsFMA && Subtarget->hasFmaMixInsts()))) {
    SelectCode(N);
    return;
  }

  SDValue Src0 = N->getOperand(0);
  SDValue Src1 = N->getOperand(1);
  SDValue Src2 = N->getOperand(2);
  unsigned Src0Mods, Src1Mods, Src2Mods;

  // Avoid using v_mad_mix_f32/v_fma_mix_f32 unless there is actually an operand
  // using the conversion from f16.
  bool Sel0 = SelectVOP3PMadMixModsImpl(Src0, Src0, Src0Mods);
  bool Sel1 = SelectVOP3PMadMixModsImpl(Src1, Src1, Src1Mods);
  bool Sel2 = SelectVOP3PMadMixModsImpl(Src2, Src2, Src2Mods);

  assert((IsFMA || !Mode.allFP32Denormals()) &&
         "fmad selected with denormals enabled");
  // TODO: We can select this with f32 denormals enabled if all the sources are
  // converted from f16 (in which case fmad isn't legal).

  if (Sel0 || Sel1 || Sel2) {
    // For dummy operands.
    SDValue Zero = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
    SDValue Ops[] = {
      CurDAG->getTargetConstant(Src0Mods, SDLoc(), MVT::i32), Src0,
      CurDAG->getTargetConstant(Src1Mods, SDLoc(), MVT::i32), Src1,
      CurDAG->getTargetConstant(Src2Mods, SDLoc(), MVT::i32), Src2,
      CurDAG->getTargetConstant(0, SDLoc(), MVT::i1),
      Zero, Zero
    };

    CurDAG->SelectNodeTo(N,
                         IsFMA ? AMDGPU::V_FMA_MIX_F32 : AMDGPU::V_MAD_MIX_F32,
                         MVT::f32, Ops);
  } else {
    SelectCode(N);
  }
}
2196 
// This is here because there isn't a way to use the generated sub0_sub1 as the
// subreg index to EXTRACT_SUBREG in tablegen.
//
// Selects a buffer atomic compare-and-swap: tries the ADDR64 form first
// (when the subtarget has it), then the offset-only form, and finally falls
// back to normal selection (always used for FLAT address space). The RTN
// instruction returns the full data register pair, so the old value is
// extracted from sub0 (32-bit) or sub0_sub1 (64-bit).
void AMDGPUDAGToDAGISel::SelectATOMIC_CMP_SWAP(SDNode *N) {
  MemSDNode *Mem = cast<MemSDNode>(N);
  unsigned AS = Mem->getAddressSpace();
  if (AS == AMDGPUAS::FLAT_ADDRESS) {
    SelectCode(N);
    return;
  }

  MVT VT = N->getSimpleValueType(0);
  bool Is32 = (VT == MVT::i32);
  SDLoc SL(N);

  MachineSDNode *CmpSwap = nullptr;
  if (Subtarget->hasAddr64()) {
    SDValue SRsrc, VAddr, SOffset, Offset, SLC;

    if (SelectMUBUFAddr64(Mem->getBasePtr(), SRsrc, VAddr, SOffset, Offset, SLC)) {
      unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN :
        AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN;
      SDValue CmpVal = Mem->getOperand(2);

      // XXX - Do we care about glue operands?

      SDValue Ops[] = {
        CmpVal, VAddr, SRsrc, SOffset, Offset, SLC, Mem->getChain()
      };

      CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
    }
  }

  // Second attempt: offset-only addressing.
  if (!CmpSwap) {
    SDValue SRsrc, SOffset, Offset, SLC;
    if (SelectMUBUFOffset(Mem->getBasePtr(), SRsrc, SOffset, Offset, SLC)) {
      unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN :
        AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN;

      SDValue CmpVal = Mem->getOperand(2);
      SDValue Ops[] = {
        CmpVal, SRsrc, SOffset, Offset, SLC, Mem->getChain()
      };

      CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
    }
  }

  if (!CmpSwap) {
    SelectCode(N);
    return;
  }

  MachineMemOperand *MMO = Mem->getMemOperand();
  CurDAG->setNodeMemRefs(CmpSwap, {MMO});

  // Extract the previous value from the returned data register pair.
  unsigned SubReg = Is32 ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  SDValue Extract
    = CurDAG->getTargetExtractSubreg(SubReg, SL, VT, SDValue(CmpSwap, 0));

  ReplaceUses(SDValue(N, 0), Extract);
  ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 1));
  CurDAG->RemoveDeadNode(N);
}
2261 
// Select ds_append / ds_consume. The pointer is moved into M0 (via a glued
// copy); a legal constant offset may be peeled off into the instruction's
// offset field.
void AMDGPUDAGToDAGISel::SelectDSAppendConsume(SDNode *N, unsigned IntrID) {
  // The address is assumed to be uniform, so if it ends up in a VGPR, it will
  // be copied to an SGPR with readfirstlane.
  unsigned Opc = IntrID == Intrinsic::amdgcn_ds_append ?
    AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;

  SDValue Chain = N->getOperand(0);
  SDValue Ptr = N->getOperand(2);
  MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
  MachineMemOperand *MMO = M->getMemOperand();
  bool IsGDS = M->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;

  SDValue Offset;
  if (CurDAG->isBaseWithConstantOffset(Ptr)) {
    SDValue PtrBase = Ptr.getOperand(0);
    SDValue PtrOffset = Ptr.getOperand(1);

    // Fold the constant part into the offset field when legal; only the base
    // then needs to go through M0.
    const APInt &OffsetVal = cast<ConstantSDNode>(PtrOffset)->getAPIntValue();
    if (isDSOffsetLegal(PtrBase, OffsetVal.getZExtValue(), 16)) {
      N = glueCopyToM0(N, PtrBase);
      Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i32);
    }
  }

  // No foldable offset: the whole pointer goes through M0.
  if (!Offset) {
    N = glueCopyToM0(N, Ptr);
    Offset = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
  }

  SDValue Ops[] = {
    Offset,
    CurDAG->getTargetConstant(IsGDS, SDLoc(), MVT::i32),
    Chain,
    N->getOperand(N->getNumOperands() - 1) // New glue
  };

  SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
}
2301 
2302 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
2303   switch (IntrID) {
2304   case Intrinsic::amdgcn_ds_gws_init:
2305     return AMDGPU::DS_GWS_INIT;
2306   case Intrinsic::amdgcn_ds_gws_barrier:
2307     return AMDGPU::DS_GWS_BARRIER;
2308   case Intrinsic::amdgcn_ds_gws_sema_v:
2309     return AMDGPU::DS_GWS_SEMA_V;
2310   case Intrinsic::amdgcn_ds_gws_sema_br:
2311     return AMDGPU::DS_GWS_SEMA_BR;
2312   case Intrinsic::amdgcn_ds_gws_sema_p:
2313     return AMDGPU::DS_GWS_SEMA_P;
2314   case Intrinsic::amdgcn_ds_gws_sema_release_all:
2315     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
2316   default:
2317     llvm_unreachable("not a gws intrinsic");
2318   }
2319 }
2320 
// Select a GWS (global wave sync) intrinsic. The resource-id offset is split
// between M0 and the instruction's immediate offset field; variable offsets
// are shifted into place in M0.
void AMDGPUDAGToDAGISel::SelectDS_GWS(SDNode *N, unsigned IntrID) {
  if (IntrID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
      !Subtarget->hasGWSSemaReleaseAll()) {
    // Let this error.
    SelectCode(N);
    return;
  }

  // Chain, intrinsic ID, vsrc, offset
  const bool HasVSrc = N->getNumOperands() == 4;
  assert(HasVSrc || N->getNumOperands() == 3);

  SDLoc SL(N);
  SDValue BaseOffset = N->getOperand(HasVSrc ? 3 : 2);
  int ImmOffset = 0;
  MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
  MachineMemOperand *MMO = M->getMemOperand();

  // Don't worry if the offset ends up in a VGPR. Only one lane will have
  // effect, so SIFixSGPRCopies will validly insert readfirstlane.

  // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
  // offset field) % 64. Some versions of the programming guide omit the m0
  // part, or claim it's from offset 0.
  if (ConstantSDNode *ConstOffset = dyn_cast<ConstantSDNode>(BaseOffset)) {
    // If we have a constant offset, try to use the 0 in m0 as the base.
    // TODO: Look into changing the default m0 initialization value. If the
    // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
    // the immediate offset.
    glueCopyToM0(N, CurDAG->getTargetConstant(0, SL, MVT::i32));
    ImmOffset = ConstOffset->getZExtValue();
  } else {
    // Peel a constant addend off the variable offset into the immediate
    // field, if present.
    if (CurDAG->isBaseWithConstantOffset(BaseOffset)) {
      ImmOffset = BaseOffset.getConstantOperandVal(1);
      BaseOffset = BaseOffset.getOperand(0);
    }

    // Prefer to do the shift in an SGPR since it should be possible to use m0
    // as the result directly. If it's already an SGPR, it will be eliminated
    // later.
    SDNode *SGPROffset
      = CurDAG->getMachineNode(AMDGPU::V_READFIRSTLANE_B32, SL, MVT::i32,
                               BaseOffset);
    // Shift to offset in m0
    SDNode *M0Base
      = CurDAG->getMachineNode(AMDGPU::S_LSHL_B32, SL, MVT::i32,
                               SDValue(SGPROffset, 0),
                               CurDAG->getTargetConstant(16, SL, MVT::i32));
    glueCopyToM0(N, SDValue(M0Base, 0));
  }

  SDValue Chain = N->getOperand(0);
  SDValue OffsetField = CurDAG->getTargetConstant(ImmOffset, SL, MVT::i32);

  // TODO: Can this just be removed from the instruction?
  SDValue GDS = CurDAG->getTargetConstant(1, SL, MVT::i1);

  const unsigned Opc = gwsIntrinToOpcode(IntrID);
  SmallVector<SDValue, 5> Ops;
  if (HasVSrc)
    Ops.push_back(N->getOperand(2));
  Ops.push_back(OffsetField);
  Ops.push_back(GDS);
  Ops.push_back(Chain);

  SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
}
2389 
// Select amdgcn_interp_p1_f16. On subtargets with 16 LDS banks this needs a
// two-instruction sequence (V_INTERP_MOV_F32 feeding V_INTERP_P1LV_F16) that
// the generated matcher cannot produce correctly, so it is built by hand here;
// otherwise the normal pattern handles it.
void AMDGPUDAGToDAGISel::SelectInterpP1F16(SDNode *N) {
  if (Subtarget->getLDSBankCount() != 16) {
    // This is a single instruction with a pattern.
    SelectCode(N);
    return;
  }

  SDLoc DL(N);

  // This requires 2 instructions. It is possible to write a pattern to support
  // this, but the generated isel emitter doesn't correctly deal with multiple
  // output instructions using the same physical register input. The copy to m0
  // is incorrectly placed before the second instruction.
  //
  // TODO: Match source modifiers.
  //
  // def : Pat <
  //   (int_amdgcn_interp_p1_f16
  //    (VOP3Mods f32:$src0, i32:$src0_modifiers),
  //                             (i32 timm:$attrchan), (i32 timm:$attr),
  //                             (i1 timm:$high), M0),
  //   (V_INTERP_P1LV_F16 $src0_modifiers, VGPR_32:$src0, timm:$attr,
  //       timm:$attrchan, 0,
  //       (V_INTERP_MOV_F32 2, timm:$attr, timm:$attrchan), timm:$high)> {
  //   let Predicates = [has16BankLDS];
  // }

  // 16 bank LDS
  // Copy operand 5 (the M0 value) into the physical M0 register; the glue
  // result orders both interp instructions after this copy.
  SDValue ToM0 = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, AMDGPU::M0,
                                      N->getOperand(5), SDValue());

  SDVTList VTs = CurDAG->getVTList(MVT::f32, MVT::Other);

  SDNode *InterpMov =
    CurDAG->getMachineNode(AMDGPU::V_INTERP_MOV_F32, DL, VTs, {
        CurDAG->getTargetConstant(2, DL, MVT::i32), // P0
        N->getOperand(3),  // Attr
        N->getOperand(2),  // Attrchan
        ToM0.getValue(1) // In glue
  });

  SDNode *InterpP1LV =
    CurDAG->getMachineNode(AMDGPU::V_INTERP_P1LV_F16, DL, MVT::f32, {
        CurDAG->getTargetConstant(0, DL, MVT::i32), // $src0_modifiers
        N->getOperand(1), // Src0
        N->getOperand(3), // Attr
        N->getOperand(2), // Attrchan
        CurDAG->getTargetConstant(0, DL, MVT::i32), // $src2_modifiers
        SDValue(InterpMov, 0), // Src2 - holds two f16 values selected by high
        N->getOperand(4), // high
        CurDAG->getTargetConstant(0, DL, MVT::i1), // $clamp
        CurDAG->getTargetConstant(0, DL, MVT::i32), // $omod
        SDValue(InterpMov, 1)
  });

  CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), SDValue(InterpP1LV, 0));
}
2447 
2448 void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) {
2449   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2450   switch (IntrID) {
2451   case Intrinsic::amdgcn_ds_append:
2452   case Intrinsic::amdgcn_ds_consume: {
2453     if (N->getValueType(0) != MVT::i32)
2454       break;
2455     SelectDSAppendConsume(N, IntrID);
2456     return;
2457   }
2458   }
2459 
2460   SelectCode(N);
2461 }
2462 
2463 void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
2464   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
2465   unsigned Opcode;
2466   switch (IntrID) {
2467   case Intrinsic::amdgcn_wqm:
2468     Opcode = AMDGPU::WQM;
2469     break;
2470   case Intrinsic::amdgcn_softwqm:
2471     Opcode = AMDGPU::SOFT_WQM;
2472     break;
2473   case Intrinsic::amdgcn_wwm:
2474     Opcode = AMDGPU::WWM;
2475     break;
2476   case Intrinsic::amdgcn_interp_p1_f16:
2477     SelectInterpP1F16(N);
2478     return;
2479   default:
2480     SelectCode(N);
2481     return;
2482   }
2483 
2484   SDValue Src = N->getOperand(1);
2485   CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), {Src});
2486 }
2487 
2488 void AMDGPUDAGToDAGISel::SelectINTRINSIC_VOID(SDNode *N) {
2489   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2490   switch (IntrID) {
2491   case Intrinsic::amdgcn_ds_gws_init:
2492   case Intrinsic::amdgcn_ds_gws_barrier:
2493   case Intrinsic::amdgcn_ds_gws_sema_v:
2494   case Intrinsic::amdgcn_ds_gws_sema_br:
2495   case Intrinsic::amdgcn_ds_gws_sema_p:
2496   case Intrinsic::amdgcn_ds_gws_sema_release_all:
2497     SelectDS_GWS(N, IntrID);
2498     return;
2499   default:
2500     break;
2501   }
2502 
2503   SelectCode(N);
2504 }
2505 
2506 bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
2507                                             unsigned &Mods) const {
2508   Mods = 0;
2509   Src = In;
2510 
2511   if (Src.getOpcode() == ISD::FNEG) {
2512     Mods |= SISrcMods::NEG;
2513     Src = Src.getOperand(0);
2514   }
2515 
2516   if (Src.getOpcode() == ISD::FABS) {
2517     Mods |= SISrcMods::ABS;
2518     Src = Src.getOperand(0);
2519   }
2520 
2521   return true;
2522 }
2523 
2524 bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
2525                                         SDValue &SrcMods) const {
2526   unsigned Mods;
2527   if (SelectVOP3ModsImpl(In, Src, Mods)) {
2528     SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2529     return true;
2530   }
2531 
2532   return false;
2533 }
2534 
2535 bool AMDGPUDAGToDAGISel::SelectVOP3Mods_NNaN(SDValue In, SDValue &Src,
2536                                              SDValue &SrcMods) const {
2537   SelectVOP3Mods(In, Src, SrcMods);
2538   return isNoNanSrc(Src);
2539 }
2540 
2541 bool AMDGPUDAGToDAGISel::SelectVOP3NoMods(SDValue In, SDValue &Src) const {
2542   if (In.getOpcode() == ISD::FABS || In.getOpcode() == ISD::FNEG)
2543     return false;
2544 
2545   Src = In;
2546   return true;
2547 }
2548 
2549 bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
2550                                          SDValue &SrcMods, SDValue &Clamp,
2551                                          SDValue &Omod) const {
2552   SDLoc DL(In);
2553   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2554   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2555 
2556   return SelectVOP3Mods(In, Src, SrcMods);
2557 }
2558 
2559 bool AMDGPUDAGToDAGISel::SelectVOP3OMods(SDValue In, SDValue &Src,
2560                                          SDValue &Clamp, SDValue &Omod) const {
2561   Src = In;
2562 
2563   SDLoc DL(In);
2564   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2565   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2566 
2567   return true;
2568 }
2569 
// Match source modifiers for packed (VOP3P) operands: a whole-vector fneg
// toggles both NEG bits, and for a BUILD_VECTOR of two halves the per-element
// fneg and hi/lo element selection are folded into NEG/NEG_HI and
// OP_SEL_0/OP_SEL_1. Always matches.
bool AMDGPUDAGToDAGISel::SelectVOP3PMods(SDValue In, SDValue &Src,
                                         SDValue &SrcMods) const {
  unsigned Mods = 0;
  Src = In;

  // fneg of the whole vector negates both halves.
  if (Src.getOpcode() == ISD::FNEG) {
    Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
    Src = Src.getOperand(0);
  }

  if (Src.getOpcode() == ISD::BUILD_VECTOR) {
    // Remember the mods accumulated so far in case the per-element match
    // below has to be abandoned.
    unsigned VecMods = Mods;

    SDValue Lo = stripBitcast(Src.getOperand(0));
    SDValue Hi = stripBitcast(Src.getOperand(1));

    if (Lo.getOpcode() == ISD::FNEG) {
      Lo = stripBitcast(Lo.getOperand(0));
      Mods ^= SISrcMods::NEG;
    }

    if (Hi.getOpcode() == ISD::FNEG) {
      Hi = stripBitcast(Hi.getOperand(0));
      Mods ^= SISrcMods::NEG_HI;
    }

    // op_sel bits pick the high half of the corresponding source register.
    if (isExtractHiElt(Lo, Lo))
      Mods |= SISrcMods::OP_SEL_0;

    if (isExtractHiElt(Hi, Hi))
      Mods |= SISrcMods::OP_SEL_1;

    Lo = stripExtractLoElt(Lo);
    Hi = stripExtractLoElt(Hi);

    if (Lo == Hi && !isInlineImmediate(Lo.getNode())) {
      // Really a scalar input. Just select from the low half of the register to
      // avoid packing.

      Src = Lo;
      SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
      return true;
    }

    // Halves are distinct values; give up on the per-element modifiers and
    // fall through with only the whole-vector mods.
    Mods = VecMods;
  }

  // Packed instructions do not have abs modifiers.
  Mods |= SISrcMods::OP_SEL_1;

  SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
  return true;
}
2623 
2624 bool AMDGPUDAGToDAGISel::SelectVOP3OpSel(SDValue In, SDValue &Src,
2625                                          SDValue &SrcMods) const {
2626   Src = In;
2627   // FIXME: Handle op_sel
2628   SrcMods = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
2629   return true;
2630 }
2631 
2632 bool AMDGPUDAGToDAGISel::SelectVOP3OpSelMods(SDValue In, SDValue &Src,
2633                                              SDValue &SrcMods) const {
2634   // FIXME: Handle op_sel
2635   return SelectVOP3Mods(In, Src, SrcMods);
2636 }
2637 
// Match mad-mix source modifiers, folding away an f16->f32 FP_EXTEND when
// present. The return value is not whether the match is possible (it always
// is), but whether a conversion from f16 was actually folded in.
bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src,
                                                   unsigned &Mods) const {
  Mods = 0;
  SelectVOP3ModsImpl(In, Src, Mods);

  if (Src.getOpcode() == ISD::FP_EXTEND) {
    Src = Src.getOperand(0);
    assert(Src.getValueType() == MVT::f16);
    Src = stripBitcast(Src);

    // Be careful about folding modifiers if we already have an abs. fneg is
    // applied last, so we don't want to apply an earlier fneg.
    if ((Mods & SISrcMods::ABS) == 0) {
      unsigned ModsTmp;
      SelectVOP3ModsImpl(Src, Src, ModsTmp);

      // Two fnegs cancel, so XOR rather than OR the NEG bit.
      if ((ModsTmp & SISrcMods::NEG) != 0)
        Mods ^= SISrcMods::NEG;

      if ((ModsTmp & SISrcMods::ABS) != 0)
        Mods |= SISrcMods::ABS;
    }

    // op_sel/op_sel_hi decide the source type and source.
    // If the source's op_sel_hi is set, it indicates to do a conversion from
    // fp16. If the source's op_sel is set, it picks the high half of the
    // source register.

    Mods |= SISrcMods::OP_SEL_1;
    if (isExtractHiElt(Src, Src)) {
      Mods |= SISrcMods::OP_SEL_0;

      // TODO: Should we try to look for neg/abs here?
    }

    return true;
  }

  return false;
}
2680 
2681 bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixMods(SDValue In, SDValue &Src,
2682                                                SDValue &SrcMods) const {
2683   unsigned Mods = 0;
2684   SelectVOP3PMadMixModsImpl(In, Src, Mods);
2685   SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2686   return true;
2687 }
2688 
2689 SDValue AMDGPUDAGToDAGISel::getHi16Elt(SDValue In) const {
2690   if (In.isUndef())
2691     return CurDAG->getUNDEF(MVT::i32);
2692 
2693   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(In)) {
2694     SDLoc SL(In);
2695     return CurDAG->getConstant(C->getZExtValue() << 16, SL, MVT::i32);
2696   }
2697 
2698   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(In)) {
2699     SDLoc SL(In);
2700     return CurDAG->getConstant(
2701       C->getValueAPF().bitcastToAPInt().getZExtValue() << 16, SL, MVT::i32);
2702   }
2703 
2704   SDValue Src;
2705   if (isExtractHiElt(In, Src))
2706     return Src;
2707 
2708   return SDValue();
2709 }
2710 
2711 bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode * N) const {
2712   assert(CurDAG->getTarget().getTargetTriple().getArch() == Triple::amdgcn);
2713 
2714   const SIRegisterInfo *SIRI =
2715     static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
2716   const SIInstrInfo * SII =
2717     static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
2718 
2719   unsigned Limit = 0;
2720   bool AllUsesAcceptSReg = true;
2721   for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
2722     Limit < 10 && U != E; ++U, ++Limit) {
2723     const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
2724 
2725     // If the register class is unknown, it could be an unknown
2726     // register class that needs to be an SGPR, e.g. an inline asm
2727     // constraint
2728     if (!RC || SIRI->isSGPRClass(RC))
2729       return false;
2730 
2731     if (RC != &AMDGPU::VS_32RegClass) {
2732       AllUsesAcceptSReg = false;
2733       SDNode * User = *U;
2734       if (User->isMachineOpcode()) {
2735         unsigned Opc = User->getMachineOpcode();
2736         MCInstrDesc Desc = SII->get(Opc);
2737         if (Desc.isCommutable()) {
2738           unsigned OpIdx = Desc.getNumDefs() + U.getOperandNo();
2739           unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
2740           if (SII->findCommutedOpIndices(Desc, OpIdx, CommuteIdx1)) {
2741             unsigned CommutedOpNo = CommuteIdx1 - Desc.getNumDefs();
2742             const TargetRegisterClass *CommutedRC = getOperandRegClass(*U, CommutedOpNo);
2743             if (CommutedRC == &AMDGPU::VS_32RegClass)
2744               AllUsesAcceptSReg = true;
2745           }
2746         }
2747       }
2748       // If "AllUsesAcceptSReg == false" so far we haven't suceeded
2749       // commuting current user. This means have at least one use
2750       // that strictly require VGPR. Thus, we will not attempt to commute
2751       // other user instructions.
2752       if (!AllUsesAcceptSReg)
2753         break;
2754     }
2755   }
2756   return !AllUsesAcceptSReg && (Limit < 10);
2757 }
2758 
2759 bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode * N) const {
2760   auto Ld = cast<LoadSDNode>(N);
2761 
2762   return Ld->getAlignment() >= 4 &&
2763         (
2764           (
2765             (
2766               Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS       ||
2767               Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT
2768             )
2769             &&
2770             !N->isDivergent()
2771           )
2772           ||
2773           (
2774             Subtarget->getScalarizeGlobalBehavior() &&
2775             Ld->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
2776             Ld->isSimple() &&
2777             !N->isDivergent() &&
2778             static_cast<const SITargetLowering *>(
2779               getTargetLowering())->isMemOpHasNoClobberedMemOperand(N)
2780           )
2781         );
2782 }
2783 
2784 void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
2785   const AMDGPUTargetLowering& Lowering =
2786     *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
2787   bool IsModified = false;
2788   do {
2789     IsModified = false;
2790 
2791     // Go over all selected nodes and try to fold them a bit more
2792     SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_begin();
2793     while (Position != CurDAG->allnodes_end()) {
2794       SDNode *Node = &*Position++;
2795       MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(Node);
2796       if (!MachineNode)
2797         continue;
2798 
2799       SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
2800       if (ResNode != Node) {
2801         if (ResNode)
2802           ReplaceUses(Node, ResNode);
2803         IsModified = true;
2804       }
2805     }
2806     CurDAG->RemoveDeadNodes();
2807   } while (IsModified);
2808 }
2809 
2810 bool R600DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
2811   Subtarget = &MF.getSubtarget<R600Subtarget>();
2812   return SelectionDAGISel::runOnMachineFunction(MF);
2813 }
2814 
2815 bool R600DAGToDAGISel::isConstantLoad(const MemSDNode *N, int CbId) const {
2816   if (!N->readMem())
2817     return false;
2818   if (CbId == -1)
2819     return N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
2820            N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
2821 
2822   return N->getAddressSpace() == AMDGPUAS::CONSTANT_BUFFER_0 + CbId;
2823 }
2824 
2825 bool R600DAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
2826                                                          SDValue& IntPtr) {
2827   if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
2828     IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, SDLoc(Addr),
2829                                        true);
2830     return true;
2831   }
2832   return false;
2833 }
2834 
2835 bool R600DAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
2836     SDValue& BaseReg, SDValue &Offset) {
2837   if (!isa<ConstantSDNode>(Addr)) {
2838     BaseReg = Addr;
2839     Offset = CurDAG->getIntPtrConstant(0, SDLoc(Addr), true);
2840     return true;
2841   }
2842   return false;
2843 }
2844 
2845 void R600DAGToDAGISel::Select(SDNode *N) {
2846   unsigned int Opc = N->getOpcode();
2847   if (N->isMachineOpcode()) {
2848     N->setNodeId(-1);
2849     return;   // Already selected.
2850   }
2851 
2852   switch (Opc) {
2853   default: break;
2854   case AMDGPUISD::BUILD_VERTICAL_VECTOR:
2855   case ISD::SCALAR_TO_VECTOR:
2856   case ISD::BUILD_VECTOR: {
2857     EVT VT = N->getValueType(0);
2858     unsigned NumVectorElts = VT.getVectorNumElements();
2859     unsigned RegClassID;
2860     // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
2861     // that adds a 128 bits reg copy when going through TwoAddressInstructions
2862     // pass. We want to avoid 128 bits copies as much as possible because they
2863     // can't be bundled by our scheduler.
2864     switch(NumVectorElts) {
2865     case 2: RegClassID = R600::R600_Reg64RegClassID; break;
2866     case 4:
2867       if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
2868         RegClassID = R600::R600_Reg128VerticalRegClassID;
2869       else
2870         RegClassID = R600::R600_Reg128RegClassID;
2871       break;
2872     default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
2873     }
2874     SelectBuildVector(N, RegClassID);
2875     return;
2876   }
2877   }
2878 
2879   SelectCode(N);
2880 }
2881 
2882 bool R600DAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
2883                                           SDValue &Offset) {
2884   ConstantSDNode *C;
2885   SDLoc DL(Addr);
2886 
2887   if ((C = dyn_cast<ConstantSDNode>(Addr))) {
2888     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
2889     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
2890   } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
2891              (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
2892     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
2893     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
2894   } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
2895             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
2896     Base = Addr.getOperand(0);
2897     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
2898   } else {
2899     Base = Addr;
2900     Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
2901   }
2902 
2903   return true;
2904 }
2905 
2906 bool R600DAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
2907                                           SDValue &Offset) {
2908   ConstantSDNode *IMMOffset;
2909 
2910   if (Addr.getOpcode() == ISD::ADD
2911       && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
2912       && isInt<16>(IMMOffset->getZExtValue())) {
2913 
2914       Base = Addr.getOperand(0);
2915       Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
2916                                          MVT::i32);
2917       return true;
2918   // If the pointer address is constant, we can move it to the offset field.
2919   } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
2920              && isInt<16>(IMMOffset->getZExtValue())) {
2921     Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
2922                                   SDLoc(CurDAG->getEntryNode()),
2923                                   R600::ZERO, MVT::i32);
2924     Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
2925                                        MVT::i32);
2926     return true;
2927   }
2928 
2929   // Default case, no offset
2930   Base = Addr;
2931   Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
2932   return true;
2933 }
2934