//===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUArgumentUsageInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPUInstrInfo.h"
#include "AMDGPUPerfHintAnalysis.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <new>
#include <vector>

#define DEBUG_TYPE "isel"

using namespace llvm;

namespace llvm {

class R600InstrInfo;

} // end namespace llvm

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {

/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const GCNSubtarget *Subtarget;
  bool EnableLateStructurizeCFG;

public:
  explicit AMDGPUDAGToDAGISel(TargetMachine *TM = nullptr,
                              CodeGenOpt::Level OptLevel = CodeGenOpt::Default)
      : SelectionDAGISel(*TM, OptLevel) {
    EnableLateStructurizeCFG = AMDGPUTargetMachine::EnableLateStructurizeCFG;
  }
  ~AMDGPUDAGToDAGISel() override = default;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AMDGPUArgumentUsageInfo>();
    AU.addRequired<AMDGPUPerfHintAnalysis>();
    AU.addRequired<LegacyDivergenceAnalysis>();
    SelectionDAGISel::getAnalysisUsage(AU);
  }

  bool matchLoadD16FromBuildVector(SDNode *N) const;

  bool runOnMachineFunction(MachineFunction &MF) override;
  void PreprocessISelDAG() override;
  void Select(SDNode *N) override;
  StringRef getPassName() const override;
  void PostprocessISelDAG() override;

protected:
  void SelectBuildVector(SDNode *N, unsigned RegClassID);

private:
  std::pair<SDValue, SDValue> foldFrameIndex(SDValue N) const;
  bool isNoNanSrc(SDValue N) const;
  bool isInlineImmediate(const SDNode *N) const;
  bool isVGPRImm(const SDNode *N) const;
  bool isUniformLoad(const SDNode *N) const;
  bool isUniformBr(const SDNode *N) const;

  MachineSDNode *buildSMovImm64(SDLoc &DL, uint64_t Val, EVT VT) const;

  SDNode *glueCopyToM0LDSInit(SDNode *N) const;
  SDNode *glueCopyToM0(SDNode *N, SDValue Val) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  virtual bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  virtual bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool isDSOffsetLegal(SDValue Base, unsigned Offset,
                       unsigned OffsetBits) const;
  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                 SDValue &Offset1) const;
  bool SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                   SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
                   SDValue &TFE) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &SOffset, SDValue &Offset, SDValue &GLC,
                         SDValue &SLC, SDValue &TFE) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                         SDValue &VAddr, SDValue &SOffset, SDValue &Offset,
                         SDValue &SLC) const;
  bool SelectMUBUFScratchOffen(SDNode *Parent,
                               SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                               SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFScratchOffset(SDNode *Parent,
                                SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                                SDValue &Offset) const;

  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
                         SDValue &Offset, SDValue &GLC, SDValue &SLC,
                         SDValue &TFE) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset, SDValue &SLC) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset) const;

  bool SelectFlatAtomic(SDValue Addr, SDValue &VAddr,
                        SDValue &Offset, SDValue &SLC) const;
  bool SelectFlatAtomicSigned(SDValue Addr, SDValue &VAddr,
                              SDValue &Offset, SDValue &SLC) const;

  template <bool IsSigned>
  bool SelectFlatOffset(SDValue Addr, SDValue &VAddr,
                        SDValue &Offset, SDValue &SLC) const;

  bool SelectSMRDOffset(SDValue ByteOffsetNode, SDValue &Offset,
                        bool &Imm) const;
  SDValue Expand32BitAddress(SDValue Addr) const;
  bool SelectSMRD(SDValue Addr, SDValue &SBase, SDValue &Offset,
                  bool &Imm) const;
  bool SelectSMRDImm(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDImm32(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDSgpr(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDBufferImm(SDValue Addr, SDValue &Offset) const;
  bool SelectSMRDBufferImm32(SDValue Addr, SDValue &Offset) const;
  bool SelectMOVRELOffset(SDValue Index, SDValue &Base, SDValue &Offset) const;

  bool SelectVOP3Mods_NNaN(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3ModsImpl(SDValue In, SDValue &Src, unsigned &SrcMods) const;
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3NoMods(SDValue In, SDValue &Src) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3NoMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                         SDValue &Clamp, SDValue &Omod) const;

  bool SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src, SDValue &SrcMods,
                                 SDValue &Clamp,
                                 SDValue &Omod) const;

  bool SelectVOP3OMods(SDValue In, SDValue &Src,
                       SDValue &Clamp, SDValue &Omod) const;

  bool SelectVOP3PMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3PMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                        SDValue &Clamp) const;

  bool SelectVOP3OpSel(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3OpSel0(SDValue In, SDValue &Src, SDValue &SrcMods,
                        SDValue &Clamp) const;

  bool SelectVOP3OpSelMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3OpSelMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                            SDValue &Clamp) const;
  bool SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src,
                                 unsigned &Mods) const;
  bool SelectVOP3PMadMixMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  SDValue getHi16Elt(SDValue In) const;
  bool SelectHi16Elt(SDValue In, SDValue &Src) const;

  void SelectADD_SUB_I64(SDNode *N);
  void SelectUADDO_USUBO(SDNode *N);
  void SelectDIV_SCALE(SDNode *N);
  void SelectMAD_64_32(SDNode *N);
  void SelectFMA_W_CHAIN(SDNode *N);
  void SelectFMUL_W_CHAIN(SDNode *N);

  SDNode *getS_BFE(unsigned Opcode, const SDLoc &DL, SDValue Val,
                   uint32_t Offset, uint32_t Width);
  void SelectS_BFEFromShifts(SDNode *N);
  void SelectS_BFE(SDNode *N);
  bool isCBranchSCC(const SDNode *N) const;
  void SelectBRCOND(SDNode *N);
  void SelectFMAD_FMA(SDNode *N);
  void SelectATOMIC_CMP_SWAP(SDNode *N);
  void SelectINTRINSIC_W_CHAIN(SDNode *N);

protected:
  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};

class R600DAGToDAGISel : public AMDGPUDAGToDAGISel {
  const R600Subtarget *Subtarget;

  bool isConstantLoad(const MemSDNode *N, int cbID) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue &IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue &Offset);

public:
  explicit R600DAGToDAGISel(TargetMachine *TM, CodeGenOpt::Level OptLevel) :
      AMDGPUDAGToDAGISel(TM, OptLevel) {}

  void Select(SDNode *N) override;

  bool SelectADDRIndirect(SDValue Addr, SDValue &Base,
                          SDValue &Offset) override;
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                          SDValue &Offset) override;

  bool runOnMachineFunction(MachineFunction &MF) override;

  void PreprocessISelDAG() override {}

protected:
  // Include the pieces autogenerated from the target description.
#include "R600GenDAGISel.inc"
};

static SDValue stripBitcast(SDValue Val) {
  return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
}

// Figure out if this is really an extract of the high 16-bits of a dword.
static bool isExtractHiElt(SDValue In, SDValue &Out) {
  In = stripBitcast(In);
  if (In.getOpcode() != ISD::TRUNCATE)
    return false;

  SDValue Srl = In.getOperand(0);
  if (Srl.getOpcode() == ISD::SRL) {
    if (ConstantSDNode *ShiftAmt = dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
      if (ShiftAmt->getZExtValue() == 16) {
        Out = stripBitcast(Srl.getOperand(0));
        return true;
      }
    }
  }

  return false;
}

// Look through operations that obscure just looking at the low 16-bits of the
// same register.
static SDValue stripExtractLoElt(SDValue In) {
  if (In.getOpcode() == ISD::TRUNCATE) {
    SDValue Src = In.getOperand(0);
    if (Src.getValueType().getSizeInBits() == 32)
      return stripBitcast(Src);
  }

  return In;
}

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(AMDGPUDAGToDAGISel, "amdgpu-isel",
                      "AMDGPU DAG->DAG Pattern Instruction Selection", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(AMDGPUArgumentUsageInfo)
INITIALIZE_PASS_DEPENDENCY(AMDGPUPerfHintAnalysis)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_END(AMDGPUDAGToDAGISel, "amdgpu-isel",
                    "AMDGPU DAG->DAG Pattern Instruction Selection", false,
                    false)

/// This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine *TM,
                                        CodeGenOpt::Level OptLevel) {
  return new AMDGPUDAGToDAGISel(TM, OptLevel);
}
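
// The target's pass configuration is expected to install this pass during
// instruction selection. A minimal sketch, assuming the usual GCNPassConfig
// hook (which lives in AMDGPUTargetMachine.cpp, not in this file):
//
//   bool GCNPassConfig::addInstSelector() {
//     addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
//     ...
//     return false;
//   }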

/// This pass converts a legalized DAG into an R600-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createR600ISelDag(TargetMachine *TM,
                                      CodeGenOpt::Level OptLevel) {
  return new R600DAGToDAGISel(TM, OptLevel);
}

bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
  Subtarget = &MF.getSubtarget<GCNSubtarget>();
  return SelectionDAGISel::runOnMachineFunction(MF);
}

bool AMDGPUDAGToDAGISel::matchLoadD16FromBuildVector(SDNode *N) const {
  assert(Subtarget->d16PreservesUnusedBits());
  MVT VT = N->getValueType(0).getSimpleVT();
  if (VT != MVT::v2i16 && VT != MVT::v2f16)
    return false;

  SDValue Lo = N->getOperand(0);
  SDValue Hi = N->getOperand(1);

  LoadSDNode *LdHi = dyn_cast<LoadSDNode>(stripBitcast(Hi));

  // build_vector lo, (load ptr) -> load_d16_hi ptr, lo
  // build_vector lo, (zextload ptr from i8) -> load_d16_hi_u8 ptr, lo
  // build_vector lo, (sextload ptr from i8) -> load_d16_hi_i8 ptr, lo

  // Need to check for possible indirect dependencies on the other half of the
  // vector to avoid introducing a cycle.
  if (LdHi && Hi.hasOneUse() && !LdHi->isPredecessorOf(Lo.getNode())) {
    SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);

    SDValue TiedIn = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Lo);
    SDValue Ops[] = {
      LdHi->getChain(), LdHi->getBasePtr(), TiedIn
    };

    unsigned LoadOp = AMDGPUISD::LOAD_D16_HI;
    if (LdHi->getMemoryVT() == MVT::i8) {
      LoadOp = LdHi->getExtensionType() == ISD::SEXTLOAD ?
        AMDGPUISD::LOAD_D16_HI_I8 : AMDGPUISD::LOAD_D16_HI_U8;
    } else {
      assert(LdHi->getMemoryVT() == MVT::i16);
    }

    SDValue NewLoadHi =
      CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdHi), VTList,
                                  Ops, LdHi->getMemoryVT(),
                                  LdHi->getMemOperand());

    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadHi);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdHi, 1), NewLoadHi.getValue(1));
    return true;
  }

  // build_vector (load ptr), hi -> load_d16_lo ptr, hi
  // build_vector (zextload ptr from i8), hi -> load_d16_lo_u8 ptr, hi
  // build_vector (sextload ptr from i8), hi -> load_d16_lo_i8 ptr, hi
  LoadSDNode *LdLo = dyn_cast<LoadSDNode>(stripBitcast(Lo));
  if (LdLo && Lo.hasOneUse()) {
    SDValue TiedIn = getHi16Elt(Hi);
    if (!TiedIn || LdLo->isPredecessorOf(TiedIn.getNode()))
      return false;

    SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);
    unsigned LoadOp = AMDGPUISD::LOAD_D16_LO;
    if (LdLo->getMemoryVT() == MVT::i8) {
      LoadOp = LdLo->getExtensionType() == ISD::SEXTLOAD ?
        AMDGPUISD::LOAD_D16_LO_I8 : AMDGPUISD::LOAD_D16_LO_U8;
    } else {
      assert(LdLo->getMemoryVT() == MVT::i16);
    }

    TiedIn = CurDAG->getNode(ISD::BITCAST, SDLoc(N), VT, TiedIn);

    SDValue Ops[] = {
      LdLo->getChain(), LdLo->getBasePtr(), TiedIn
    };

    SDValue NewLoadLo =
      CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdLo), VTList,
                                  Ops, LdLo->getMemoryVT(),
                                  LdLo->getMemOperand());

    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadLo);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdLo, 1), NewLoadLo.getValue(1));
    return true;
  }

  return false;
}

void AMDGPUDAGToDAGISel::PreprocessISelDAG() {
  if (!Subtarget->d16PreservesUnusedBits())
    return;

  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    if (N->use_empty())
      continue;

    switch (N->getOpcode()) {
    case ISD::BUILD_VECTOR:
      MadeChange |= matchLoadD16FromBuildVector(N);
      break;
    default:
      break;
    }
  }

  if (MadeChange) {
    CurDAG->RemoveDeadNodes();
    LLVM_DEBUG(dbgs() << "After PreProcess:\n";
               CurDAG->dump(););
  }
}

bool AMDGPUDAGToDAGISel::isNoNanSrc(SDValue N) const {
  if (TM.Options.NoNaNsFPMath)
    return true;

  // TODO: Move into isKnownNeverNaN
  if (N->getFlags().isDefined())
    return N->getFlags().hasNoNaNs();

  return CurDAG->isKnownNeverNaN(N);
}

bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N) const {
  const SIInstrInfo *TII = Subtarget->getInstrInfo();

  if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
    return TII->isInlineConstant(C->getAPIntValue());

  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
    return TII->isInlineConstant(C->getValueAPF().bitcastToAPInt());

  return false;
}
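
// For reference (a hardware detail checked by SIInstrInfo, not enforced
// here): GCN inline constants cover roughly the small integers -16..64 and a
// handful of FP values such as 0.0, +/-0.5, +/-1.0, +/-2.0 and +/-4.0, so
// e.g. "fadd x, 1.0" needs no literal dword while "fadd x, 1.5" does.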

/// Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                                  unsigned OpNo) const {
  if (!N->isMachineOpcode()) {
    if (N->getOpcode() == ISD::CopyToReg) {
      unsigned Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        MachineRegisterInfo &MRI = CurDAG->getMachineFunction().getRegInfo();
        return MRI.getRegClass(Reg);
      }

      const SIRegisterInfo *TRI
        = static_cast<const GCNSubtarget *>(Subtarget)->getRegisterInfo();
      return TRI->getPhysRegClass(Reg);
    }

    return nullptr;
  }

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc =
        Subtarget->getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return Subtarget->getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
        Subtarget->getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
                                                               SubRegIdx);
  }
  }
}

SDNode *AMDGPUDAGToDAGISel::glueCopyToM0(SDNode *N, SDValue Val) const {
  const SITargetLowering& Lowering =
      *static_cast<const SITargetLowering*>(getTargetLowering());

  // Write the given value to m0 and glue the copy to this operation.
  SDValue M0 = Lowering.copyToM0(*CurDAG, CurDAG->getEntryNode(), SDLoc(N),
                                 Val);

  SDValue Glue = M0.getValue(1);

  SmallVector <SDValue, 8> Ops;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    Ops.push_back(N->getOperand(i));

  Ops.push_back(Glue);
  return CurDAG->MorphNodeTo(N, N->getOpcode(), N->getVTList(), Ops);
}

SDNode *AMDGPUDAGToDAGISel::glueCopyToM0LDSInit(SDNode *N) const {
  if (cast<MemSDNode>(N)->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS ||
      !Subtarget->ldsRequiresM0Init())
    return N;
  // Write the maximum LDS size to m0 before each LDS operation.
  return glueCopyToM0(N, CurDAG->getTargetConstant(-1, SDLoc(N), MVT::i32));
}

MachineSDNode *AMDGPUDAGToDAGISel::buildSMovImm64(SDLoc &DL, uint64_t Imm,
                                                  EVT VT) const {
  SDNode *Lo = CurDAG->getMachineNode(
      AMDGPU::S_MOV_B32, DL, MVT::i32,
      CurDAG->getConstant(Imm & 0xFFFFFFFF, DL, MVT::i32));
  SDNode *Hi =
      CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                             CurDAG->getConstant(Imm >> 32, DL, MVT::i32));
  const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)};

  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, VT, Ops);
}

static unsigned selectSGPRVectorRegClassID(unsigned NumVectorElts) {
  switch (NumVectorElts) {
  case 1:
    return AMDGPU::SReg_32_XM0RegClassID;
  case 2:
    return AMDGPU::SReg_64RegClassID;
  case 4:
    return AMDGPU::SReg_128RegClassID;
  case 8:
    return AMDGPU::SReg_256RegClassID;
  case 16:
    return AMDGPU::SReg_512RegClassID;
  }

  llvm_unreachable("invalid vector size");
}
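
// As an illustration, materializing the 64-bit immediate 0x123456789ABCDEF0
// with buildSMovImm64 above yields roughly:
//   s_mov_b32 s0, 0x9abcdef0          ; low half
//   s_mov_b32 s1, 0x12345678          ; high half
//   REG_SEQUENCE SReg_64, s0, sub0, s1, sub1
// (register names are illustrative; the allocator picks the real SGPR pair).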

static bool getConstantValue(SDValue N, uint32_t &Out) {
  if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
    Out = C->getAPIntValue().getZExtValue();
    return true;
  }

  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N)) {
    Out = C->getValueAPF().bitcastToAPInt().getZExtValue();
    return true;
  }

  return false;
}

void AMDGPUDAGToDAGISel::SelectBuildVector(SDNode *N, unsigned RegClassID) {
  EVT VT = N->getValueType(0);
  unsigned NumVectorElts = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc DL(N);
  SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);

  if (NumVectorElts == 1) {
    CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT, N->getOperand(0),
                         RegClass);
    return;
  }

  assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                "supported yet");
  // 16 = Max Num Vector Elements
  // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
  // 1 = Vector Register Class
  SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

  RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
  bool IsRegSeq = true;
  unsigned NOps = N->getNumOperands();
  for (unsigned i = 0; i < NOps; i++) {
    // XXX: Why is this here?
    if (isa<RegisterSDNode>(N->getOperand(i))) {
      IsRegSeq = false;
      break;
    }
    unsigned Sub = AMDGPURegisterInfo::getSubRegFromChannel(i);
    RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
    RegSeqArgs[1 + (2 * i) + 1] = CurDAG->getTargetConstant(Sub, DL, MVT::i32);
  }
  if (NOps != NumVectorElts) {
    // Fill in the missing undef elements if this was a scalar_to_vector.
    assert(N->getOpcode() == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
    MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                   DL, EltVT);
    for (unsigned i = NOps; i < NumVectorElts; ++i) {
      unsigned Sub = AMDGPURegisterInfo::getSubRegFromChannel(i);
      RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(Sub, DL, MVT::i32);
    }
  }

  if (!IsRegSeq) {
    // Fall back to the generated matcher. N has already been re-selected
    // there, so don't also morph it into a REG_SEQUENCE.
    SelectCode(N);
    return;
  }
  CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(), RegSeqArgs);
}
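
// For example, a v4i32 build_vector with four scalar operands is selected to
// a single REG_SEQUENCE over SReg_128, roughly (illustrative):
//   REG_SEQUENCE SReg_128RegClassID, %a, sub0, %b, sub1, %c, sub2, %d, sub3
// so consumers simply see one 128-bit register tuple.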

void AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return; // Already selected.
  }

  if (isa<AtomicSDNode>(N) ||
      (Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC ||
       Opc == ISD::ATOMIC_LOAD_FADD ||
       Opc == AMDGPUISD::ATOMIC_LOAD_FMIN ||
       Opc == AMDGPUISD::ATOMIC_LOAD_FMAX))
    N = glueCopyToM0LDSInit(N);

  switch (Opc) {
  default:
    break;
  // We are selecting i64 ADD here instead of custom lowering it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE: {
    if (N->getValueType(0) != MVT::i64)
      break;

    SelectADD_SUB_I64(N);
    return;
  }
  case ISD::UADDO:
  case ISD::USUBO: {
    SelectUADDO_USUBO(N);
    return;
  }
  case AMDGPUISD::FMUL_W_CHAIN: {
    SelectFMUL_W_CHAIN(N);
    return;
  }
  case AMDGPUISD::FMA_W_CHAIN: {
    SelectFMA_W_CHAIN(N);
    return;
  }

  case ISD::SCALAR_TO_VECTOR:
  case ISD::BUILD_VECTOR: {
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    if (VT.getScalarSizeInBits() == 16) {
      if (Opc == ISD::BUILD_VECTOR && NumVectorElts == 2) {
        uint32_t LHSVal, RHSVal;
        if (getConstantValue(N->getOperand(0), LHSVal) &&
            getConstantValue(N->getOperand(1), RHSVal)) {
          uint32_t K = LHSVal | (RHSVal << 16);
          CurDAG->SelectNodeTo(N, AMDGPU::S_MOV_B32, VT,
                               CurDAG->getTargetConstant(K, SDLoc(N), MVT::i32));
          return;
        }
      }

      break;
    }

    assert(VT.getVectorElementType().bitsEq(MVT::i32));
    unsigned RegClassID = selectSGPRVectorRegClassID(NumVectorElts);
    SelectBuildVector(N, RegClassID);
    return;
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    SDLoc DL(N);
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
                                          N->getValueType(0), Ops));
    return;
  }

  case ISD::Constant:
  case ISD::ConstantFP: {
    if (N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    SDLoc DL(N);
    ReplaceNode(N, buildSMovImm64(DL, Imm, N->getValueType(0)));
    return;
  }
  case ISD::LOAD:
  case ISD::STORE:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    N = glueCopyToM0LDSInit(N);
    break;
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    // There is a scalar version available, but unlike the vector version,
    // which has separate operands for the offset and width, the scalar
    // version packs the width and offset into a single operand. Try to move
    // to the scalar version if the offsets are constant, so that we can try
    // to keep extended loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
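
    // For example, with offset 8 and width 16 the scalar form packs the
    // second operand as (16 << 16) | 8 == 0x100008, i.e. roughly:
    //   s_bfe_u32 dst, src, 0x100008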
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    ReplaceNode(N, getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
                            SDLoc(N), N->getOperand(0), OffsetVal, WidthVal));
    return;
  }
  case AMDGPUISD::DIV_SCALE: {
    SelectDIV_SCALE(N);
    return;
  }
  case AMDGPUISD::MAD_I64_I32:
  case AMDGPUISD::MAD_U64_U32: {
    SelectMAD_64_32(N);
    return;
  }
  case ISD::CopyToReg: {
    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());
    N = Lowering.legalizeTargetIndependentNode(N, *CurDAG);
    break;
  }
  case ISD::AND:
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SIGN_EXTEND_INREG:
    if (N->getValueType(0) != MVT::i32)
      break;

    SelectS_BFE(N);
    return;
  case ISD::BRCOND:
    SelectBRCOND(N);
    return;
  case ISD::FMAD:
  case ISD::FMA:
    SelectFMAD_FMA(N);
    return;
  case AMDGPUISD::ATOMIC_CMP_SWAP:
    SelectATOMIC_CMP_SWAP(N);
    return;
  case AMDGPUISD::CVT_PKRTZ_F16_F32:
  case AMDGPUISD::CVT_PKNORM_I16_F32:
  case AMDGPUISD::CVT_PKNORM_U16_F32:
  case AMDGPUISD::CVT_PK_U16_U32:
  case AMDGPUISD::CVT_PK_I16_I32: {
    // Hack around using a legal type if f16 is illegal.
    if (N->getValueType(0) == MVT::i32) {
      MVT NewVT = Opc == AMDGPUISD::CVT_PKRTZ_F16_F32 ? MVT::v2f16 : MVT::v2i16;
      N = CurDAG->MorphNodeTo(N, N->getOpcode(), CurDAG->getVTList(NewVT),
                              { N->getOperand(0), N->getOperand(1) });
      SelectCode(N);
      return;
    }

    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    SelectINTRINSIC_W_CHAIN(N);
    return;
  }
  }

  SelectCode(N);
}

bool AMDGPUDAGToDAGISel::isUniformBr(const SDNode *N) const {
  const BasicBlock *BB = FuncInfo->MBB->getBasicBlock();
  const Instruction *Term = BB->getTerminator();
  return Term->getMetadata("amdgpu.uniform") ||
         Term->getMetadata("structurizecfg.uniform");
}

StringRef AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  return false;
}
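
// The base implementation above deliberately never matches; VTX_READ
// addressing is only meaningful on R600, where the R600DAGToDAGISel override
// handles it.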

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;
  SDLoc DL(Addr);

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  }

  return true;
}

// FIXME: Should only handle addcarry/subcarry
void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  unsigned Opcode = N->getOpcode();
  bool ConsumeCarry = (Opcode == ISD::ADDE || Opcode == ISD::SUBE);
  bool ProduceCarry =
      ConsumeCarry || Opcode == ISD::ADDC || Opcode == ISD::SUBC;
  bool IsAdd = Opcode == ISD::ADD || Opcode == ISD::ADDC || Opcode == ISD::ADDE;

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);

  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);

  unsigned Opc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  SDNode *AddLo;
  if (!ConsumeCarry) {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };
    AddLo = CurDAG->getMachineNode(Opc, DL, VTList, Args);
  } else {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0), N->getOperand(2) };
    AddLo = CurDAG->getMachineNode(CarryOpc, DL, VTList, Args);
  }
  SDValue AddHiArgs[] = {
    SDValue(Hi0, 0),
    SDValue(Hi1, 0),
    SDValue(AddLo, 1)
  };
  SDNode *AddHi = CurDAG->getMachineNode(CarryOpc, DL, VTList, AddHiArgs);

  SDValue RegSequenceArgs[] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
    SDValue(AddLo, 0),
    Sub0,
    SDValue(AddHi, 0),
    Sub1,
  };
  SDNode *RegSequence = CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                               MVT::i64, RegSequenceArgs);

  if (ProduceCarry) {
    // Replace the carry-use
    ReplaceUses(SDValue(N, 1), SDValue(AddHi, 1));
  }

  // Replace the remaining uses.
  ReplaceNode(N, RegSequence);
}
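
// The resulting sequence for a 64-bit scalar add is thus roughly
// (illustrative):
//   s_add_u32  lo, lhs.sub0, rhs.sub0   ; sets SCC
//   s_addc_u32 hi, lhs.sub1, rhs.sub1   ; consumes SCC from the low half
// glued together so nothing can clobber SCC in between.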

void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) {
  // The names of the opcodes are misleading. v_add_i32/v_sub_i32 have an
  // unsigned carry out despite the _i32 name. These were renamed in VI to
  // _U32.
  // FIXME: We should probably rename the opcodes here.
  unsigned Opc = N->getOpcode() == ISD::UADDO ?
      AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;

  CurDAG->SelectNodeTo(N, Opc, N->getVTList(),
                       { N->getOperand(0), N->getOperand(1) });
}

void AMDGPUDAGToDAGISel::SelectFMA_W_CHAIN(SDNode *N) {
  SDLoc SL(N);
  // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp,
  // omod
  SDValue Ops[10];

  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[6], Ops[7]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  SelectVOP3Mods(N->getOperand(3), Ops[5], Ops[4]);
  Ops[8] = N->getOperand(0);
  Ops[9] = N->getOperand(4);

  CurDAG->SelectNodeTo(N, AMDGPU::V_FMA_F32, N->getVTList(), Ops);
}

void AMDGPUDAGToDAGISel::SelectFMUL_W_CHAIN(SDNode *N) {
  SDLoc SL(N);
  // src0_modifiers, src0, src1_modifiers, src1, clamp, omod
  SDValue Ops[8];

  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[4], Ops[5]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  Ops[6] = N->getOperand(0);
  Ops[7] = N->getOperand(3);

  CurDAG->SelectNodeTo(N, AMDGPU::V_MUL_F32_e64, N->getVTList(), Ops);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  assert(VT == MVT::f32 || VT == MVT::f64);

  unsigned Opc
    = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;

  SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2) };
  CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
void AMDGPUDAGToDAGISel::SelectMAD_64_32(SDNode *N) {
  SDLoc SL(N);
  bool Signed = N->getOpcode() == AMDGPUISD::MAD_I64_I32;
  unsigned Opc = Signed ? AMDGPU::V_MAD_I64_I32 : AMDGPU::V_MAD_U64_U32;

  SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
  SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
                    Clamp };
  CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
}
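
// v_mad_u64_u32 / v_mad_i64_i32 compute d = a * b + c with a 32x32->64-bit
// multiply; the extra operand appended above is the clamp bit, always 0 here.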

bool AMDGPUDAGToDAGISel::isDSOffsetLegal(SDValue Base, unsigned Offset,
                                         unsigned OffsetBits) const {
  if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
      (OffsetBits == 8 && !isUInt<8>(Offset)))
    return false;

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS ||
      Subtarget->unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return CurDAG->SignBitIsZero(Base);
}

bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
                                              SDValue &Offset) const {
  SDLoc DL(Addr);
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
      // (add n0, c0)
      Base = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      int64_t ByteOffset = C->getSExtValue();
      if (isUInt<16>(ByteOffset)) {
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffsetLegal(Sub, ByteOffset, 16)) {
          // FIXME: Select to VOP3 version for with-carry.
          unsigned SubOp = Subtarget->hasAddNoCarry() ?
              AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_I32_e32;

          MachineSDNode *MachineSub
            = CurDAG->getMachineNode(SubOp, DL, MVT::i32,
                                     Zero, Addr.getOperand(1));

          Base = SDValue(MachineSub, 0);
          Offset = CurDAG->getTargetConstant(ByteOffset, DL, MVT::i16);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    // If we have a constant address, prefer to put the constant into the
    // offset. This can save moves to load the constant address since multiple
    // operations can share the zero base address register, and enables merging
    // into read2 / write2 instructions.

    SDLoc DL(Addr);

    if (isUInt<16>(CAddr->getZExtValue())) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                                      DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // default case
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i16);
  return true;
}

// TODO: If the offset is too big, put the low 16 bits into the offset.
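
// ds_read2/ds_write2 encode two independent 8-bit offsets in units of the
// element size; e.g. loading the dwords at byte offsets 8 and 12 from one
// base would use offset0:2 offset1:3 (illustrative).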

bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
                                                   SDValue &Offset0,
                                                   SDValue &Offset1) const {
  SDLoc DL(Addr);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    unsigned DWordOffset0 = C1->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    // (add n0, c0)
    if (isDSOffsetLegal(N0, DWordOffset1, 8)) {
      Base = N0;
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      unsigned DWordOffset0 = C->getZExtValue() / 4;
      unsigned DWordOffset1 = DWordOffset0 + 1;

      if (isUInt<8>(DWordOffset0)) {
        SDLoc DL(Addr);
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffsetLegal(Sub, DWordOffset1, 8)) {
          unsigned SubOp = Subtarget->hasAddNoCarry() ?
              AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_I32_e32;

          MachineSDNode *MachineSub
            = CurDAG->getMachineNode(SubOp, DL, MVT::i32,
                                     Zero, Addr.getOperand(1));

          Base = SDValue(MachineSub, 0);
          Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
          Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    unsigned DWordOffset0 = CAddr->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    assert(4 * DWordOffset0 == CAddr->getZExtValue());

    if (isUInt<8>(DWordOffset0) && isUInt<8>(DWordOffset1)) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero
        = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                 DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
      return true;
    }
  }

  // default case

  Base = Addr;
  Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
  Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
                                     SDValue &VAddr, SDValue &SOffset,
                                     SDValue &Offset, SDValue &Offen,
                                     SDValue &Idxen, SDValue &Addr64,
                                     SDValue &GLC, SDValue &SLC,
                                     SDValue &TFE) const {
  // Subtarget prefers to use flat instructions.
  if (Subtarget->useFlatForGlobal())
    return false;

  SDLoc DL(Addr);

  if (!GLC.getNode())
    GLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
  if (!SLC.getNode())
    SLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
  TFE = CurDAG->getTargetConstant(0, DL, MVT::i1);

  Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Offen = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Addr64 = CurDAG->getTargetConstant(0, DL, MVT::i1);
  SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);

  ConstantSDNode *C1 = nullptr;
  SDValue N0 = Addr;
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    C1 = cast<ConstantSDNode>(Addr.getOperand(1));
    if (isUInt<32>(C1->getZExtValue()))
      N0 = Addr.getOperand(0);
    else
      C1 = nullptr;
  }

  if (N0.getOpcode() == ISD::ADD) {
    // (add N2, N3) -> addr64, or
    // (add (add N2, N3), C1) -> addr64
    SDValue N2 = N0.getOperand(0);
    SDValue N3 = N0.getOperand(1);
    Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);

    if (N2->isDivergent()) {
      if (N3->isDivergent()) {
        // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
        // addr64, and construct the resource from a 0 address.
        Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
        VAddr = N0;
      } else {
        // N2 is divergent, N3 is not.
        Ptr = N3;
        VAddr = N2;
      }
    } else {
      // N2 is not divergent.
      Ptr = N2;
      VAddr = N3;
    }
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  } else if (N0->isDivergent()) {
    // N0 is divergent. Use it as the addr64, and construct the resource from a
    // 0 address.
    Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
    VAddr = N0;
    Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
  } else {
    // N0 -> offset, or
    // (N0 + C1) -> offset
    VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
    Ptr = N0;
  }

  if (!C1) {
    // No offset.
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
    return true;
  }

  if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue())) {
    // Legal offset for instruction.
    Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
    return true;
  }

  // Illegal offset, store it in soffset.
  Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  SOffset =
      SDValue(CurDAG->getMachineNode(
                  AMDGPU::S_MOV_B32, DL, MVT::i32,
                  CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32)),
              0);
  return true;
}
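
// The effective MUBUF address is roughly (illustrative):
//   rsrc.base + vaddr (when offen/addr64) + soffset + imm offset
// where the immediate offset field only holds small constants, hence the
// isLegalMUBUFImmOffset check and the soffset spill above.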

bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr, SDValue &SOffset,
                                           SDValue &Offset, SDValue &GLC,
                                           SDValue &SLC, SDValue &TFE) const {
  SDValue Ptr, Offen, Idxen, Addr64;

  // The addr64 bit was removed on Volcanic Islands.
  if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
    return false;

  if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
                   GLC, SLC, TFE))
    return false;

  ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
  if (C->getSExtValue()) {
    SDLoc DL(Addr);

    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());

    SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
    return true;
  }

  return false;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr, SDValue &SOffset,
                                           SDValue &Offset,
                                           SDValue &SLC) const {
  SLC = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i1);
  SDValue GLC, TFE;

  return SelectMUBUFAddr64(Addr, SRsrc, VAddr, SOffset, Offset, GLC, SLC, TFE);
}

static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
  auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
  return PSV && PSV->isStack();
}

std::pair<SDValue, SDValue> AMDGPUDAGToDAGISel::foldFrameIndex(SDValue N) const {
  const MachineFunction &MF = CurDAG->getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  if (auto FI = dyn_cast<FrameIndexSDNode>(N)) {
    SDValue TFI = CurDAG->getTargetFrameIndex(FI->getIndex(),
                                              FI->getValueType(0));

    // If we can resolve this to a frame index access, this is relative to the
    // frame pointer SGPR.
    return std::make_pair(TFI, CurDAG->getRegister(Info->getFrameOffsetReg(),
                                                   MVT::i32));
  }

  // If we don't know this private access is a local stack object, it needs to
  // be relative to the entry point's scratch wave offset register.
  return std::make_pair(N, CurDAG->getRegister(Info->getScratchWaveOffsetReg(),
                                               MVT::i32));
}

bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffen(SDNode *Parent,
                                                 SDValue Addr, SDValue &Rsrc,
                                                 SDValue &VAddr, SDValue &SOffset,
                                                 SDValue &ImmOffset) const {
  SDLoc DL(Addr);
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  Rsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);

  if (ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    unsigned Imm = CAddr->getZExtValue();

    SDValue HighBits = CurDAG->getTargetConstant(Imm & ~4095, DL, MVT::i32);
    MachineSDNode *MovHighBits = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                                        DL, MVT::i32, HighBits);
    VAddr = SDValue(MovHighBits, 0);

    // In a call sequence, stores to the argument stack area are relative to
    // the stack pointer.
    const MachinePointerInfo &PtrInfo = cast<MemSDNode>(Parent)->getPointerInfo();
    unsigned SOffsetReg = isStackPtrRelative(PtrInfo) ?
        Info->getStackPtrOffsetReg() : Info->getScratchWaveOffsetReg();

    SOffset = CurDAG->getRegister(SOffsetReg, MVT::i32);
    ImmOffset = CurDAG->getTargetConstant(Imm & 4095, DL, MVT::i16);
    return true;
  }

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    // (add n0, c1)

    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);

    // Offsets in vaddr must be positive if range checking is enabled.
    //
    // The total computation of vaddr + soffset + offset must not overflow. If
    // vaddr is negative, even if offset is 0 the sgpr offset add will end up
    // overflowing.
    //
    // Prior to gfx9, MUBUF instructions with the vaddr offset enabled would
    // always perform a range check. If a negative vaddr base index was used,
    // this would fail the range check. The overall address computation would
    // compute a valid address, but this doesn't happen due to the range
    // check. For out-of-bounds MUBUF loads, a 0 is returned.
    //
    // Therefore it should be safe to fold any VGPR offset on gfx9 into the
    // MUBUF vaddr, but not on older subtargets which can only do this if the
    // sign bit is known 0.
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue()) &&
        (!Subtarget->privateMemoryResourceIsRangeChecked() ||
         CurDAG->SignBitIsZero(N0))) {
      std::tie(VAddr, SOffset) = foldFrameIndex(N0);
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // (node)
  std::tie(VAddr, SOffset) = foldFrameIndex(Addr);
  ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  return true;
}
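
// A scratch access selected through the offen path ends up looking roughly
// like (illustrative; register numbers are made up):
//   buffer_store_dword v0, v1, s[0:3], s4 offen offset:16
// with s[0:3] the scratch rsrc and s4 the wave/stack offset chosen above.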

bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffset(SDNode *Parent,
                                                  SDValue Addr,
                                                  SDValue &SRsrc,
                                                  SDValue &SOffset,
                                                  SDValue &Offset) const {
  ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr);
  if (!CAddr || !SIInstrInfo::isLegalMUBUFImmOffset(CAddr->getZExtValue()))
    return false;

  SDLoc DL(Addr);
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  SRsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);

  const MachinePointerInfo &PtrInfo = cast<MemSDNode>(Parent)->getPointerInfo();
  unsigned SOffsetReg = isStackPtrRelative(PtrInfo) ?
      Info->getStackPtrOffsetReg() : Info->getScratchWaveOffsetReg();

  // FIXME: Get from MachinePointerInfo? We should only be using the frame
  // offset if we know this is in a call sequence.
  SOffset = CurDAG->getRegister(SOffsetReg, MVT::i32);

  Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &SOffset, SDValue &Offset,
                                           SDValue &GLC, SDValue &SLC,
                                           SDValue &TFE) const {
  SDValue Ptr, VAddr, Offen, Idxen, Addr64;
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());

  if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
                   GLC, SLC, TFE))
    return false;

  if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
      !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
      !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
    uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
                    APInt::getAllOnesValue(32).getZExtValue(); // Size
    SDLoc DL(Addr);

    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());

    SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &Soffset,
                                           SDValue &Offset) const {
  SDValue GLC, SLC, TFE;

  return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE);
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &Soffset, SDValue &Offset,
                                           SDValue &SLC) const {
  SDValue GLC, TFE;

  return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE);
}

template <bool IsSigned>
bool AMDGPUDAGToDAGISel::SelectFlatOffset(SDValue Addr,
                                          SDValue &VAddr,
                                          SDValue &Offset,
                                          SDValue &SLC) const {
  int64_t OffsetVal = 0;

  if (Subtarget->hasFlatInstOffsets() &&
      CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    int64_t COffsetVal = cast<ConstantSDNode>(N1)->getSExtValue();

    if ((IsSigned && isInt<13>(COffsetVal)) ||
        (!IsSigned && isUInt<12>(COffsetVal))) {
      Addr = N0;
      OffsetVal = COffsetVal;
    }
  }

  VAddr = Addr;
  Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i16);
  SLC = CurDAG->getTargetConstant(0, SDLoc(), MVT::i1);

  return true;
}

bool AMDGPUDAGToDAGISel::SelectFlatAtomic(SDValue Addr,
                                          SDValue &VAddr,
                                          SDValue &Offset,
                                          SDValue &SLC) const {
  return SelectFlatOffset<false>(Addr, VAddr, Offset, SLC);
}

bool AMDGPUDAGToDAGISel::SelectFlatAtomicSigned(SDValue Addr,
                                                SDValue &VAddr,
                                                SDValue &Offset,
                                                SDValue &SLC) const {
  return SelectFlatOffset<true>(Addr, VAddr, Offset, SLC);
}
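
// The isInt<13>/isUInt<12> split above mirrors the flat-family offset
// encodings on subtargets with hasFlatInstOffsets(): the signed form is used
// for the segment (global/scratch) variants, the unsigned form for plain
// flat (roughly; the subtarget predicates are the authoritative rule).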

bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode,
                                          SDValue &Offset, bool &Imm) const {
  // FIXME: Handle non-constant offsets.
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ByteOffsetNode);
  if (!C)
    return false;

  SDLoc SL(ByteOffsetNode);
  GCNSubtarget::Generation Gen = Subtarget->getGeneration();
  int64_t ByteOffset = C->getSExtValue();
  int64_t EncodedOffset = AMDGPU::getSMRDEncodedOffset(*Subtarget, ByteOffset);

  if (AMDGPU::isLegalSMRDImmOffset(*Subtarget, ByteOffset)) {
    Offset = CurDAG->getTargetConstant(EncodedOffset, SL, MVT::i32);
    Imm = true;
    return true;
  }

  if (!isUInt<32>(EncodedOffset) || !isUInt<32>(ByteOffset))
    return false;

  if (Gen == AMDGPUSubtarget::SEA_ISLANDS && isUInt<32>(EncodedOffset)) {
    // 32-bit Immediates are supported on Sea Islands.
    Offset = CurDAG->getTargetConstant(EncodedOffset, SL, MVT::i32);
  } else {
    SDValue C32Bit = CurDAG->getTargetConstant(ByteOffset, SL, MVT::i32);
    Offset = SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32,
                                            C32Bit), 0);
  }
  Imm = false;
  return true;
}

SDValue AMDGPUDAGToDAGISel::Expand32BitAddress(SDValue Addr) const {
  if (Addr.getValueType() != MVT::i32)
    return Addr;

  // Zero-extend a 32-bit address.
  SDLoc SL(Addr);

  const MachineFunction &MF = CurDAG->getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  unsigned AddrHiVal = Info->get32BitAddressHighBits();
  SDValue AddrHi = CurDAG->getTargetConstant(AddrHiVal, SL, MVT::i32);

  const SDValue Ops[] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64_XEXECRegClassID, SL, MVT::i32),
    Addr,
    CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32),
    SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, AddrHi),
            0),
    CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32),
  };

  return SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, SL, MVT::i64,
                                        Ops), 0);
}
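
// Note that the encoded SMRD offset is dword-scaled on SI/CI and byte-scaled
// from VI onward (so a byte offset of 16 encodes as 4 on SI, roughly); the
// getSMRDEncodedOffset helper used above hides that difference.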

bool AMDGPUDAGToDAGISel::SelectSMRD(SDValue Addr, SDValue &SBase,
                                    SDValue &Offset, bool &Imm) const {
  SDLoc SL(Addr);

  // A 32-bit (address + offset) should not cause unsigned 32-bit integer
  // wraparound, because s_load instructions perform the addition in 64 bits.
  if ((Addr.getValueType() != MVT::i32 ||
       Addr->getFlags().hasNoUnsignedWrap()) &&
      CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);

    if (SelectSMRDOffset(N1, Offset, Imm)) {
      SBase = Expand32BitAddress(N0);
      return true;
    }
  }
  SBase = Expand32BitAddress(Addr);
  Offset = CurDAG->getTargetConstant(0, SL, MVT::i32);
  Imm = true;
  return true;
}

bool AMDGPUDAGToDAGISel::SelectSMRDImm(SDValue Addr, SDValue &SBase,
                                       SDValue &Offset) const {
  bool Imm;
  return SelectSMRD(Addr, SBase, Offset, Imm) && Imm;
}

bool AMDGPUDAGToDAGISel::SelectSMRDImm32(SDValue Addr, SDValue &SBase,
                                         SDValue &Offset) const {
  if (Subtarget->getGeneration() != AMDGPUSubtarget::SEA_ISLANDS)
    return false;

  bool Imm;
  if (!SelectSMRD(Addr, SBase, Offset, Imm))
    return false;

  return !Imm && isa<ConstantSDNode>(Offset);
}

bool AMDGPUDAGToDAGISel::SelectSMRDSgpr(SDValue Addr, SDValue &SBase,
                                        SDValue &Offset) const {
  bool Imm;
  return SelectSMRD(Addr, SBase, Offset, Imm) && !Imm &&
         !isa<ConstantSDNode>(Offset);
}

bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm(SDValue Addr,
                                             SDValue &Offset) const {
  bool Imm;
  return SelectSMRDOffset(Addr, Offset, Imm) && Imm;
}

bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm32(SDValue Addr,
                                               SDValue &Offset) const {
  if (Subtarget->getGeneration() != AMDGPUSubtarget::SEA_ISLANDS)
    return false;

  bool Imm;
  if (!SelectSMRDOffset(Addr, Offset, Imm))
    return false;

  return !Imm && isa<ConstantSDNode>(Offset);
}

bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index,
                                            SDValue &Base,
                                            SDValue &Offset) const {
  SDLoc DL(Index);

  if (CurDAG->isBaseWithConstantOffset(Index)) {
    SDValue N0 = Index.getOperand(0);
    SDValue N1 = Index.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    // (add n0, c0)
    // Don't peel off the offset (c0) if doing so could possibly lead
    // the base (n0) to be negative.
    if (C1->getSExtValue() <= 0 || CurDAG->SignBitIsZero(N0)) {
      Base = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32);
      return true;
    }
  }

  if (isa<ConstantSDNode>(Index))
    return false;

  Base = Index;
  Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  return true;
}
  uint32_t PackedVal = Offset | (Width << 16);
  SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32);

  return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
}

void AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
  // "((a << b) srl c)" ---> "BFE_U32 a, (c - b), (32 - c)"
  // "((a << b) sra c)" ---> "BFE_I32 a, (c - b), (32 - c)"
  // Predicate: 0 < b <= c < 32

  const SDValue &Shl = N->getOperand(0);
  ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));

  if (B && C) {
    uint32_t BVal = B->getZExtValue();
    uint32_t CVal = C->getZExtValue();

    if (0 < BVal && BVal <= CVal && CVal < 32) {
      bool Signed = N->getOpcode() == ISD::SRA;
      unsigned Opcode = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;

      ReplaceNode(N, getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0), CVal - BVal,
                              32 - CVal));
      return;
    }
  }
  SelectCode(N);
}

void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::AND:
    if (N->getOperand(0).getOpcode() == ISD::SRL) {
      // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
      // Predicate: isMask(mask)
      const SDValue &Srl = N->getOperand(0);
      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));

      if (Shift && Mask) {
        uint32_t ShiftVal = Shift->getZExtValue();
        uint32_t MaskVal = Mask->getZExtValue();

        if (isMask_32(MaskVal)) {
          uint32_t WidthVal = countPopulation(MaskVal);

          ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
                                  Srl.getOperand(0), ShiftVal, WidthVal));
          return;
        }
      }
    }
    break;
  case ISD::SRL:
    if (N->getOperand(0).getOpcode() == ISD::AND) {
      // "((a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)"
      // Predicate: isMask(mask >> b)
      const SDValue &And = N->getOperand(0);
      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));

      if (Shift && Mask) {
        uint32_t ShiftVal = Shift->getZExtValue();
        uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;

        if (isMask_32(MaskVal)) {
          uint32_t WidthVal = countPopulation(MaskVal);

          ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
                                  And.getOperand(0), ShiftVal, WidthVal));
          return;
        }
      }
    } else if (N->getOperand(0).getOpcode() == ISD::SHL) {
      SelectS_BFEFromShifts(N);
      return;
    }
    break;
  case ISD::SRA:
    if (N->getOperand(0).getOpcode() == ISD::SHL) {
      SelectS_BFEFromShifts(N);
      return;
    }
    break;

  case ISD::SIGN_EXTEND_INREG: {
    // sext_inreg (srl x, 16), i8 -> bfe_i32 x, 16, 8
    SDValue Src = N->getOperand(0);
    if (Src.getOpcode() != ISD::SRL)
      break;

    const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
    if (!Amt)
      break;

    unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
    ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_I32, SDLoc(N), Src.getOperand(0),
                            Amt->getZExtValue(), Width));
    return;
  }
  }

  SelectCode(N);
}
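
// A brcond can use S_CBRANCH_SCC1 only if its condition is a single-use
// setcc on 32-bit operands, or a single-use eq/ne compare on 64-bit operands
// when the subtarget has scalar 64-bit compares,
// e.g. (brcond (setcc i32:%a, i32:%b, setlt), %bb).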
bool AMDGPUDAGToDAGISel::isCBranchSCC(const SDNode *N) const {
  assert(N->getOpcode() == ISD::BRCOND);
  if (!N->hasOneUse())
    return false;

  SDValue Cond = N->getOperand(1);
  if (Cond.getOpcode() == ISD::CopyToReg)
    Cond = Cond.getOperand(2);

  if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
    return false;

  MVT VT = Cond.getOperand(0).getSimpleValueType();
  if (VT == MVT::i32)
    return true;

  if (VT == MVT::i64) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
    return (CC == ISD::SETEQ || CC == ISD::SETNE) &&
           Subtarget->hasScalarCompareEq64();
  }

  return false;
}

void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
  SDValue Cond = N->getOperand(1);

  if (Cond.isUndef()) {
    CurDAG->SelectNodeTo(N, AMDGPU::SI_BR_UNDEF, MVT::Other,
                         N->getOperand(2), N->getOperand(0));
    return;
  }

  bool UseSCCBr = isCBranchSCC(N) && isUniformBr(N);
  unsigned BrOp = UseSCCBr ? AMDGPU::S_CBRANCH_SCC1 : AMDGPU::S_CBRANCH_VCCNZ;
  unsigned CondReg = UseSCCBr ? AMDGPU::SCC : AMDGPU::VCC;
  SDLoc SL(N);

  if (!UseSCCBr) {
    // This is the case that we are selecting to S_CBRANCH_VCCNZ. We have not
    // analyzed what generates the vcc value, so we do not know whether vcc
    // bits for disabled lanes are 0. Thus we need to mask out bits for
    // disabled lanes.
    //
    // For the case that we select S_CBRANCH_SCC1 and it gets
    // changed to S_CBRANCH_VCCNZ in SIFixSGPRCopies, SIFixSGPRCopies calls
    // SIInstrInfo::moveToVALU, which inserts the S_AND.
    //
    // We could add an analysis of what generates the vcc value here and omit
    // the S_AND when it is unnecessary. But it would be better to add a
    // separate pass after SIFixSGPRCopies that removes the unnecessary S_AND,
    // so it catches both cases.
    Cond = SDValue(CurDAG->getMachineNode(AMDGPU::S_AND_B64, SL, MVT::i1,
                       CurDAG->getRegister(AMDGPU::EXEC, MVT::i1),
                       Cond),
                   0);
  }

  SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, CondReg, Cond);
  CurDAG->SelectNodeTo(N, BrOp, MVT::Other,
                       N->getOperand(2), // Basic Block
                       VCC.getValue(0));
}

void AMDGPUDAGToDAGISel::SelectFMAD_FMA(SDNode *N) {
  MVT VT = N->getSimpleValueType(0);
  bool IsFMA = N->getOpcode() == ISD::FMA;
  if (VT != MVT::f32 || (!Subtarget->hasMadMixInsts() &&
                         !Subtarget->hasFmaMixInsts()) ||
      ((IsFMA && Subtarget->hasMadMixInsts()) ||
       (!IsFMA && Subtarget->hasFmaMixInsts()))) {
    SelectCode(N);
    return;
  }

  SDValue Src0 = N->getOperand(0);
  SDValue Src1 = N->getOperand(1);
  SDValue Src2 = N->getOperand(2);
  unsigned Src0Mods, Src1Mods, Src2Mods;

  // Avoid using v_mad_mix_f32/v_fma_mix_f32 unless there is actually an
  // operand using the conversion from f16.
  bool Sel0 = SelectVOP3PMadMixModsImpl(Src0, Src0, Src0Mods);
  bool Sel1 = SelectVOP3PMadMixModsImpl(Src1, Src1, Src1Mods);
  bool Sel2 = SelectVOP3PMadMixModsImpl(Src2, Src2, Src2Mods);

  assert((IsFMA || !Subtarget->hasFP32Denormals()) &&
         "fmad selected with denormals enabled");
  // TODO: We can select this with f32 denormals enabled if all the sources are
  // converted from f16 (in which case fmad isn't legal).

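  // E.g. (fma (fp_extend f16:%a), f32:%b, f32:%c) selects V_FMA_MIX_F32
  // (V_MAD_MIX_F32 for fmad) with op_sel_hi set on src0, so the hardware
  // converts %a from f16 while %b and %c are consumed as f32.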
  if (Sel0 || Sel1 || Sel2) {
    // For dummy operands.
    SDValue Zero = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
    SDValue Ops[] = {
      CurDAG->getTargetConstant(Src0Mods, SDLoc(), MVT::i32), Src0,
      CurDAG->getTargetConstant(Src1Mods, SDLoc(), MVT::i32), Src1,
      CurDAG->getTargetConstant(Src2Mods, SDLoc(), MVT::i32), Src2,
      CurDAG->getTargetConstant(0, SDLoc(), MVT::i1),
      Zero, Zero
    };

    CurDAG->SelectNodeTo(N,
                         IsFMA ? AMDGPU::V_FMA_MIX_F32 : AMDGPU::V_MAD_MIX_F32,
                         MVT::f32, Ops);
  } else {
    SelectCode(N);
  }
}

// This is here because there isn't a way to use the generated sub0_sub1 as the
// subreg index to EXTRACT_SUBREG in tablegen.
void AMDGPUDAGToDAGISel::SelectATOMIC_CMP_SWAP(SDNode *N) {
  MemSDNode *Mem = cast<MemSDNode>(N);
  unsigned AS = Mem->getAddressSpace();
  if (AS == AMDGPUAS::FLAT_ADDRESS) {
    SelectCode(N);
    return;
  }

  MVT VT = N->getSimpleValueType(0);
  bool Is32 = (VT == MVT::i32);
  SDLoc SL(N);

  MachineSDNode *CmpSwap = nullptr;
  if (Subtarget->hasAddr64()) {
    SDValue SRsrc, VAddr, SOffset, Offset, SLC;

    if (SelectMUBUFAddr64(Mem->getBasePtr(), SRsrc, VAddr, SOffset, Offset,
                          SLC)) {
      unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN :
        AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN;
      SDValue CmpVal = Mem->getOperand(2);

      // XXX - Do we care about glue operands?

      SDValue Ops[] = {
        CmpVal, VAddr, SRsrc, SOffset, Offset, SLC, Mem->getChain()
      };

      CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
    }
  }

  if (!CmpSwap) {
    SDValue SRsrc, SOffset, Offset, SLC;
    if (SelectMUBUFOffset(Mem->getBasePtr(), SRsrc, SOffset, Offset, SLC)) {
      unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN :
        AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN;

      SDValue CmpVal = Mem->getOperand(2);
      SDValue Ops[] = {
        CmpVal, SRsrc, SOffset, Offset, SLC, Mem->getChain()
      };

      CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
    }
  }

  if (!CmpSwap) {
    SelectCode(N);
    return;
  }

  MachineMemOperand *MMO = Mem->getMemOperand();
  CurDAG->setNodeMemRefs(CmpSwap, {MMO});

  unsigned SubReg = Is32 ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  SDValue Extract
    = CurDAG->getTargetExtractSubreg(SubReg, SL, VT, SDValue(CmpSwap, 0));

  ReplaceUses(SDValue(N, 0), Extract);
  ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 1));
  CurDAG->RemoveDeadNode(N);
}
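
// ds_append/ds_consume are selected here so the base pointer can be glued
// into m0 and a known-legal constant offset folded into the 16-bit offset
// field, e.g. ds.append on (add %ptr, 64) becomes DS_APPEND offset:64 with
// m0 = %ptr.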
void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) {
  unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  if ((IntrID != Intrinsic::amdgcn_ds_append &&
       IntrID != Intrinsic::amdgcn_ds_consume) ||
      N->getValueType(0) != MVT::i32) {
    SelectCode(N);
    return;
  }

  // The address is assumed to be uniform, so if it ends up in a VGPR, it will
  // be copied to an SGPR with readfirstlane.
  unsigned Opc = IntrID == Intrinsic::amdgcn_ds_append ?
    AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;

  SDValue Chain = N->getOperand(0);
  SDValue Ptr = N->getOperand(2);
  MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
  bool IsGDS = M->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;

  SDValue Offset;
  if (CurDAG->isBaseWithConstantOffset(Ptr)) {
    SDValue PtrBase = Ptr.getOperand(0);
    SDValue PtrOffset = Ptr.getOperand(1);

    const APInt &OffsetVal = cast<ConstantSDNode>(PtrOffset)->getAPIntValue();
    if (isDSOffsetLegal(PtrBase, OffsetVal.getZExtValue(), 16)) {
      N = glueCopyToM0(N, PtrBase);
      Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i32);
    }
  }

  if (!Offset) {
    N = glueCopyToM0(N, Ptr);
    Offset = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
  }

  SDValue Ops[] = {
    Offset,
    CurDAG->getTargetConstant(IsGDS, SDLoc(), MVT::i32),
    Chain,
    N->getOperand(N->getNumOperands() - 1) // New glue
  };

  CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
}

bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
                                            unsigned &Mods) const {
  Mods = 0;
  Src = In;

  if (Src.getOpcode() == ISD::FNEG) {
    Mods |= SISrcMods::NEG;
    Src = Src.getOperand(0);
  }

  if (Src.getOpcode() == ISD::FABS) {
    Mods |= SISrcMods::ABS;
    Src = Src.getOperand(0);
  }

  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
                                        SDValue &SrcMods) const {
  unsigned Mods;
  if (SelectVOP3ModsImpl(In, Src, Mods)) {
    SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
    return true;
  }

  return false;
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods_NNaN(SDValue In, SDValue &Src,
                                             SDValue &SrcMods) const {
  SelectVOP3Mods(In, Src, SrcMods);
  return isNoNanSrc(Src);
}

bool AMDGPUDAGToDAGISel::SelectVOP3NoMods(SDValue In, SDValue &Src) const {
  if (In.getOpcode() == ISD::FABS || In.getOpcode() == ISD::FNEG)
    return false;

  Src = In;
  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
                                         SDValue &SrcMods, SDValue &Clamp,
                                         SDValue &Omod) const {
  SDLoc DL(In);
  Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);

  return SelectVOP3Mods(In, Src, SrcMods);
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src,
                                                   SDValue &SrcMods,
                                                   SDValue &Clamp,
                                                   SDValue &Omod) const {
  Clamp = Omod = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
  return SelectVOP3Mods(In, Src, SrcMods);
}

bool AMDGPUDAGToDAGISel::SelectVOP3OMods(SDValue In, SDValue &Src,
                                         SDValue &Clamp, SDValue &Omod) const {
  Src = In;

  SDLoc DL(In);
  Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);

  return true;
}
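
// For packed (VOP3P) operands, an fneg of the whole vector toggles both NEG
// and NEG_HI, while fnegs of the individual build_vector halves toggle them
// separately; e.g. (fneg v2f16:%v) yields NEG | NEG_HI on %v.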
bool AMDGPUDAGToDAGISel::SelectVOP3PMods(SDValue In, SDValue &Src,
                                         SDValue &SrcMods) const {
  unsigned Mods = 0;
  Src = In;

  if (Src.getOpcode() == ISD::FNEG) {
    Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
    Src = Src.getOperand(0);
  }

  if (Src.getOpcode() == ISD::BUILD_VECTOR) {
    unsigned VecMods = Mods;

    SDValue Lo = stripBitcast(Src.getOperand(0));
    SDValue Hi = stripBitcast(Src.getOperand(1));

    if (Lo.getOpcode() == ISD::FNEG) {
      Lo = stripBitcast(Lo.getOperand(0));
      Mods ^= SISrcMods::NEG;
    }

    if (Hi.getOpcode() == ISD::FNEG) {
      Hi = stripBitcast(Hi.getOperand(0));
      Mods ^= SISrcMods::NEG_HI;
    }

    if (isExtractHiElt(Lo, Lo))
      Mods |= SISrcMods::OP_SEL_0;

    if (isExtractHiElt(Hi, Hi))
      Mods |= SISrcMods::OP_SEL_1;

    Lo = stripExtractLoElt(Lo);
    Hi = stripExtractLoElt(Hi);

    if (Lo == Hi && !isInlineImmediate(Lo.getNode())) {
      // Really a scalar input. Just select from the low half of the register
      // to avoid packing.

      Src = Lo;
      SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
      return true;
    }

    Mods = VecMods;
  }

  // Packed instructions do not have abs modifiers.
  Mods |= SISrcMods::OP_SEL_1;

  SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3PMods0(SDValue In, SDValue &Src,
                                          SDValue &SrcMods,
                                          SDValue &Clamp) const {
  SDLoc SL(In);

  // FIXME: Handle clamp and op_sel
  Clamp = CurDAG->getTargetConstant(0, SL, MVT::i32);

  return SelectVOP3PMods(In, Src, SrcMods);
}

bool AMDGPUDAGToDAGISel::SelectVOP3OpSel(SDValue In, SDValue &Src,
                                         SDValue &SrcMods) const {
  Src = In;
  // FIXME: Handle op_sel
  SrcMods = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3OpSel0(SDValue In, SDValue &Src,
                                          SDValue &SrcMods,
                                          SDValue &Clamp) const {
  SDLoc SL(In);

  // FIXME: Handle clamp
  Clamp = CurDAG->getTargetConstant(0, SL, MVT::i32);

  return SelectVOP3OpSel(In, Src, SrcMods);
}

bool AMDGPUDAGToDAGISel::SelectVOP3OpSelMods(SDValue In, SDValue &Src,
                                             SDValue &SrcMods) const {
  // FIXME: Handle op_sel
  return SelectVOP3Mods(In, Src, SrcMods);
}

bool AMDGPUDAGToDAGISel::SelectVOP3OpSelMods0(SDValue In, SDValue &Src,
                                              SDValue &SrcMods,
                                              SDValue &Clamp) const {
  SDLoc SL(In);

  // FIXME: Handle clamp
  Clamp = CurDAG->getTargetConstant(0, SL, MVT::i32);

  return SelectVOP3OpSelMods(In, Src, SrcMods);
}

// The return value is not whether the match is possible (which it always is),
// but whether or not a conversion is actually used.
bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src,
                                                   unsigned &Mods) const {
  Mods = 0;
  SelectVOP3ModsImpl(In, Src, Mods);

  if (Src.getOpcode() == ISD::FP_EXTEND) {
    Src = Src.getOperand(0);
    assert(Src.getValueType() == MVT::f16);
    Src = stripBitcast(Src);

    // Be careful about folding modifiers if we already have an abs. fneg is
    // applied last, so we don't want to apply an earlier fneg.
    if ((Mods & SISrcMods::ABS) == 0) {
      unsigned ModsTmp;
      SelectVOP3ModsImpl(Src, Src, ModsTmp);

      if ((ModsTmp & SISrcMods::NEG) != 0)
        Mods ^= SISrcMods::NEG;

      if ((ModsTmp & SISrcMods::ABS) != 0)
        Mods |= SISrcMods::ABS;
    }

    // op_sel/op_sel_hi decide the source type and which half of the source is
    // used. If the source's op_sel_hi is set, the source is converted from
    // fp16. If the source's op_sel is set, the high half of the source
    // register is picked.
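    // E.g. (fp_extend (extract_vector_elt v2f16:%v, 1)) selects %v with both
    // op_sel_hi (convert from fp16) and op_sel (use the high half) set.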

    Mods |= SISrcMods::OP_SEL_1;
    if (isExtractHiElt(Src, Src)) {
      Mods |= SISrcMods::OP_SEL_0;

      // TODO: Should we try to look for neg/abs here?
    }

    return true;
  }

  return false;
}

bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixMods(SDValue In, SDValue &Src,
                                               SDValue &SrcMods) const {
  unsigned Mods = 0;
  SelectVOP3PMadMixModsImpl(In, Src, Mods);
  SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
  return true;
}

SDValue AMDGPUDAGToDAGISel::getHi16Elt(SDValue In) const {
  if (In.isUndef())
    return CurDAG->getUNDEF(MVT::i32);

  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(In)) {
    SDLoc SL(In);
    return CurDAG->getConstant(C->getZExtValue() << 16, SL, MVT::i32);
  }

  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(In)) {
    SDLoc SL(In);
    return CurDAG->getConstant(
      C->getValueAPF().bitcastToAPInt().getZExtValue() << 16, SL, MVT::i32);
  }

  SDValue Src;
  if (isExtractHiElt(In, Src))
    return Src;

  return SDValue();
}

// TODO: Can we identify things like v_mad_mixhi_f16?
bool AMDGPUDAGToDAGISel::SelectHi16Elt(SDValue In, SDValue &Src) const {
  if (In.isUndef()) {
    Src = In;
    return true;
  }

  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(In)) {
    SDLoc SL(In);
    SDValue K = CurDAG->getTargetConstant(C->getZExtValue() << 16, SL,
                                          MVT::i32);
    MachineSDNode *MovK = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                                 SL, MVT::i32, K);
    Src = SDValue(MovK, 0);
    return true;
  }

  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(In)) {
    SDLoc SL(In);
    SDValue K = CurDAG->getTargetConstant(
      C->getValueAPF().bitcastToAPInt().getZExtValue() << 16, SL, MVT::i32);
    MachineSDNode *MovK = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                                 SL, MVT::i32, K);
    Src = SDValue(MovK, 0);
    return true;
  }

  return isExtractHiElt(In, Src);
}
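
// An immediate is better materialized in a VGPR when none of its uses can
// take an SGPR operand, even after commuting commutable users; e.g. a
// constant feeding an operand that is strictly VGPR in every use.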
bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode *N) const {
  if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) {
    return false;
  }
  const SIRegisterInfo *SIRI =
    static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
  const SIInstrInfo *SII =
    static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());

  unsigned Limit = 0;
  bool AllUsesAcceptSReg = true;
  for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
       Limit < 10 && U != E; ++U, ++Limit) {
    const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());

    // If the register class is unknown, it could be a class that requires an
    // SGPR, e.g. for an inline asm constraint.
    if (!RC || SIRI->isSGPRClass(RC))
      return false;

    if (RC != &AMDGPU::VS_32RegClass) {
      AllUsesAcceptSReg = false;
      SDNode *User = *U;
      if (User->isMachineOpcode()) {
        unsigned Opc = User->getMachineOpcode();
        const MCInstrDesc &Desc = SII->get(Opc);
        if (Desc.isCommutable()) {
          unsigned OpIdx = Desc.getNumDefs() + U.getOperandNo();
          unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
          if (SII->findCommutedOpIndices(Desc, OpIdx, CommuteIdx1)) {
            unsigned CommutedOpNo = CommuteIdx1 - Desc.getNumDefs();
            const TargetRegisterClass *CommutedRC =
              getOperandRegClass(*U, CommutedOpNo);
            if (CommutedRC == &AMDGPU::VS_32RegClass)
              AllUsesAcceptSReg = true;
          }
        }
      }
      // If "AllUsesAcceptSReg == false" so far, we haven't succeeded in
      // commuting the current user. This means we have at least one use that
      // strictly requires a VGPR, so we will not attempt to commute other
      // user instructions.
      if (!AllUsesAcceptSReg)
        break;
    }
  }
  return !AllUsesAcceptSReg && (Limit < 10);
}

bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode *N) const {
  auto Ld = cast<LoadSDNode>(N);

  if (Ld->getAlignment() < 4 || N->isDivergent())
    return false;

  // Uniform, sufficiently aligned loads from the constant address spaces can
  // always be selected as scalar loads.
  if (Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
      Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  // Uniform global loads may also be scalarized if the subtarget allows it
  // and nothing clobbers the memory location.
  return Subtarget->getScalarizeGlobalBehavior() &&
         Ld->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
         !Ld->isVolatile() &&
         static_cast<const SITargetLowering *>(getTargetLowering())
             ->isMemOpHasNoClobberedMemOperand(N);
}

void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering& Lowering =
    *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;

    // Go over all selected nodes and try to fold them a bit more.
    SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_begin();
    while (Position != CurDAG->allnodes_end()) {
      SDNode *Node = &*Position++;
      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(Node);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        if (ResNode)
          ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}

bool R600DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
  Subtarget = &MF.getSubtarget<R600Subtarget>();
  return SelectionDAGISel::runOnMachineFunction(MF);
}

bool R600DAGToDAGISel::isConstantLoad(const MemSDNode *N, int CbId) const {
  if (!N->readMem())
    return false;
  if (CbId == -1)
    return N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
           N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT;

  return N->getAddressSpace() == AMDGPUAS::CONSTANT_BUFFER_0 + CbId;
}

bool R600DAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                       SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, SDLoc(Addr),
                                       true);
    return true;
  }
  return false;
}
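
// Any non-constant address is selected as a variable base register with a
// zero offset; constant addresses are matched by
// SelectGlobalValueConstantOffset instead.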
bool R600DAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                       SDValue &BaseReg,
                                                       SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, SDLoc(Addr), true);
    return true;
  }
  return false;
}

void R600DAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return;   // Already selected.
  }

  switch (Opc) {
  default: break;
  case AMDGPUISD::BUILD_VERTICAL_VECTOR:
  case ISD::SCALAR_TO_VECTOR:
  case ISD::BUILD_VECTOR: {
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    unsigned RegClassID;
    // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
    // that adds a 128-bit reg copy when going through the
    // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
    // as possible because they can't be bundled by our scheduler.
    switch (NumVectorElts) {
    case 2: RegClassID = R600::R600_Reg64RegClassID; break;
    case 4:
      if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
        RegClassID = R600::R600_Reg128VerticalRegClassID;
      else
        RegClassID = R600::R600_Reg128RegClassID;
      break;
    default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
    }
    SelectBuildVector(N, RegClassID);
    return;
  }
  }

  SelectCode(N);
}

bool R600DAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                          SDValue &Offset) {
  ConstantSDNode *C;
  SDLoc DL(Addr);

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  }

  return true;
}

bool R600DAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                          SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
                                       MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  R600::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
                                       MVT::i32);
    return true;
  }

  // Default case, no offset.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
  return true;
}