//===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUArgumentUsageInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPUInstrInfo.h"
#include "AMDGPUPerfHintAnalysis.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/InitializePasses.h"
#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Dominators.h"
#endif
#include "llvm/IR/Instruction.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <new>
#include <vector>

#define DEBUG_TYPE "isel"

using namespace llvm;

namespace llvm {

class R600InstrInfo;

} // end namespace llvm

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {

static bool isNullConstantOrUndef(SDValue V) {
  if (V.isUndef())
    return true;

  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isNullValue();
}

static bool getConstantValue(SDValue N, uint32_t &Out) {
  // This is only used for packed vectors, where using 0 for undef should
  // always be good.
  if (N.isUndef()) {
    Out = 0;
    return true;
  }

  if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
    Out = C->getAPIntValue().getSExtValue();
    return true;
  }

  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N)) {
    Out = C->getValueAPF().bitcastToAPInt().getSExtValue();
    return true;
  }

  return false;
}

// TODO: Handle undef as zero
static SDNode *packConstantV2I16(const SDNode *N, SelectionDAG &DAG,
                                 bool Negate = false) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR && N->getNumOperands() == 2);
  uint32_t LHSVal, RHSVal;
  if (getConstantValue(N->getOperand(0), LHSVal) &&
      getConstantValue(N->getOperand(1), RHSVal)) {
    SDLoc SL(N);
    uint32_t K = Negate ?
      (-LHSVal & 0xffff) | (-RHSVal << 16) :
      (LHSVal & 0xffff) | (RHSVal << 16);
    return DAG.getMachineNode(AMDGPU::S_MOV_B32, SL, N->getValueType(0),
                              DAG.getTargetConstant(K, SL, MVT::i32));
  }

  return nullptr;
}

static SDNode *packNegConstantV2I16(const SDNode *N, SelectionDAG &DAG) {
  return packConstantV2I16(N, DAG, true);
}

/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const GCNSubtarget *Subtarget;

  // Default FP mode for the current function.
  AMDGPU::SIModeRegisterDefaults Mode;

  bool EnableLateStructurizeCFG;

public:
  explicit AMDGPUDAGToDAGISel(TargetMachine *TM = nullptr,
                              CodeGenOpt::Level OptLevel = CodeGenOpt::Default)
    : SelectionDAGISel(*TM, OptLevel) {
    EnableLateStructurizeCFG = AMDGPUTargetMachine::EnableLateStructurizeCFG;
  }
  ~AMDGPUDAGToDAGISel() override = default;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AMDGPUArgumentUsageInfo>();
    AU.addRequired<LegacyDivergenceAnalysis>();
#ifdef EXPENSIVE_CHECKS
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
#endif
    SelectionDAGISel::getAnalysisUsage(AU);
  }

  bool matchLoadD16FromBuildVector(SDNode *N) const;

  bool runOnMachineFunction(MachineFunction &MF) override;
  void PreprocessISelDAG() override;
  void Select(SDNode *N) override;
  StringRef getPassName() const override;
  void PostprocessISelDAG() override;

protected:
  void SelectBuildVector(SDNode *N, unsigned RegClassID);

private:
  std::pair<SDValue, SDValue> foldFrameIndex(SDValue N) const;
  bool isNoNanSrc(SDValue N) const;
  bool isInlineImmediate(const SDNode *N, bool Negated = false) const;
  bool isNegInlineImmediate(const SDNode *N) const {
    return isInlineImmediate(N, true);
  }

  bool isInlineImmediate16(int64_t Imm) const {
    return AMDGPU::isInlinableLiteral16(Imm, Subtarget->hasInv2PiInlineImm());
  }

  bool isInlineImmediate32(int64_t Imm) const {
    return AMDGPU::isInlinableLiteral32(Imm, Subtarget->hasInv2PiInlineImm());
  }

  bool isInlineImmediate64(int64_t Imm) const {
    return AMDGPU::isInlinableLiteral64(Imm, Subtarget->hasInv2PiInlineImm());
  }

  bool isInlineImmediate(const APFloat &Imm) const {
    return Subtarget->getInstrInfo()->isInlineConstant(Imm);
  }

  bool isVGPRImm(const SDNode *N) const;
  bool isUniformLoad(const SDNode *N) const;
  bool isUniformBr(const SDNode *N) const;

  MachineSDNode *buildSMovImm64(SDLoc &DL, uint64_t Val, EVT VT) const;

  SDNode *glueCopyToOp(SDNode *N, SDValue NewChain, SDValue Glue) const;
  SDNode *glueCopyToM0(SDNode *N, SDValue Val) const;
  SDNode *glueCopyToM0LDSInit(SDNode *N) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  virtual bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  virtual bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool isDSOffsetLegal(SDValue Base, unsigned Offset,
                       unsigned OffsetBits) const;
  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                 SDValue &Offset1) const;
  bool SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                   SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
                   SDValue &TFE, SDValue &DLC, SDValue &SWZ) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &SOffset, SDValue &Offset, SDValue &GLC,
                         SDValue &SLC, SDValue &TFE, SDValue &DLC,
                         SDValue &SWZ) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                         SDValue &VAddr, SDValue &SOffset, SDValue &Offset,
                         SDValue &SLC) const;
  bool SelectMUBUFScratchOffen(SDNode *Parent,
                               SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                               SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFScratchOffset(SDNode *Parent,
                                SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                                SDValue &Offset) const;

  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
                         SDValue &Offset, SDValue &GLC, SDValue &SLC,
                         SDValue &TFE, SDValue &DLC, SDValue &SWZ) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset, SDValue &SLC) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset) const;

  template <bool IsSigned>
  bool SelectFlatOffset(SDNode *N, SDValue Addr, SDValue &VAddr,
                        SDValue &Offset, SDValue &SLC) const;
  bool SelectFlatAtomic(SDNode *N, SDValue Addr, SDValue &VAddr,
                        SDValue &Offset, SDValue &SLC) const;
  bool SelectFlatAtomicSigned(SDNode *N, SDValue Addr, SDValue &VAddr,
                              SDValue &Offset, SDValue &SLC) const;

  bool SelectSMRDOffset(SDValue ByteOffsetNode, SDValue &Offset,
                        bool &Imm) const;
  SDValue Expand32BitAddress(SDValue Addr) const;
  bool SelectSMRD(SDValue Addr, SDValue &SBase, SDValue &Offset,
                  bool &Imm) const;
  bool SelectSMRDImm(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDImm32(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDSgpr(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDBufferImm(SDValue Addr, SDValue &Offset) const;
  bool SelectSMRDBufferImm32(SDValue Addr, SDValue &Offset) const;
  bool SelectMOVRELOffset(SDValue Index, SDValue &Base, SDValue &Offset) const;

  bool SelectVOP3Mods_NNaN(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3ModsImpl(SDValue In, SDValue &Src, unsigned &SrcMods) const;
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3NoMods(SDValue In, SDValue &Src) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3NoMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                         SDValue &Clamp, SDValue &Omod) const;

  bool SelectVOP3OMods(SDValue In, SDValue &Src,
                       SDValue &Clamp, SDValue &Omod) const;

  bool SelectVOP3PMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  bool SelectVOP3OpSel(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  bool SelectVOP3OpSelMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src, unsigned &Mods) const;
  bool SelectVOP3PMadMixMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  SDValue getHi16Elt(SDValue In) const;

  SDValue getMaterializedScalarImm32(int64_t Val, const SDLoc &DL) const;

  void SelectADD_SUB_I64(SDNode *N);
  void SelectAddcSubb(SDNode *N);
  void SelectUADDO_USUBO(SDNode *N);
  void SelectDIV_SCALE(SDNode *N);
  void SelectMAD_64_32(SDNode *N);
  void SelectFMA_W_CHAIN(SDNode *N);
  void SelectFMUL_W_CHAIN(SDNode *N);

  SDNode *getS_BFE(unsigned Opcode, const SDLoc &DL, SDValue Val,
                   uint32_t Offset, uint32_t Width);
  void SelectS_BFEFromShifts(SDNode *N);
  void SelectS_BFE(SDNode *N);
  bool isCBranchSCC(const SDNode *N) const;
  void SelectBRCOND(SDNode *N);
  void SelectFMAD_FMA(SDNode *N);
  void SelectATOMIC_CMP_SWAP(SDNode *N);
  void SelectDSAppendConsume(SDNode *N, unsigned IntrID);
  void SelectDS_GWS(SDNode *N, unsigned IntrID);
  void SelectInterpP1F16(SDNode *N);
  void SelectINTRINSIC_W_CHAIN(SDNode *N);
  void SelectINTRINSIC_WO_CHAIN(SDNode *N);
  void SelectINTRINSIC_VOID(SDNode *N);

protected:
  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};

class R600DAGToDAGISel : public AMDGPUDAGToDAGISel {
  const R600Subtarget *Subtarget;

  bool isConstantLoad(const MemSDNode *N, int cbID) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue& Offset);
public:
  explicit R600DAGToDAGISel(TargetMachine *TM, CodeGenOpt::Level OptLevel) :
      AMDGPUDAGToDAGISel(TM, OptLevel) {}

  void Select(SDNode *N) override;

  bool SelectADDRIndirect(SDValue Addr, SDValue &Base,
                          SDValue &Offset) override;
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                          SDValue &Offset) override;

  bool runOnMachineFunction(MachineFunction &MF) override;

  void PreprocessISelDAG() override {}

protected:
  // Include the pieces autogenerated from the target description.
#include "R600GenDAGISel.inc"
};

static SDValue stripBitcast(SDValue Val) {
  return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
}

// Figure out if this is really an extract of the high 16-bits of a dword.
static bool isExtractHiElt(SDValue In, SDValue &Out) {
  In = stripBitcast(In);
  if (In.getOpcode() != ISD::TRUNCATE)
    return false;

  SDValue Srl = In.getOperand(0);
  if (Srl.getOpcode() == ISD::SRL) {
    if (ConstantSDNode *ShiftAmt = dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
      if (ShiftAmt->getZExtValue() == 16) {
        Out = stripBitcast(Srl.getOperand(0));
        return true;
      }
    }
  }

  return false;
}

// Look through operations that obscure just looking at the low 16-bits of the
// same register.
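// An illustrative example (not from the original comment): the pattern
// (i16 (trunc (i32 (bitcast v2f16:$x)))) only reads the low half of $x, and
// this helper strips the truncate/bitcast so that can be seen directly.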
static SDValue stripExtractLoElt(SDValue In) {
  if (In.getOpcode() == ISD::TRUNCATE) {
    SDValue Src = In.getOperand(0);
    if (Src.getValueType().getSizeInBits() == 32)
      return stripBitcast(Src);
  }

  return In;
}

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(AMDGPUDAGToDAGISel, "amdgpu-isel",
                      "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)
INITIALIZE_PASS_DEPENDENCY(AMDGPUArgumentUsageInfo)
INITIALIZE_PASS_DEPENDENCY(AMDGPUPerfHintAnalysis)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
#ifdef EXPENSIVE_CHECKS
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
#endif
INITIALIZE_PASS_END(AMDGPUDAGToDAGISel, "amdgpu-isel",
                    "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)

/// This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine *TM,
                                        CodeGenOpt::Level OptLevel) {
  return new AMDGPUDAGToDAGISel(TM, OptLevel);
}

/// This pass converts a legalized DAG into an R600-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createR600ISelDag(TargetMachine *TM,
                                      CodeGenOpt::Level OptLevel) {
  return new R600DAGToDAGISel(TM, OptLevel);
}

bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
#ifdef EXPENSIVE_CHECKS
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  for (auto &L : LI->getLoopsInPreorder()) {
    assert(L->isLCSSAForm(DT));
  }
#endif
  Subtarget = &MF.getSubtarget<GCNSubtarget>();
  Mode = AMDGPU::SIModeRegisterDefaults(MF.getFunction());
  return SelectionDAGISel::runOnMachineFunction(MF);
}

bool AMDGPUDAGToDAGISel::matchLoadD16FromBuildVector(SDNode *N) const {
  assert(Subtarget->d16PreservesUnusedBits());
  MVT VT = N->getValueType(0).getSimpleVT();
  if (VT != MVT::v2i16 && VT != MVT::v2f16)
    return false;

  SDValue Lo = N->getOperand(0);
  SDValue Hi = N->getOperand(1);

  LoadSDNode *LdHi = dyn_cast<LoadSDNode>(stripBitcast(Hi));

  // build_vector lo, (load ptr) -> load_d16_hi ptr, lo
  // build_vector lo, (zextload ptr from i8) -> load_d16_hi_u8 ptr, lo
  // build_vector lo, (sextload ptr from i8) -> load_d16_hi_i8 ptr, lo

  // Need to check for possible indirect dependencies on the other half of the
  // vector to avoid introducing a cycle.
  if (LdHi && Hi.hasOneUse() && !LdHi->isPredecessorOf(Lo.getNode())) {
    SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);

    SDValue TiedIn = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Lo);
    SDValue Ops[] = {
      LdHi->getChain(), LdHi->getBasePtr(), TiedIn
    };

    unsigned LoadOp = AMDGPUISD::LOAD_D16_HI;
    if (LdHi->getMemoryVT() == MVT::i8) {
      LoadOp = LdHi->getExtensionType() == ISD::SEXTLOAD ?
        AMDGPUISD::LOAD_D16_HI_I8 : AMDGPUISD::LOAD_D16_HI_U8;
    } else {
      assert(LdHi->getMemoryVT() == MVT::i16);
    }

    SDValue NewLoadHi =
      CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdHi), VTList,
                                  Ops, LdHi->getMemoryVT(),
                                  LdHi->getMemOperand());

    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadHi);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdHi, 1), NewLoadHi.getValue(1));
    return true;
  }

  // build_vector (load ptr), hi -> load_d16_lo ptr, hi
  // build_vector (zextload ptr from i8), hi -> load_d16_lo_u8 ptr, hi
  // build_vector (sextload ptr from i8), hi -> load_d16_lo_i8 ptr, hi
  LoadSDNode *LdLo = dyn_cast<LoadSDNode>(stripBitcast(Lo));
  if (LdLo && Lo.hasOneUse()) {
    SDValue TiedIn = getHi16Elt(Hi);
    if (!TiedIn || LdLo->isPredecessorOf(TiedIn.getNode()))
      return false;

    SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);
    unsigned LoadOp = AMDGPUISD::LOAD_D16_LO;
    if (LdLo->getMemoryVT() == MVT::i8) {
      LoadOp = LdLo->getExtensionType() == ISD::SEXTLOAD ?
        AMDGPUISD::LOAD_D16_LO_I8 : AMDGPUISD::LOAD_D16_LO_U8;
    } else {
      assert(LdLo->getMemoryVT() == MVT::i16);
    }

    TiedIn = CurDAG->getNode(ISD::BITCAST, SDLoc(N), VT, TiedIn);

    SDValue Ops[] = {
      LdLo->getChain(), LdLo->getBasePtr(), TiedIn
    };

    SDValue NewLoadLo =
      CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdLo), VTList,
                                  Ops, LdLo->getMemoryVT(),
                                  LdLo->getMemOperand());

    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadLo);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdLo, 1), NewLoadLo.getValue(1));
    return true;
  }

  return false;
}

void AMDGPUDAGToDAGISel::PreprocessISelDAG() {
  if (!Subtarget->d16PreservesUnusedBits())
    return;

  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    if (N->use_empty())
      continue;

    switch (N->getOpcode()) {
    case ISD::BUILD_VECTOR:
      MadeChange |= matchLoadD16FromBuildVector(N);
      break;
    default:
      break;
    }
  }

  if (MadeChange) {
    CurDAG->RemoveDeadNodes();
    LLVM_DEBUG(dbgs() << "After PreProcess:\n";
               CurDAG->dump(););
  }
}

bool AMDGPUDAGToDAGISel::isNoNanSrc(SDValue N) const {
  if (TM.Options.NoNaNsFPMath)
    return true;

  // TODO: Move into isKnownNeverNaN
  if (N->getFlags().isDefined())
    return N->getFlags().hasNoNaNs();

  return CurDAG->isKnownNeverNaN(N);
}

bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N,
                                           bool Negated) const {
  if (N->isUndef())
    return true;

  const SIInstrInfo *TII = Subtarget->getInstrInfo();
  if (Negated) {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
      return TII->isInlineConstant(-C->getAPIntValue());

    if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
      return TII->isInlineConstant(-C->getValueAPF().bitcastToAPInt());

  } else {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
      return TII->isInlineConstant(C->getAPIntValue());

    if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
      return TII->isInlineConstant(C->getValueAPF().bitcastToAPInt());
  }

  return false;
}

/// Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                                  unsigned OpNo) const {
  if (!N->isMachineOpcode()) {
    if (N->getOpcode() == ISD::CopyToReg) {
      unsigned Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
      if (Register::isVirtualRegister(Reg)) {
        MachineRegisterInfo &MRI = CurDAG->getMachineFunction().getRegInfo();
        return MRI.getRegClass(Reg);
      }

      const SIRegisterInfo *TRI
        = static_cast<const GCNSubtarget *>(Subtarget)->getRegisterInfo();
      return TRI->getPhysRegClass(Reg);
    }

    return nullptr;
  }

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc =
        Subtarget->getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return Subtarget->getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
        Subtarget->getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
                                                               SubRegIdx);
  }
  }
}

SDNode *AMDGPUDAGToDAGISel::glueCopyToOp(SDNode *N, SDValue NewChain,
                                         SDValue Glue) const {
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(NewChain); // Replace the chain.
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    Ops.push_back(N->getOperand(i));

  Ops.push_back(Glue);
  return CurDAG->MorphNodeTo(N, N->getOpcode(), N->getVTList(), Ops);
}

SDNode *AMDGPUDAGToDAGISel::glueCopyToM0(SDNode *N, SDValue Val) const {
  const SITargetLowering& Lowering =
      *static_cast<const SITargetLowering*>(getTargetLowering());

  assert(N->getOperand(0).getValueType() == MVT::Other && "Expected chain");

  SDValue M0 = Lowering.copyToM0(*CurDAG, N->getOperand(0), SDLoc(N), Val);
  return glueCopyToOp(N, M0, M0.getValue(1));
}

SDNode *AMDGPUDAGToDAGISel::glueCopyToM0LDSInit(SDNode *N) const {
  unsigned AS = cast<MemSDNode>(N)->getAddressSpace();
  if (AS == AMDGPUAS::LOCAL_ADDRESS) {
    if (Subtarget->ldsRequiresM0Init())
      return glueCopyToM0(N, CurDAG->getTargetConstant(-1, SDLoc(N), MVT::i32));
  } else if (AS == AMDGPUAS::REGION_ADDRESS) {
    MachineFunction &MF = CurDAG->getMachineFunction();
    unsigned Value = MF.getInfo<SIMachineFunctionInfo>()->getGDSSize();
    return
        glueCopyToM0(N, CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i32));
  }
  return N;
}

MachineSDNode *AMDGPUDAGToDAGISel::buildSMovImm64(SDLoc &DL, uint64_t Imm,
                                                  EVT VT) const {
  SDNode *Lo = CurDAG->getMachineNode(
      AMDGPU::S_MOV_B32, DL, MVT::i32,
      CurDAG->getTargetConstant(Imm & 0xFFFFFFFF, DL, MVT::i32));
  SDNode *Hi =
      CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                             CurDAG->getTargetConstant(Imm >> 32, DL, MVT::i32));
  const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)};

  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, VT, Ops);
}

void AMDGPUDAGToDAGISel::SelectBuildVector(SDNode *N, unsigned RegClassID) {
  EVT VT = N->getValueType(0);
  unsigned NumVectorElts = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc DL(N);
  SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);

  if (NumVectorElts == 1) {
    CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT, N->getOperand(0),
                         RegClass);
    return;
  }

  assert(NumVectorElts <= 32 && "Vectors with more than 32 elements not "
                                "supported yet");
  // 32 = Max Num Vector Elements
  // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
  // 1 = Vector Register Class
  SmallVector<SDValue, 32 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

  bool IsGCN = CurDAG->getSubtarget().getTargetTriple().getArch() ==
               Triple::amdgcn;
  RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
  bool IsRegSeq = true;
  unsigned NOps = N->getNumOperands();
  for (unsigned i = 0; i < NOps; i++) {
    // XXX: Why is this here?
    if (isa<RegisterSDNode>(N->getOperand(i))) {
      IsRegSeq = false;
      break;
    }
    unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
                         : R600RegisterInfo::getSubRegFromChannel(i);
    RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
    RegSeqArgs[1 + (2 * i) + 1] = CurDAG->getTargetConstant(Sub, DL, MVT::i32);
  }
  if (NOps != NumVectorElts) {
    // Fill in the missing undef elements if this was a scalar_to_vector.
    assert(N->getOpcode() == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
    MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                   DL, EltVT);
    for (unsigned i = NOps; i < NumVectorElts; ++i) {
      unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
                           : R600RegisterInfo::getSubRegFromChannel(i);
      RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(Sub, DL, MVT::i32);
    }
  }

  if (!IsRegSeq)
    SelectCode(N);
  CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(), RegSeqArgs);
}

void AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return; // Already selected.
  }

  // isa<MemSDNode> almost works but is slightly too permissive for some DS
  // intrinsics.
  if (Opc == ISD::LOAD || Opc == ISD::STORE || isa<AtomicSDNode>(N) ||
      (Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC ||
       Opc == ISD::ATOMIC_LOAD_FADD ||
       Opc == AMDGPUISD::ATOMIC_LOAD_FMIN ||
       Opc == AMDGPUISD::ATOMIC_LOAD_FMAX)) {
    N = glueCopyToM0LDSInit(N);
    SelectCode(N);
    return;
  }

  switch (Opc) {
  default:
    break;
  // We are selecting i64 ADD here instead of custom lowering it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE: {
    if (N->getValueType(0) != MVT::i64)
      break;

    SelectADD_SUB_I64(N);
    return;
  }
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:
    if (N->getValueType(0) != MVT::i32)
      break;

    SelectAddcSubb(N);
    return;
  case ISD::UADDO:
  case ISD::USUBO: {
    SelectUADDO_USUBO(N);
    return;
  }
  case AMDGPUISD::FMUL_W_CHAIN: {
    SelectFMUL_W_CHAIN(N);
    return;
  }
  case AMDGPUISD::FMA_W_CHAIN: {
    SelectFMA_W_CHAIN(N);
    return;
  }

  case ISD::SCALAR_TO_VECTOR:
  case ISD::BUILD_VECTOR: {
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    if (VT.getScalarSizeInBits() == 16) {
      if (Opc == ISD::BUILD_VECTOR && NumVectorElts == 2) {
        if (SDNode *Packed = packConstantV2I16(N, *CurDAG)) {
          ReplaceNode(N, Packed);
          return;
        }
      }

      break;
    }

    assert(VT.getVectorElementType().bitsEq(MVT::i32));
    unsigned RegClassID =
        SIRegisterInfo::getSGPRClassForBitWidth(NumVectorElts * 32)->getID();
    SelectBuildVector(N, RegClassID);
    return;
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    SDLoc DL(N);
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
                                          N->getValueType(0), Ops));
    return;
  }

  case ISD::Constant:
  case ISD::ConstantFP: {
    if (N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    SDLoc DL(N);
    ReplaceNode(N, buildSMovImm64(DL, Imm, N->getValueType(0)));
    return;
  }
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    // There is a scalar version available, but unlike the vector version which
    // has a separate operand for the offset and width, the scalar version packs
    // the width and offset into a single operand. Try to move to the scalar
    // version if the offsets are constant, so that we can try to keep extended
    // loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    ReplaceNode(N, getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
                            SDLoc(N), N->getOperand(0), OffsetVal, WidthVal));
    return;
  }
  case AMDGPUISD::DIV_SCALE: {
    SelectDIV_SCALE(N);
    return;
  }
  case AMDGPUISD::MAD_I64_I32:
  case AMDGPUISD::MAD_U64_U32: {
    SelectMAD_64_32(N);
    return;
  }
  case ISD::CopyToReg: {
    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());
    N = Lowering.legalizeTargetIndependentNode(N, *CurDAG);
    break;
  }
  case ISD::AND:
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SIGN_EXTEND_INREG:
    if (N->getValueType(0) != MVT::i32)
      break;

    SelectS_BFE(N);
    return;
  case ISD::BRCOND:
    SelectBRCOND(N);
    return;
  case ISD::FMAD:
  case ISD::FMA:
    SelectFMAD_FMA(N);
    return;
  case AMDGPUISD::ATOMIC_CMP_SWAP:
    SelectATOMIC_CMP_SWAP(N);
    return;
  case AMDGPUISD::CVT_PKRTZ_F16_F32:
  case AMDGPUISD::CVT_PKNORM_I16_F32:
  case AMDGPUISD::CVT_PKNORM_U16_F32:
  case AMDGPUISD::CVT_PK_U16_U32:
  case AMDGPUISD::CVT_PK_I16_I32: {
    // Hack around using a legal type if f16 is illegal.
    if (N->getValueType(0) == MVT::i32) {
      MVT NewVT = Opc == AMDGPUISD::CVT_PKRTZ_F16_F32 ? MVT::v2f16 : MVT::v2i16;
      N = CurDAG->MorphNodeTo(N, N->getOpcode(), CurDAG->getVTList(NewVT),
                              { N->getOperand(0), N->getOperand(1) });
      SelectCode(N);
      return;
    }

    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    SelectINTRINSIC_W_CHAIN(N);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    SelectINTRINSIC_WO_CHAIN(N);
    return;
  }
  case ISD::INTRINSIC_VOID: {
    SelectINTRINSIC_VOID(N);
    return;
  }
  }

  SelectCode(N);
}

bool AMDGPUDAGToDAGISel::isUniformBr(const SDNode *N) const {
  const BasicBlock *BB = FuncInfo->MBB->getBasicBlock();
  const Instruction *Term = BB->getTerminator();
  return Term->getMetadata("amdgpu.uniform") ||
         Term->getMetadata("structurizecfg.uniform");
}

StringRef AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;
  SDLoc DL(Addr);

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  }

  return true;
}

SDValue AMDGPUDAGToDAGISel::getMaterializedScalarImm32(int64_t Val,
                                                       const SDLoc &DL) const {
  SDNode *Mov = CurDAG->getMachineNode(
      AMDGPU::S_MOV_B32, DL, MVT::i32,
      CurDAG->getTargetConstant(Val, DL, MVT::i32));
  return SDValue(Mov, 0);
}

// FIXME: Should only handle addcarry/subcarry
void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  unsigned Opcode = N->getOpcode();
  bool ConsumeCarry = (Opcode == ISD::ADDE || Opcode == ISD::SUBE);
  bool ProduceCarry =
      ConsumeCarry || Opcode == ISD::ADDC || Opcode == ISD::SUBC;
  bool IsAdd = Opcode == ISD::ADD || Opcode == ISD::ADDC || Opcode == ISD::ADDE;

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);

  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);

  // Indexed as OpcMap[HasCarryIn][IsDivergent][IsAdd].
  static const unsigned OpcMap[2][2][2] = {
      {{AMDGPU::S_SUB_U32, AMDGPU::S_ADD_U32},
       {AMDGPU::V_SUB_I32_e32, AMDGPU::V_ADD_I32_e32}},
      {{AMDGPU::S_SUBB_U32, AMDGPU::S_ADDC_U32},
       {AMDGPU::V_SUBB_U32_e32, AMDGPU::V_ADDC_U32_e32}}};

  unsigned Opc = OpcMap[0][N->isDivergent()][IsAdd];
  unsigned CarryOpc = OpcMap[1][N->isDivergent()][IsAdd];

  SDNode *AddLo;
  if (!ConsumeCarry) {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };
    AddLo = CurDAG->getMachineNode(Opc, DL, VTList, Args);
  } else {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0), N->getOperand(2) };
    AddLo = CurDAG->getMachineNode(CarryOpc, DL, VTList, Args);
  }
  SDValue AddHiArgs[] = {
    SDValue(Hi0, 0),
    SDValue(Hi1, 0),
    SDValue(AddLo, 1)
  };
  SDNode *AddHi = CurDAG->getMachineNode(CarryOpc, DL, VTList, AddHiArgs);

  SDValue RegSequenceArgs[] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
    SDValue(AddLo,0),
    Sub0,
    SDValue(AddHi,0),
    Sub1,
  };
  SDNode *RegSequence = CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                               MVT::i64, RegSequenceArgs);

  if (ProduceCarry) {
    // Replace the carry-use
    ReplaceUses(SDValue(N, 1), SDValue(AddHi, 1));
  }

  // Replace the remaining uses.
  ReplaceNode(N, RegSequence);
}

void AMDGPUDAGToDAGISel::SelectAddcSubb(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue CI = N->getOperand(2);

  if (N->isDivergent()) {
    unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::V_ADDC_U32_e64
                                                   : AMDGPU::V_SUBB_U32_e64;
    CurDAG->SelectNodeTo(
        N, Opc, N->getVTList(),
        {LHS, RHS, CI,
         CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
  } else {
    unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::S_ADD_CO_PSEUDO
                                                   : AMDGPU::S_SUB_CO_PSEUDO;
    CurDAG->SelectNodeTo(N, Opc, N->getVTList(), {LHS, RHS, CI});
  }
}

void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) {
  // The names of the opcodes are misleading. v_add_i32/v_sub_i32 have unsigned
  // carry out despite the _i32 name. These were renamed in VI to _U32.
  // FIXME: We should probably rename the opcodes here.
  bool IsAdd = N->getOpcode() == ISD::UADDO;
  bool IsVALU = N->isDivergent();

  for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end(); UI != E;
       ++UI)
    if (UI.getUse().getResNo() == 1) {
      if ((IsAdd && (UI->getOpcode() != ISD::ADDCARRY)) ||
          (!IsAdd && (UI->getOpcode() != ISD::SUBCARRY))) {
        IsVALU = true;
        break;
      }
    }

  if (IsVALU) {
    unsigned Opc = IsAdd ? AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;

    CurDAG->SelectNodeTo(
        N, Opc, N->getVTList(),
        {N->getOperand(0), N->getOperand(1),
         CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
  } else {
    unsigned Opc = N->getOpcode() == ISD::UADDO ? AMDGPU::S_UADDO_PSEUDO
                                                : AMDGPU::S_USUBO_PSEUDO;

    CurDAG->SelectNodeTo(N, Opc, N->getVTList(),
                         {N->getOperand(0), N->getOperand(1)});
  }
}

void AMDGPUDAGToDAGISel::SelectFMA_W_CHAIN(SDNode *N) {
  SDLoc SL(N);
  // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp, omod
  SDValue Ops[10];

  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[6], Ops[7]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  SelectVOP3Mods(N->getOperand(3), Ops[5], Ops[4]);
  Ops[8] = N->getOperand(0);
  Ops[9] = N->getOperand(4);

  CurDAG->SelectNodeTo(N, AMDGPU::V_FMA_F32, N->getVTList(), Ops);
}

void AMDGPUDAGToDAGISel::SelectFMUL_W_CHAIN(SDNode *N) {
  SDLoc SL(N);
  // src0_modifiers, src0, src1_modifiers, src1, clamp, omod
  SDValue Ops[8];

  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[4], Ops[5]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  Ops[6] = N->getOperand(0);
  Ops[7] = N->getOperand(3);

  CurDAG->SelectNodeTo(N, AMDGPU::V_MUL_F32_e64, N->getVTList(), Ops);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  assert(VT == MVT::f32 || VT == MVT::f64);

  unsigned Opc
    = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;

  SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2) };
  CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
void AMDGPUDAGToDAGISel::SelectMAD_64_32(SDNode *N) {
  SDLoc SL(N);
  bool Signed = N->getOpcode() == AMDGPUISD::MAD_I64_I32;
  unsigned Opc = Signed ? AMDGPU::V_MAD_I64_I32 : AMDGPU::V_MAD_U64_U32;

  SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
  SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
                    Clamp };
  CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
}

bool AMDGPUDAGToDAGISel::isDSOffsetLegal(SDValue Base, unsigned Offset,
                                         unsigned OffsetBits) const {
  if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
      (OffsetBits == 8 && !isUInt<8>(Offset)))
    return false;

  if (Subtarget->hasUsableDSOffset() ||
      Subtarget->unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands instructions with a negative base value and an offset
  // don't seem to work.
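  // Added note: only treat the offset as legal when the sign bit of the base
  // is known to be zero, i.e. the base is known non-negative.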
  return CurDAG->SignBitIsZero(Base);
}

bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
                                              SDValue &Offset) const {
  SDLoc DL(Addr);
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
      // (add n0, c0)
      Base = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      int64_t ByteOffset = C->getSExtValue();
      if (isUInt<16>(ByteOffset)) {
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffsetLegal(Sub, ByteOffset, 16)) {
          SmallVector<SDValue, 3> Opnds;
          Opnds.push_back(Zero);
          Opnds.push_back(Addr.getOperand(1));

          // FIXME: Select to VOP3 version for with-carry.
          unsigned SubOp = AMDGPU::V_SUB_I32_e32;
          if (Subtarget->hasAddNoCarry()) {
            SubOp = AMDGPU::V_SUB_U32_e64;
            Opnds.push_back(
                CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
          }

          MachineSDNode *MachineSub =
              CurDAG->getMachineNode(SubOp, DL, MVT::i32, Opnds);

          Base = SDValue(MachineSub, 0);
          Offset = CurDAG->getTargetConstant(ByteOffset, DL, MVT::i16);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    // If we have a constant address, prefer to put the constant into the
    // offset. This can save moves to load the constant address since multiple
    // operations can share the zero base address register, and enables merging
    // into read2 / write2 instructions.

    SDLoc DL(Addr);

    if (isUInt<16>(CAddr->getZExtValue())) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                                      DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // default case
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i16);
  return true;
}

// TODO: If offset is too big, put low 16-bit into offset.
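// Added note: this matches a base address plus the pair of 8-bit offsets used
// by ds_read2 / ds_write2. For the 4-byte aligned forms the offsets are in
// dword units, so the byte offset from the address is divided by 4 below.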
bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
                                                   SDValue &Offset0,
                                                   SDValue &Offset1) const {
  SDLoc DL(Addr);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    unsigned DWordOffset0 = C1->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    // (add n0, c0)
    if (isDSOffsetLegal(N0, DWordOffset1, 8)) {
      Base = N0;
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      unsigned DWordOffset0 = C->getZExtValue() / 4;
      unsigned DWordOffset1 = DWordOffset0 + 1;

      if (isUInt<8>(DWordOffset0)) {
        SDLoc DL(Addr);
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffsetLegal(Sub, DWordOffset1, 8)) {
          SmallVector<SDValue, 3> Opnds;
          Opnds.push_back(Zero);
          Opnds.push_back(Addr.getOperand(1));
          unsigned SubOp = AMDGPU::V_SUB_I32_e32;
          if (Subtarget->hasAddNoCarry()) {
            SubOp = AMDGPU::V_SUB_U32_e64;
            Opnds.push_back(
                CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
          }

          MachineSDNode *MachineSub
            = CurDAG->getMachineNode(SubOp, DL, MVT::i32, Opnds);

          Base = SDValue(MachineSub, 0);
          Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
          Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    unsigned DWordOffset0 = CAddr->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    assert(4 * DWordOffset0 == CAddr->getZExtValue());

    if (isUInt<8>(DWordOffset0) && isUInt<8>(DWordOffset1)) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero
        = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                 DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
      return true;
    }
  }

  // default case

  Base = Addr;
  Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
  Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
                                     SDValue &VAddr, SDValue &SOffset,
                                     SDValue &Offset, SDValue &Offen,
                                     SDValue &Idxen, SDValue &Addr64,
                                     SDValue &GLC, SDValue &SLC,
                                     SDValue &TFE, SDValue &DLC,
                                     SDValue &SWZ) const {
  // Subtarget prefers to use flat instructions
  // FIXME: This should be a pattern predicate and not reach here
  if (Subtarget->useFlatForGlobal())
    return false;

  SDLoc DL(Addr);

  if (!GLC.getNode())
    GLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
  if (!SLC.getNode())
    SLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
  TFE = CurDAG->getTargetConstant(0, DL, MVT::i1);
  DLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
  SWZ = CurDAG->getTargetConstant(0, DL, MVT::i1);

  Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Offen = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Addr64 = CurDAG->getTargetConstant(0, DL, MVT::i1);
  SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);

  ConstantSDNode *C1 = nullptr;
  SDValue N0 = Addr;
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    C1 = cast<ConstantSDNode>(Addr.getOperand(1));
    if (isUInt<32>(C1->getZExtValue()))
      N0 = Addr.getOperand(0);
    else
      C1 = nullptr;
  }

  if (N0.getOpcode() == ISD::ADD) {
    // (add N2, N3) -> addr64, or
    // (add (add N2, N3), C1) -> addr64
    SDValue N2 = N0.getOperand(0);
    SDValue N3 = N0.getOperand(1);
    Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);

    if (N2->isDivergent()) {
      if (N3->isDivergent()) {
        // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
        // addr64, and construct the resource from a 0 address.
        Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
        VAddr = N0;
      } else {
        // N2 is divergent, N3 is not.
        Ptr = N3;
        VAddr = N2;
      }
    } else {
      // N2 is not divergent.
      Ptr = N2;
      VAddr = N3;
    }
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  } else if (N0->isDivergent()) {
    // N0 is divergent. Use it as the addr64, and construct the resource from a
    // 0 address.
    Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
    VAddr = N0;
    Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
  } else {
    // N0 -> offset, or
    // (N0 + C1) -> offset
    VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
    Ptr = N0;
  }

  if (!C1) {
    // No offset.
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
    return true;
  }

  if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue())) {
    // Legal offset for instruction.
    Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
    return true;
  }

  // Illegal offset, store it in soffset.
  Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  SOffset =
      SDValue(CurDAG->getMachineNode(
                  AMDGPU::S_MOV_B32, DL, MVT::i32,
                  CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32)),
              0);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr, SDValue &SOffset,
                                           SDValue &Offset, SDValue &GLC,
                                           SDValue &SLC, SDValue &TFE,
                                           SDValue &DLC, SDValue &SWZ) const {
  SDValue Ptr, Offen, Idxen, Addr64;

  // addr64 bit was removed for volcanic islands.
  // FIXME: This should be a pattern predicate and not reach here
  if (!Subtarget->hasAddr64())
    return false;

  if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
                   GLC, SLC, TFE, DLC, SWZ))
    return false;

  ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
  if (C->getSExtValue()) {
    SDLoc DL(Addr);

    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());

    SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
    return true;
  }

  return false;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr, SDValue &SOffset,
                                           SDValue &Offset,
                                           SDValue &SLC) const {
  SLC = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i1);
  SDValue GLC, TFE, DLC, SWZ;

  return SelectMUBUFAddr64(Addr, SRsrc, VAddr, SOffset, Offset, GLC, SLC, TFE, DLC, SWZ);
}

static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
  auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
  return PSV && PSV->isStack();
}

std::pair<SDValue, SDValue> AMDGPUDAGToDAGISel::foldFrameIndex(SDValue N) const {
  SDLoc DL(N);
  const MachineFunction &MF = CurDAG->getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  if (auto FI = dyn_cast<FrameIndexSDNode>(N)) {
    SDValue TFI = CurDAG->getTargetFrameIndex(FI->getIndex(),
                                              FI->getValueType(0));

    // If we can resolve this to a frame index access, this will be relative to
    // either the stack or frame pointer SGPR.
    return std::make_pair(
        TFI, CurDAG->getRegister(Info->getStackPtrOffsetReg(), MVT::i32));
  }

  // If we don't know this private access is a local stack object, it needs to
  // be relative to the entry point's scratch wave offset.
  return std::make_pair(N, CurDAG->getTargetConstant(0, DL, MVT::i32));
}

bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffen(SDNode *Parent,
                                                 SDValue Addr, SDValue &Rsrc,
                                                 SDValue &VAddr, SDValue &SOffset,
                                                 SDValue &ImmOffset) const {

  SDLoc DL(Addr);
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  Rsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);

  if (ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    unsigned Imm = CAddr->getZExtValue();

    SDValue HighBits = CurDAG->getTargetConstant(Imm & ~4095, DL, MVT::i32);
    MachineSDNode *MovHighBits = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                                        DL, MVT::i32, HighBits);
    VAddr = SDValue(MovHighBits, 0);

    // In a call sequence, stores to the argument stack area are relative to the
    // stack pointer.
    const MachinePointerInfo &PtrInfo = cast<MemSDNode>(Parent)->getPointerInfo();

    SOffset = isStackPtrRelative(PtrInfo)
                  ? CurDAG->getRegister(Info->getStackPtrOffsetReg(), MVT::i32)
                  : CurDAG->getTargetConstant(0, DL, MVT::i32);
    ImmOffset = CurDAG->getTargetConstant(Imm & 4095, DL, MVT::i16);
    return true;
  }

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    // (add n0, c1)

    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);

    // Offsets in vaddr must be positive if range checking is enabled.
    //
    // The total computation of vaddr + soffset + offset must not overflow. If
    // vaddr is negative, even if offset is 0 the sgpr offset add will end up
    // overflowing.
    //
    // Prior to gfx9, MUBUF instructions with the vaddr offset enabled would
    // always perform a range check. If a negative vaddr base index was used,
    // this would fail the range check. The overall address computation would
    // compute a valid address, but this doesn't happen due to the range
    // check. For out-of-bounds MUBUF loads, a 0 is returned.
    //
    // Therefore it should be safe to fold any VGPR offset on gfx9 into the
    // MUBUF vaddr, but not on older subtargets which can only do this if the
    // sign bit is known 0.
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue()) &&
        (!Subtarget->privateMemoryResourceIsRangeChecked() ||
         CurDAG->SignBitIsZero(N0))) {
      std::tie(VAddr, SOffset) = foldFrameIndex(N0);
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // (node)
  std::tie(VAddr, SOffset) = foldFrameIndex(Addr);
  ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffset(SDNode *Parent,
                                                  SDValue Addr,
                                                  SDValue &SRsrc,
                                                  SDValue &SOffset,
                                                  SDValue &Offset) const {
  ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr);
  if (!CAddr || !SIInstrInfo::isLegalMUBUFImmOffset(CAddr->getZExtValue()))
    return false;

  SDLoc DL(Addr);
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  SRsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);

  const MachinePointerInfo &PtrInfo = cast<MemSDNode>(Parent)->getPointerInfo();

  // FIXME: Get from MachinePointerInfo? We should only be using the frame
  // offset if we know this is in a call sequence.
  SOffset = isStackPtrRelative(PtrInfo)
                ? CurDAG->getRegister(Info->getStackPtrOffsetReg(), MVT::i32)
                : CurDAG->getTargetConstant(0, DL, MVT::i32);

  Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &SOffset, SDValue &Offset,
                                           SDValue &GLC, SDValue &SLC,
                                           SDValue &TFE, SDValue &DLC,
                                           SDValue &SWZ) const {
  SDValue Ptr, VAddr, Offen, Idxen, Addr64;
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());

  if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
                   GLC, SLC, TFE, DLC, SWZ))
    return false;

  if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
      !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
      !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
    uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
                    APInt::getAllOnesValue(32).getZExtValue(); // Size
    SDLoc DL(Addr);

    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());

    SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &Soffset,
                                           SDValue &Offset) const {
  SDValue GLC, SLC, TFE, DLC, SWZ;

  return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE, DLC, SWZ);
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &Soffset, SDValue &Offset,
                                           SDValue &SLC) const {
  SDValue GLC, TFE, DLC, SWZ;

  return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE, DLC, SWZ);
}

// Find a load or store from the corresponding pattern root.
// Roots may be build_vector, bitconvert or their combinations.
static MemSDNode* findMemSDNode(SDNode *N) {
  N = AMDGPUTargetLowering::stripBitcast(SDValue(N,0)).getNode();
  if (MemSDNode *MN = dyn_cast<MemSDNode>(N))
    return MN;
  assert(isa<BuildVectorSDNode>(N));
  for (SDValue V : N->op_values())
    if (MemSDNode *MN =
            dyn_cast<MemSDNode>(AMDGPUTargetLowering::stripBitcast(V)))
      return MN;
  llvm_unreachable("cannot find MemSDNode in the pattern!");
}

template <bool IsSigned>
bool AMDGPUDAGToDAGISel::SelectFlatOffset(SDNode *N,
                                          SDValue Addr,
                                          SDValue &VAddr,
                                          SDValue &Offset,
                                          SDValue &SLC) const {
  int64_t OffsetVal = 0;

  if (Subtarget->hasFlatInstOffsets() &&
      (!Subtarget->hasFlatSegmentOffsetBug() ||
       findMemSDNode(N)->getAddressSpace() != AMDGPUAS::FLAT_ADDRESS) &&
      CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    uint64_t COffsetVal = cast<ConstantSDNode>(N1)->getSExtValue();

    const SIInstrInfo *TII = Subtarget->getInstrInfo();
    unsigned AS = findMemSDNode(N)->getAddressSpace();
    if (TII->isLegalFLATOffset(COffsetVal, AS, IsSigned)) {
      Addr = N0;
      OffsetVal = COffsetVal;
    } else {
      // If the offset doesn't fit, put the low bits into the offset field and
      // add the rest.

      SDLoc DL(N);
      uint64_t ImmField;
      const unsigned NumBits = TII->getNumFlatOffsetBits(AS, IsSigned);
      if (IsSigned) {
        ImmField = SignExtend64(COffsetVal, NumBits);

        // Don't use a negative offset field if the base offset is positive.
1657 // Since the scheduler currently relies on the offset field, doing so 1658 // could result in strange scheduling decisions. 1659 1660 // TODO: Should we not do this in the opposite direction as well? 1661 if (static_cast<int64_t>(COffsetVal) > 0) { 1662 if (static_cast<int64_t>(ImmField) < 0) { 1663 const uint64_t OffsetMask = maskTrailingOnes<uint64_t>(NumBits - 1); 1664 ImmField = COffsetVal & OffsetMask; 1665 } 1666 } 1667 } else { 1668 // TODO: Should we do this for a negative offset? 1669 const uint64_t OffsetMask = maskTrailingOnes<uint64_t>(NumBits); 1670 ImmField = COffsetVal & OffsetMask; 1671 } 1672 1673 uint64_t RemainderOffset = COffsetVal - ImmField; 1674 1675 assert(TII->isLegalFLATOffset(ImmField, AS, IsSigned)); 1676 assert(RemainderOffset + ImmField == COffsetVal); 1677 1678 OffsetVal = ImmField; 1679 1680 // TODO: Should this try to use a scalar add pseudo if the base address is 1681 // uniform and saddr is usable? 1682 SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32); 1683 SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32); 1684 1685 SDNode *N0Lo = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, 1686 DL, MVT::i32, N0, Sub0); 1687 SDNode *N0Hi = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, 1688 DL, MVT::i32, N0, Sub1); 1689 1690 SDValue AddOffsetLo 1691 = getMaterializedScalarImm32(Lo_32(RemainderOffset), DL); 1692 SDValue AddOffsetHi 1693 = getMaterializedScalarImm32(Hi_32(RemainderOffset), DL); 1694 1695 SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i1); 1696 SDValue Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1); 1697 1698 SDNode *Add = CurDAG->getMachineNode( 1699 AMDGPU::V_ADD_I32_e64, DL, VTs, 1700 {AddOffsetLo, SDValue(N0Lo, 0), Clamp}); 1701 1702 SDNode *Addc = CurDAG->getMachineNode( 1703 AMDGPU::V_ADDC_U32_e64, DL, VTs, 1704 {AddOffsetHi, SDValue(N0Hi, 0), SDValue(Add, 1), Clamp}); 1705 1706 SDValue RegSequenceArgs[] = { 1707 CurDAG->getTargetConstant(AMDGPU::VReg_64RegClassID, DL, MVT::i32), 1708 SDValue(Add, 0), Sub0, SDValue(Addc, 0), Sub1 1709 }; 1710 1711 Addr = SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL, 1712 MVT::i64, RegSequenceArgs), 0); 1713 } 1714 } 1715 1716 VAddr = Addr; 1717 Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i16); 1718 SLC = CurDAG->getTargetConstant(0, SDLoc(), MVT::i1); 1719 return true; 1720 } 1721 1722 bool AMDGPUDAGToDAGISel::SelectFlatAtomic(SDNode *N, 1723 SDValue Addr, 1724 SDValue &VAddr, 1725 SDValue &Offset, 1726 SDValue &SLC) const { 1727 return SelectFlatOffset<false>(N, Addr, VAddr, Offset, SLC); 1728 } 1729 1730 bool AMDGPUDAGToDAGISel::SelectFlatAtomicSigned(SDNode *N, 1731 SDValue Addr, 1732 SDValue &VAddr, 1733 SDValue &Offset, 1734 SDValue &SLC) const { 1735 return SelectFlatOffset<true>(N, Addr, VAddr, Offset, SLC); 1736 } 1737 1738 bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode, 1739 SDValue &Offset, bool &Imm) const { 1740 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ByteOffsetNode); 1741 if (!C) { 1742 if (ByteOffsetNode.getValueType().isScalarInteger() && 1743 ByteOffsetNode.getValueType().getSizeInBits() == 32) { 1744 Offset = ByteOffsetNode; 1745 Imm = false; 1746 return true; 1747 } 1748 if (ByteOffsetNode.getOpcode() == ISD::ZERO_EXTEND) { 1749 if (ByteOffsetNode.getOperand(0).getValueType().getSizeInBits() == 32) { 1750 Offset = ByteOffsetNode.getOperand(0); 1751 Imm = false; 1752 return true; 1753 } 1754 } 1755 return false; 1756 } 1757 1758 SDLoc SL(ByteOffsetNode); 1759 // GFX9 and GFX10 have signed 
byte immediate offsets. 1760 int64_t ByteOffset = C->getSExtValue(); 1761 Optional<int64_t> EncodedOffset = 1762 AMDGPU::getSMRDEncodedOffset(*Subtarget, ByteOffset, false); 1763 if (EncodedOffset) { 1764 Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32); 1765 Imm = true; 1766 return true; 1767 } 1768 1769 // SGPR and literal offsets are unsigned. 1770 if (ByteOffset < 0) 1771 return false; 1772 1773 EncodedOffset = AMDGPU::getSMRDEncodedLiteralOffset32(*Subtarget, ByteOffset); 1774 if (EncodedOffset) { 1775 Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32); 1776 return true; 1777 } 1778 1779 if (!isUInt<32>(ByteOffset) && !isInt<32>(ByteOffset)) 1780 return false; 1781 1782 SDValue C32Bit = CurDAG->getTargetConstant(ByteOffset, SL, MVT::i32); 1783 Offset = SDValue( 1784 CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, C32Bit), 0); 1785 1786 return true; 1787 } 1788 1789 SDValue AMDGPUDAGToDAGISel::Expand32BitAddress(SDValue Addr) const { 1790 if (Addr.getValueType() != MVT::i32) 1791 return Addr; 1792 1793 // Zero-extend a 32-bit address. 1794 SDLoc SL(Addr); 1795 1796 const MachineFunction &MF = CurDAG->getMachineFunction(); 1797 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 1798 unsigned AddrHiVal = Info->get32BitAddressHighBits(); 1799 SDValue AddrHi = CurDAG->getTargetConstant(AddrHiVal, SL, MVT::i32); 1800 1801 const SDValue Ops[] = { 1802 CurDAG->getTargetConstant(AMDGPU::SReg_64_XEXECRegClassID, SL, MVT::i32), 1803 Addr, 1804 CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32), 1805 SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, AddrHi), 1806 0), 1807 CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32), 1808 }; 1809 1810 return SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, SL, MVT::i64, 1811 Ops), 0); 1812 } 1813 1814 bool AMDGPUDAGToDAGISel::SelectSMRD(SDValue Addr, SDValue &SBase, 1815 SDValue &Offset, bool &Imm) const { 1816 SDLoc SL(Addr); 1817 1818 // A 32-bit (address + offset) should not cause unsigned 32-bit integer 1819 // wraparound, because s_load instructions perform the addition in 64 bits. 
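// For example, folding an offset of 0x20 off a 32-bit base of 0xFFFFFFF0 would
// be wrong without the no-unsigned-wrap flag: the 32-bit add wraps to 0x10,
// while the 64-bit addition performed by s_load carries into the high half of
// the expanded address instead.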
1820 if ((Addr.getValueType() != MVT::i32 || 1821 Addr->getFlags().hasNoUnsignedWrap()) && 1822 (CurDAG->isBaseWithConstantOffset(Addr) || 1823 Addr.getOpcode() == ISD::ADD)) { 1824 SDValue N0 = Addr.getOperand(0); 1825 SDValue N1 = Addr.getOperand(1); 1826 1827 if (SelectSMRDOffset(N1, Offset, Imm)) { 1828 SBase = Expand32BitAddress(N0); 1829 return true; 1830 } 1831 } 1832 SBase = Expand32BitAddress(Addr); 1833 Offset = CurDAG->getTargetConstant(0, SL, MVT::i32); 1834 Imm = true; 1835 return true; 1836 } 1837 1838 bool AMDGPUDAGToDAGISel::SelectSMRDImm(SDValue Addr, SDValue &SBase, 1839 SDValue &Offset) const { 1840 bool Imm = false; 1841 return SelectSMRD(Addr, SBase, Offset, Imm) && Imm; 1842 } 1843 1844 bool AMDGPUDAGToDAGISel::SelectSMRDImm32(SDValue Addr, SDValue &SBase, 1845 SDValue &Offset) const { 1846 1847 assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS); 1848 1849 bool Imm = false; 1850 if (!SelectSMRD(Addr, SBase, Offset, Imm)) 1851 return false; 1852 1853 return !Imm && isa<ConstantSDNode>(Offset); 1854 } 1855 1856 bool AMDGPUDAGToDAGISel::SelectSMRDSgpr(SDValue Addr, SDValue &SBase, 1857 SDValue &Offset) const { 1858 bool Imm = false; 1859 return SelectSMRD(Addr, SBase, Offset, Imm) && !Imm && 1860 !isa<ConstantSDNode>(Offset); 1861 } 1862 1863 bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm(SDValue Addr, 1864 SDValue &Offset) const { 1865 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr)) { 1866 // The immediate offset for S_BUFFER instructions is unsigned. 1867 if (auto Imm = 1868 AMDGPU::getSMRDEncodedOffset(*Subtarget, C->getZExtValue(), true)) { 1869 Offset = CurDAG->getTargetConstant(*Imm, SDLoc(Addr), MVT::i32); 1870 return true; 1871 } 1872 } 1873 1874 return false; 1875 } 1876 1877 bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm32(SDValue Addr, 1878 SDValue &Offset) const { 1879 assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS); 1880 1881 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr)) { 1882 if (auto Imm = AMDGPU::getSMRDEncodedLiteralOffset32(*Subtarget, 1883 C->getZExtValue())) { 1884 Offset = CurDAG->getTargetConstant(*Imm, SDLoc(Addr), MVT::i32); 1885 return true; 1886 } 1887 } 1888 1889 return false; 1890 } 1891 1892 bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index, 1893 SDValue &Base, 1894 SDValue &Offset) const { 1895 SDLoc DL(Index); 1896 1897 if (CurDAG->isBaseWithConstantOffset(Index)) { 1898 SDValue N0 = Index.getOperand(0); 1899 SDValue N1 = Index.getOperand(1); 1900 ConstantSDNode *C1 = cast<ConstantSDNode>(N1); 1901 1902 // (add n0, c0) 1903 // Don't peel off the offset (c0) if doing so could possibly lead 1904 // the base (n0) to be negative. 1905 // (or n0, |c0|) can never change a sign given isBaseWithConstantOffset. 1906 if (C1->getSExtValue() <= 0 || CurDAG->SignBitIsZero(N0) || 1907 (Index->getOpcode() == ISD::OR && C1->getSExtValue() >= 0)) { 1908 Base = N0; 1909 Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32); 1910 return true; 1911 } 1912 } 1913 1914 if (isa<ConstantSDNode>(Index)) 1915 return false; 1916 1917 Base = Index; 1918 Offset = CurDAG->getTargetConstant(0, DL, MVT::i32); 1919 return true; 1920 } 1921 1922 SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, const SDLoc &DL, 1923 SDValue Val, uint32_t Offset, 1924 uint32_t Width) { 1925 // Transformation function, pack the offset and width of a BFE into 1926 // the format expected by the S_BFE_I32 / S_BFE_U32. In the second 1927 // source, bits [5:0] contain the offset and bits [22:16] the width. 
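// For example, Offset = 4 and Width = 8 pack to 4 | (8 << 16) = 0x00080004.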
1928 uint32_t PackedVal = Offset | (Width << 16); 1929 SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32); 1930 1931 return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst); 1932 } 1933 1934 void AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) { 1935 // "(a << b) srl c)" ---> "BFE_U32 a, (c-b), (32-c) 1936 // "(a << b) sra c)" ---> "BFE_I32 a, (c-b), (32-c) 1937 // Predicate: 0 < b <= c < 32 1938 1939 const SDValue &Shl = N->getOperand(0); 1940 ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1)); 1941 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 1942 1943 if (B && C) { 1944 uint32_t BVal = B->getZExtValue(); 1945 uint32_t CVal = C->getZExtValue(); 1946 1947 if (0 < BVal && BVal <= CVal && CVal < 32) { 1948 bool Signed = N->getOpcode() == ISD::SRA; 1949 unsigned Opcode = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32; 1950 1951 ReplaceNode(N, getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0), CVal - BVal, 1952 32 - CVal)); 1953 return; 1954 } 1955 } 1956 SelectCode(N); 1957 } 1958 1959 void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) { 1960 switch (N->getOpcode()) { 1961 case ISD::AND: 1962 if (N->getOperand(0).getOpcode() == ISD::SRL) { 1963 // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)" 1964 // Predicate: isMask(mask) 1965 const SDValue &Srl = N->getOperand(0); 1966 ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1)); 1967 ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1)); 1968 1969 if (Shift && Mask) { 1970 uint32_t ShiftVal = Shift->getZExtValue(); 1971 uint32_t MaskVal = Mask->getZExtValue(); 1972 1973 if (isMask_32(MaskVal)) { 1974 uint32_t WidthVal = countPopulation(MaskVal); 1975 1976 ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N), 1977 Srl.getOperand(0), ShiftVal, WidthVal)); 1978 return; 1979 } 1980 } 1981 } 1982 break; 1983 case ISD::SRL: 1984 if (N->getOperand(0).getOpcode() == ISD::AND) { 1985 // "(a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)" 1986 // Predicate: isMask(mask >> b) 1987 const SDValue &And = N->getOperand(0); 1988 ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1)); 1989 ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1)); 1990 1991 if (Shift && Mask) { 1992 uint32_t ShiftVal = Shift->getZExtValue(); 1993 uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal; 1994 1995 if (isMask_32(MaskVal)) { 1996 uint32_t WidthVal = countPopulation(MaskVal); 1997 1998 ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N), 1999 And.getOperand(0), ShiftVal, WidthVal)); 2000 return; 2001 } 2002 } 2003 } else if (N->getOperand(0).getOpcode() == ISD::SHL) { 2004 SelectS_BFEFromShifts(N); 2005 return; 2006 } 2007 break; 2008 case ISD::SRA: 2009 if (N->getOperand(0).getOpcode() == ISD::SHL) { 2010 SelectS_BFEFromShifts(N); 2011 return; 2012 } 2013 break; 2014 2015 case ISD::SIGN_EXTEND_INREG: { 2016 // sext_inreg (srl x, 16), i8 -> bfe_i32 x, 16, 8 2017 SDValue Src = N->getOperand(0); 2018 if (Src.getOpcode() != ISD::SRL) 2019 break; 2020 2021 const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1)); 2022 if (!Amt) 2023 break; 2024 2025 unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits(); 2026 ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_I32, SDLoc(N), Src.getOperand(0), 2027 Amt->getZExtValue(), Width)); 2028 return; 2029 } 2030 } 2031 2032 SelectCode(N); 2033 } 2034 2035 bool AMDGPUDAGToDAGISel::isCBranchSCC(const SDNode *N) const { 2036 assert(N->getOpcode() == ISD::BRCOND); 2037 if 
(!N->hasOneUse())
2038 return false;
2039
2040 SDValue Cond = N->getOperand(1);
2041 if (Cond.getOpcode() == ISD::CopyToReg)
2042 Cond = Cond.getOperand(2);
2043
2044 if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
2045 return false;
2046
2047 MVT VT = Cond.getOperand(0).getSimpleValueType();
2048 if (VT == MVT::i32)
2049 return true;
2050
2051 if (VT == MVT::i64) {
2052 auto ST = static_cast<const GCNSubtarget *>(Subtarget);
2053
2054 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
2055 return (CC == ISD::SETEQ || CC == ISD::SETNE) && ST->hasScalarCompareEq64();
2056 }
2057
2058 return false;
2059 }
2060
2061 void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
2062 SDValue Cond = N->getOperand(1);
2063
2064 if (Cond.isUndef()) {
2065 CurDAG->SelectNodeTo(N, AMDGPU::SI_BR_UNDEF, MVT::Other,
2066 N->getOperand(2), N->getOperand(0));
2067 return;
2068 }
2069
2070 const GCNSubtarget *ST = static_cast<const GCNSubtarget *>(Subtarget);
2071 const SIRegisterInfo *TRI = ST->getRegisterInfo();
2072
2073 bool UseSCCBr = isCBranchSCC(N) && isUniformBr(N);
2074 unsigned BrOp = UseSCCBr ? AMDGPU::S_CBRANCH_SCC1 : AMDGPU::S_CBRANCH_VCCNZ;
2075 Register CondReg = UseSCCBr ? AMDGPU::SCC : TRI->getVCC();
2076 SDLoc SL(N);
2077
2078 if (!UseSCCBr) {
2079 // This is the case that we are selecting to S_CBRANCH_VCCNZ. We have not
2080 // analyzed what generates the vcc value, so we do not know whether vcc
2081 // bits for disabled lanes are 0. Thus we need to mask out bits for
2082 // disabled lanes.
2083 //
2084 // (For the case that we select S_CBRANCH_SCC1 and it gets
2085 // changed to S_CBRANCH_VCCNZ in SIFixSGPRCopies, SIFixSGPRCopies calls
2086 // SIInstrInfo::moveToVALU which inserts the S_AND.)
2087 //
2088 // We could add an analysis of what generates the vcc value here and omit
2089 // the S_AND when it is unnecessary. But it would be better to add a separate
2090 // pass after SIFixSGPRCopies to do the unnecessary S_AND removal, so it
2091 // catches both cases.
2092 Cond = SDValue(CurDAG->getMachineNode(ST->isWave32() ? AMDGPU::S_AND_B32
2093 : AMDGPU::S_AND_B64,
2094 SL, MVT::i1,
2095 CurDAG->getRegister(ST->isWave32() ? AMDGPU::EXEC_LO
2096 : AMDGPU::EXEC,
2097 MVT::i1),
2098 Cond),
2099 0);
2100 }
2101
2102 SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, CondReg, Cond);
2103 CurDAG->SelectNodeTo(N, BrOp, MVT::Other,
2104 N->getOperand(2), // Basic Block
2105 VCC.getValue(0));
2106 }
2107
2108 void AMDGPUDAGToDAGISel::SelectFMAD_FMA(SDNode *N) {
2109 MVT VT = N->getSimpleValueType(0);
2110 bool IsFMA = N->getOpcode() == ISD::FMA;
2111 if (VT != MVT::f32 || (!Subtarget->hasMadMixInsts() &&
2112 !Subtarget->hasFmaMixInsts()) ||
2113 ((IsFMA && Subtarget->hasMadMixInsts()) ||
2114 (!IsFMA && Subtarget->hasFmaMixInsts()))) {
2115 SelectCode(N);
2116 return;
2117 }
2118
2119 SDValue Src0 = N->getOperand(0);
2120 SDValue Src1 = N->getOperand(1);
2121 SDValue Src2 = N->getOperand(2);
2122 unsigned Src0Mods, Src1Mods, Src2Mods;
2123
2124 // Avoid using v_mad_mix_f32/v_fma_mix_f32 unless there is actually an operand
2125 // using the conversion from f16.
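// For example, fma(fpext(a_f16), b_f32, c_f32) can use the mix encoding, since
// the f16 source is selected through op_sel_hi; an fma whose sources are all
// plain f32 gains nothing from it.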
2126 bool Sel0 = SelectVOP3PMadMixModsImpl(Src0, Src0, Src0Mods); 2127 bool Sel1 = SelectVOP3PMadMixModsImpl(Src1, Src1, Src1Mods); 2128 bool Sel2 = SelectVOP3PMadMixModsImpl(Src2, Src2, Src2Mods); 2129 2130 assert((IsFMA || !Mode.allFP32Denormals()) && 2131 "fmad selected with denormals enabled"); 2132 // TODO: We can select this with f32 denormals enabled if all the sources are 2133 // converted from f16 (in which case fmad isn't legal). 2134 2135 if (Sel0 || Sel1 || Sel2) { 2136 // For dummy operands. 2137 SDValue Zero = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32); 2138 SDValue Ops[] = { 2139 CurDAG->getTargetConstant(Src0Mods, SDLoc(), MVT::i32), Src0, 2140 CurDAG->getTargetConstant(Src1Mods, SDLoc(), MVT::i32), Src1, 2141 CurDAG->getTargetConstant(Src2Mods, SDLoc(), MVT::i32), Src2, 2142 CurDAG->getTargetConstant(0, SDLoc(), MVT::i1), 2143 Zero, Zero 2144 }; 2145 2146 CurDAG->SelectNodeTo(N, 2147 IsFMA ? AMDGPU::V_FMA_MIX_F32 : AMDGPU::V_MAD_MIX_F32, 2148 MVT::f32, Ops); 2149 } else { 2150 SelectCode(N); 2151 } 2152 } 2153 2154 // This is here because there isn't a way to use the generated sub0_sub1 as the 2155 // subreg index to EXTRACT_SUBREG in tablegen. 2156 void AMDGPUDAGToDAGISel::SelectATOMIC_CMP_SWAP(SDNode *N) { 2157 MemSDNode *Mem = cast<MemSDNode>(N); 2158 unsigned AS = Mem->getAddressSpace(); 2159 if (AS == AMDGPUAS::FLAT_ADDRESS) { 2160 SelectCode(N); 2161 return; 2162 } 2163 2164 MVT VT = N->getSimpleValueType(0); 2165 bool Is32 = (VT == MVT::i32); 2166 SDLoc SL(N); 2167 2168 MachineSDNode *CmpSwap = nullptr; 2169 if (Subtarget->hasAddr64()) { 2170 SDValue SRsrc, VAddr, SOffset, Offset, SLC; 2171 2172 if (SelectMUBUFAddr64(Mem->getBasePtr(), SRsrc, VAddr, SOffset, Offset, SLC)) { 2173 unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN : 2174 AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN; 2175 SDValue CmpVal = Mem->getOperand(2); 2176 2177 // XXX - Do we care about glue operands? 2178 2179 SDValue Ops[] = { 2180 CmpVal, VAddr, SRsrc, SOffset, Offset, SLC, Mem->getChain() 2181 }; 2182 2183 CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops); 2184 } 2185 } 2186 2187 if (!CmpSwap) { 2188 SDValue SRsrc, SOffset, Offset, SLC; 2189 if (SelectMUBUFOffset(Mem->getBasePtr(), SRsrc, SOffset, Offset, SLC)) { 2190 unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN : 2191 AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN; 2192 2193 SDValue CmpVal = Mem->getOperand(2); 2194 SDValue Ops[] = { 2195 CmpVal, SRsrc, SOffset, Offset, SLC, Mem->getChain() 2196 }; 2197 2198 CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops); 2199 } 2200 } 2201 2202 if (!CmpSwap) { 2203 SelectCode(N); 2204 return; 2205 } 2206 2207 MachineMemOperand *MMO = Mem->getMemOperand(); 2208 CurDAG->setNodeMemRefs(CmpSwap, {MMO}); 2209 2210 unsigned SubReg = Is32 ? AMDGPU::sub0 : AMDGPU::sub0_sub1; 2211 SDValue Extract 2212 = CurDAG->getTargetExtractSubreg(SubReg, SL, VT, SDValue(CmpSwap, 0)); 2213 2214 ReplaceUses(SDValue(N, 0), Extract); 2215 ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 1)); 2216 CurDAG->RemoveDeadNode(N); 2217 } 2218 2219 void AMDGPUDAGToDAGISel::SelectDSAppendConsume(SDNode *N, unsigned IntrID) { 2220 // The address is assumed to be uniform, so if it ends up in a VGPR, it will 2221 // be copied to an SGPR with readfirstlane. 2222 unsigned Opc = IntrID == Intrinsic::amdgcn_ds_append ? 
2223 AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME; 2224 2225 SDValue Chain = N->getOperand(0); 2226 SDValue Ptr = N->getOperand(2); 2227 MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N); 2228 MachineMemOperand *MMO = M->getMemOperand(); 2229 bool IsGDS = M->getAddressSpace() == AMDGPUAS::REGION_ADDRESS; 2230 2231 SDValue Offset; 2232 if (CurDAG->isBaseWithConstantOffset(Ptr)) { 2233 SDValue PtrBase = Ptr.getOperand(0); 2234 SDValue PtrOffset = Ptr.getOperand(1); 2235 2236 const APInt &OffsetVal = cast<ConstantSDNode>(PtrOffset)->getAPIntValue(); 2237 if (isDSOffsetLegal(PtrBase, OffsetVal.getZExtValue(), 16)) { 2238 N = glueCopyToM0(N, PtrBase); 2239 Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i32); 2240 } 2241 } 2242 2243 if (!Offset) { 2244 N = glueCopyToM0(N, Ptr); 2245 Offset = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32); 2246 } 2247 2248 SDValue Ops[] = { 2249 Offset, 2250 CurDAG->getTargetConstant(IsGDS, SDLoc(), MVT::i32), 2251 Chain, 2252 N->getOperand(N->getNumOperands() - 1) // New glue 2253 }; 2254 2255 SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops); 2256 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO}); 2257 } 2258 2259 static unsigned gwsIntrinToOpcode(unsigned IntrID) { 2260 switch (IntrID) { 2261 case Intrinsic::amdgcn_ds_gws_init: 2262 return AMDGPU::DS_GWS_INIT; 2263 case Intrinsic::amdgcn_ds_gws_barrier: 2264 return AMDGPU::DS_GWS_BARRIER; 2265 case Intrinsic::amdgcn_ds_gws_sema_v: 2266 return AMDGPU::DS_GWS_SEMA_V; 2267 case Intrinsic::amdgcn_ds_gws_sema_br: 2268 return AMDGPU::DS_GWS_SEMA_BR; 2269 case Intrinsic::amdgcn_ds_gws_sema_p: 2270 return AMDGPU::DS_GWS_SEMA_P; 2271 case Intrinsic::amdgcn_ds_gws_sema_release_all: 2272 return AMDGPU::DS_GWS_SEMA_RELEASE_ALL; 2273 default: 2274 llvm_unreachable("not a gws intrinsic"); 2275 } 2276 } 2277 2278 void AMDGPUDAGToDAGISel::SelectDS_GWS(SDNode *N, unsigned IntrID) { 2279 if (IntrID == Intrinsic::amdgcn_ds_gws_sema_release_all && 2280 !Subtarget->hasGWSSemaReleaseAll()) { 2281 // Let this error. 2282 SelectCode(N); 2283 return; 2284 } 2285 2286 // Chain, intrinsic ID, vsrc, offset 2287 const bool HasVSrc = N->getNumOperands() == 4; 2288 assert(HasVSrc || N->getNumOperands() == 3); 2289 2290 SDLoc SL(N); 2291 SDValue BaseOffset = N->getOperand(HasVSrc ? 3 : 2); 2292 int ImmOffset = 0; 2293 MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N); 2294 MachineMemOperand *MMO = M->getMemOperand(); 2295 2296 // Don't worry if the offset ends up in a VGPR. Only one lane will have 2297 // effect, so SIFixSGPRCopies will validly insert readfirstlane. 2298 2299 // The resource id offset is computed as (<isa opaque base> + M0[21:16] + 2300 // offset field) % 64. Some versions of the programming guide omit the m0 2301 // part, or claim it's from offset 0. 2302 if (ConstantSDNode *ConstOffset = dyn_cast<ConstantSDNode>(BaseOffset)) { 2303 // If we have a constant offset, try to use the 0 in m0 as the base. 2304 // TODO: Look into changing the default m0 initialization value. If the 2305 // default -1 only set the low 16-bits, we could leave it as-is and add 1 to 2306 // the immediate offset. 2307 glueCopyToM0(N, CurDAG->getTargetConstant(0, SL, MVT::i32)); 2308 ImmOffset = ConstOffset->getZExtValue(); 2309 } else { 2310 if (CurDAG->isBaseWithConstantOffset(BaseOffset)) { 2311 ImmOffset = BaseOffset.getConstantOperandVal(1); 2312 BaseOffset = BaseOffset.getOperand(0); 2313 } 2314 2315 // Prefer to do the shift in an SGPR since it should be possible to use m0 2316 // as the result directly. 
If it's already an SGPR, it will be eliminated 2317 // later. 2318 SDNode *SGPROffset 2319 = CurDAG->getMachineNode(AMDGPU::V_READFIRSTLANE_B32, SL, MVT::i32, 2320 BaseOffset); 2321 // Shift to offset in m0 2322 SDNode *M0Base 2323 = CurDAG->getMachineNode(AMDGPU::S_LSHL_B32, SL, MVT::i32, 2324 SDValue(SGPROffset, 0), 2325 CurDAG->getTargetConstant(16, SL, MVT::i32)); 2326 glueCopyToM0(N, SDValue(M0Base, 0)); 2327 } 2328 2329 SDValue Chain = N->getOperand(0); 2330 SDValue OffsetField = CurDAG->getTargetConstant(ImmOffset, SL, MVT::i32); 2331 2332 // TODO: Can this just be removed from the instruction? 2333 SDValue GDS = CurDAG->getTargetConstant(1, SL, MVT::i1); 2334 2335 const unsigned Opc = gwsIntrinToOpcode(IntrID); 2336 SmallVector<SDValue, 5> Ops; 2337 if (HasVSrc) 2338 Ops.push_back(N->getOperand(2)); 2339 Ops.push_back(OffsetField); 2340 Ops.push_back(GDS); 2341 Ops.push_back(Chain); 2342 2343 SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops); 2344 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO}); 2345 } 2346 2347 void AMDGPUDAGToDAGISel::SelectInterpP1F16(SDNode *N) { 2348 if (Subtarget->getLDSBankCount() != 16) { 2349 // This is a single instruction with a pattern. 2350 SelectCode(N); 2351 return; 2352 } 2353 2354 SDLoc DL(N); 2355 2356 // This requires 2 instructions. It is possible to write a pattern to support 2357 // this, but the generated isel emitter doesn't correctly deal with multiple 2358 // output instructions using the same physical register input. The copy to m0 2359 // is incorrectly placed before the second instruction. 2360 // 2361 // TODO: Match source modifiers. 2362 // 2363 // def : Pat < 2364 // (int_amdgcn_interp_p1_f16 2365 // (VOP3Mods f32:$src0, i32:$src0_modifiers), 2366 // (i32 timm:$attrchan), (i32 timm:$attr), 2367 // (i1 timm:$high), M0), 2368 // (V_INTERP_P1LV_F16 $src0_modifiers, VGPR_32:$src0, timm:$attr, 2369 // timm:$attrchan, 0, 2370 // (V_INTERP_MOV_F32 2, timm:$attr, timm:$attrchan), timm:$high)> { 2371 // let Predicates = [has16BankLDS]; 2372 // } 2373 2374 // 16 bank LDS 2375 SDValue ToM0 = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, AMDGPU::M0, 2376 N->getOperand(5), SDValue()); 2377 2378 SDVTList VTs = CurDAG->getVTList(MVT::f32, MVT::Other); 2379 2380 SDNode *InterpMov = 2381 CurDAG->getMachineNode(AMDGPU::V_INTERP_MOV_F32, DL, VTs, { 2382 CurDAG->getTargetConstant(2, DL, MVT::i32), // P0 2383 N->getOperand(3), // Attr 2384 N->getOperand(2), // Attrchan 2385 ToM0.getValue(1) // In glue 2386 }); 2387 2388 SDNode *InterpP1LV = 2389 CurDAG->getMachineNode(AMDGPU::V_INTERP_P1LV_F16, DL, MVT::f32, { 2390 CurDAG->getTargetConstant(0, DL, MVT::i32), // $src0_modifiers 2391 N->getOperand(1), // Src0 2392 N->getOperand(3), // Attr 2393 N->getOperand(2), // Attrchan 2394 CurDAG->getTargetConstant(0, DL, MVT::i32), // $src2_modifiers 2395 SDValue(InterpMov, 0), // Src2 - holds two f16 values selected by high 2396 N->getOperand(4), // high 2397 CurDAG->getTargetConstant(0, DL, MVT::i1), // $clamp 2398 CurDAG->getTargetConstant(0, DL, MVT::i32), // $omod 2399 SDValue(InterpMov, 1) 2400 }); 2401 2402 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), SDValue(InterpP1LV, 0)); 2403 } 2404 2405 void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) { 2406 unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 2407 switch (IntrID) { 2408 case Intrinsic::amdgcn_ds_append: 2409 case Intrinsic::amdgcn_ds_consume: { 2410 if (N->getValueType(0) != MVT::i32) 2411 break; 2412 SelectDSAppendConsume(N, 
IntrID); 2413 return; 2414 } 2415 } 2416 2417 SelectCode(N); 2418 } 2419 2420 void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) { 2421 unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 2422 unsigned Opcode; 2423 switch (IntrID) { 2424 case Intrinsic::amdgcn_wqm: 2425 Opcode = AMDGPU::WQM; 2426 break; 2427 case Intrinsic::amdgcn_softwqm: 2428 Opcode = AMDGPU::SOFT_WQM; 2429 break; 2430 case Intrinsic::amdgcn_wwm: 2431 Opcode = AMDGPU::WWM; 2432 break; 2433 case Intrinsic::amdgcn_interp_p1_f16: 2434 SelectInterpP1F16(N); 2435 return; 2436 default: 2437 SelectCode(N); 2438 return; 2439 } 2440 2441 SDValue Src = N->getOperand(1); 2442 CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), {Src}); 2443 } 2444 2445 void AMDGPUDAGToDAGISel::SelectINTRINSIC_VOID(SDNode *N) { 2446 unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 2447 switch (IntrID) { 2448 case Intrinsic::amdgcn_ds_gws_init: 2449 case Intrinsic::amdgcn_ds_gws_barrier: 2450 case Intrinsic::amdgcn_ds_gws_sema_v: 2451 case Intrinsic::amdgcn_ds_gws_sema_br: 2452 case Intrinsic::amdgcn_ds_gws_sema_p: 2453 case Intrinsic::amdgcn_ds_gws_sema_release_all: 2454 SelectDS_GWS(N, IntrID); 2455 return; 2456 default: 2457 break; 2458 } 2459 2460 SelectCode(N); 2461 } 2462 2463 bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src, 2464 unsigned &Mods) const { 2465 Mods = 0; 2466 Src = In; 2467 2468 if (Src.getOpcode() == ISD::FNEG) { 2469 Mods |= SISrcMods::NEG; 2470 Src = Src.getOperand(0); 2471 } 2472 2473 if (Src.getOpcode() == ISD::FABS) { 2474 Mods |= SISrcMods::ABS; 2475 Src = Src.getOperand(0); 2476 } 2477 2478 return true; 2479 } 2480 2481 bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src, 2482 SDValue &SrcMods) const { 2483 unsigned Mods; 2484 if (SelectVOP3ModsImpl(In, Src, Mods)) { 2485 SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32); 2486 return true; 2487 } 2488 2489 return false; 2490 } 2491 2492 bool AMDGPUDAGToDAGISel::SelectVOP3Mods_NNaN(SDValue In, SDValue &Src, 2493 SDValue &SrcMods) const { 2494 SelectVOP3Mods(In, Src, SrcMods); 2495 return isNoNanSrc(Src); 2496 } 2497 2498 bool AMDGPUDAGToDAGISel::SelectVOP3NoMods(SDValue In, SDValue &Src) const { 2499 if (In.getOpcode() == ISD::FABS || In.getOpcode() == ISD::FNEG) 2500 return false; 2501 2502 Src = In; 2503 return true; 2504 } 2505 2506 bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src, 2507 SDValue &SrcMods, SDValue &Clamp, 2508 SDValue &Omod) const { 2509 SDLoc DL(In); 2510 Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1); 2511 Omod = CurDAG->getTargetConstant(0, DL, MVT::i1); 2512 2513 return SelectVOP3Mods(In, Src, SrcMods); 2514 } 2515 2516 bool AMDGPUDAGToDAGISel::SelectVOP3OMods(SDValue In, SDValue &Src, 2517 SDValue &Clamp, SDValue &Omod) const { 2518 Src = In; 2519 2520 SDLoc DL(In); 2521 Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1); 2522 Omod = CurDAG->getTargetConstant(0, DL, MVT::i1); 2523 2524 return true; 2525 } 2526 2527 bool AMDGPUDAGToDAGISel::SelectVOP3PMods(SDValue In, SDValue &Src, 2528 SDValue &SrcMods) const { 2529 unsigned Mods = 0; 2530 Src = In; 2531 2532 if (Src.getOpcode() == ISD::FNEG) { 2533 Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI); 2534 Src = Src.getOperand(0); 2535 } 2536 2537 if (Src.getOpcode() == ISD::BUILD_VECTOR) { 2538 unsigned VecMods = Mods; 2539 2540 SDValue Lo = stripBitcast(Src.getOperand(0)); 2541 SDValue Hi = stripBitcast(Src.getOperand(1)); 2542 2543 if (Lo.getOpcode() == ISD::FNEG) { 2544 Lo = 
stripBitcast(Lo.getOperand(0));
2545 Mods ^= SISrcMods::NEG;
2546 }
2547
2548 if (Hi.getOpcode() == ISD::FNEG) {
2549 Hi = stripBitcast(Hi.getOperand(0));
2550 Mods ^= SISrcMods::NEG_HI;
2551 }
2552
2553 if (isExtractHiElt(Lo, Lo))
2554 Mods |= SISrcMods::OP_SEL_0;
2555
2556 if (isExtractHiElt(Hi, Hi))
2557 Mods |= SISrcMods::OP_SEL_1;
2558
2559 Lo = stripExtractLoElt(Lo);
2560 Hi = stripExtractLoElt(Hi);
2561
2562 if (Lo == Hi && !isInlineImmediate(Lo.getNode())) {
2563 // Really a scalar input. Just select from the low half of the register to
2564 // avoid packing.
2565
2566 Src = Lo;
2567 SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2568 return true;
2569 }
2570
2571 Mods = VecMods;
2572 }
2573
2574 // Packed instructions do not have abs modifiers.
2575 Mods |= SISrcMods::OP_SEL_1;
2576
2577 SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2578 return true;
2579 }
2580
2581 bool AMDGPUDAGToDAGISel::SelectVOP3OpSel(SDValue In, SDValue &Src,
2582 SDValue &SrcMods) const {
2583 Src = In;
2584 // FIXME: Handle op_sel
2585 SrcMods = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
2586 return true;
2587 }
2588
2589 bool AMDGPUDAGToDAGISel::SelectVOP3OpSelMods(SDValue In, SDValue &Src,
2590 SDValue &SrcMods) const {
2591 // FIXME: Handle op_sel
2592 return SelectVOP3Mods(In, Src, SrcMods);
2593 }
2594
2595 // The return value is not whether the match is possible (which it always is),
2596 // but whether or not a conversion is really used.
2597 bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src,
2598 unsigned &Mods) const {
2599 Mods = 0;
2600 SelectVOP3ModsImpl(In, Src, Mods);
2601
2602 if (Src.getOpcode() == ISD::FP_EXTEND) {
2603 Src = Src.getOperand(0);
2604 assert(Src.getValueType() == MVT::f16);
2605 Src = stripBitcast(Src);
2606
2607 // Be careful about folding modifiers if we already have an abs. fneg is
2608 // applied last, so we don't want to apply an earlier fneg.
2609 if ((Mods & SISrcMods::ABS) == 0) {
2610 unsigned ModsTmp;
2611 SelectVOP3ModsImpl(Src, Src, ModsTmp);
2612
2613 if ((ModsTmp & SISrcMods::NEG) != 0)
2614 Mods ^= SISrcMods::NEG;
2615
2616 if ((ModsTmp & SISrcMods::ABS) != 0)
2617 Mods |= SISrcMods::ABS;
2618 }
2619
2620 // op_sel/op_sel_hi decide the source type and source.
2621 // If the source's op_sel_hi is set, it indicates to do a conversion from fp16.
2622 // If the source's op_sel is set, it picks the high half of the source
2623 // register.
2624
2625 Mods |= SISrcMods::OP_SEL_1;
2626 if (isExtractHiElt(Src, Src)) {
2627 Mods |= SISrcMods::OP_SEL_0;
2628
2629 // TODO: Should we try to look for neg/abs here?
2630 }
2631
2632 return true;
2633 }
2634
2635 return false;
2636 }
2637
2638 bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixMods(SDValue In, SDValue &Src,
2639 SDValue &SrcMods) const {
2640 unsigned Mods = 0;
2641 SelectVOP3PMadMixModsImpl(In, Src, Mods);
2642 SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2643 return true;
2644 }
2645
2646 SDValue AMDGPUDAGToDAGISel::getHi16Elt(SDValue In) const {
2647 if (In.isUndef())
2648 return CurDAG->getUNDEF(MVT::i32);
2649
2650 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(In)) {
2651 SDLoc SL(In);
2652 return CurDAG->getConstant(C->getZExtValue() << 16, SL, MVT::i32);
2653 }
2654
2655 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(In)) {
2656 SDLoc SL(In);
2657 return CurDAG->getConstant(
2658 C->getValueAPF().bitcastToAPInt().getZExtValue() << 16, SL, MVT::i32);
2659 }
2660
2661 SDValue Src;
2662 if (isExtractHiElt(In, Src))
2663 return Src;
2664
2665 return SDValue();
2666 }
2667
2668 bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode * N) const {
2669 assert(CurDAG->getTarget().getTargetTriple().getArch() == Triple::amdgcn);
2670
2671 const SIRegisterInfo *SIRI =
2672 static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
2673 const SIInstrInfo * SII =
2674 static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
2675
2676 unsigned Limit = 0;
2677 bool AllUsesAcceptSReg = true;
2678 for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
2679 Limit < 10 && U != E; ++U, ++Limit) {
2680 const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
2681
2682 // If the register class is unknown, it could be a register class that
2683 // needs to be an SGPR, e.g. an inline asm
2684 // constraint.
2685 if (!RC || SIRI->isSGPRClass(RC))
2686 return false;
2687
2688 if (RC != &AMDGPU::VS_32RegClass) {
2689 AllUsesAcceptSReg = false;
2690 SDNode * User = *U;
2691 if (User->isMachineOpcode()) {
2692 unsigned Opc = User->getMachineOpcode();
2693 MCInstrDesc Desc = SII->get(Opc);
2694 if (Desc.isCommutable()) {
2695 unsigned OpIdx = Desc.getNumDefs() + U.getOperandNo();
2696 unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
2697 if (SII->findCommutedOpIndices(Desc, OpIdx, CommuteIdx1)) {
2698 unsigned CommutedOpNo = CommuteIdx1 - Desc.getNumDefs();
2699 const TargetRegisterClass *CommutedRC = getOperandRegClass(*U, CommutedOpNo);
2700 if (CommutedRC == &AMDGPU::VS_32RegClass)
2701 AllUsesAcceptSReg = true;
2702 }
2703 }
2704 }
2705 // If AllUsesAcceptSReg is still false, we have not succeeded in commuting
2706 // the current user. This means there is at least one use that strictly
2707 // requires a VGPR, so we will not attempt to commute the remaining user
2708 // instructions.
2709 if (!AllUsesAcceptSReg) 2710 break; 2711 } 2712 } 2713 return !AllUsesAcceptSReg && (Limit < 10); 2714 } 2715 2716 bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode * N) const { 2717 auto Ld = cast<LoadSDNode>(N); 2718 2719 return Ld->getAlignment() >= 4 && 2720 ( 2721 ( 2722 ( 2723 Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || 2724 Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT 2725 ) 2726 && 2727 !N->isDivergent() 2728 ) 2729 || 2730 ( 2731 Subtarget->getScalarizeGlobalBehavior() && 2732 Ld->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && 2733 !Ld->isVolatile() && 2734 !N->isDivergent() && 2735 static_cast<const SITargetLowering *>( 2736 getTargetLowering())->isMemOpHasNoClobberedMemOperand(N) 2737 ) 2738 ); 2739 } 2740 2741 void AMDGPUDAGToDAGISel::PostprocessISelDAG() { 2742 const AMDGPUTargetLowering& Lowering = 2743 *static_cast<const AMDGPUTargetLowering*>(getTargetLowering()); 2744 bool IsModified = false; 2745 do { 2746 IsModified = false; 2747 2748 // Go over all selected nodes and try to fold them a bit more 2749 SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_begin(); 2750 while (Position != CurDAG->allnodes_end()) { 2751 SDNode *Node = &*Position++; 2752 MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(Node); 2753 if (!MachineNode) 2754 continue; 2755 2756 SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG); 2757 if (ResNode != Node) { 2758 if (ResNode) 2759 ReplaceUses(Node, ResNode); 2760 IsModified = true; 2761 } 2762 } 2763 CurDAG->RemoveDeadNodes(); 2764 } while (IsModified); 2765 } 2766 2767 bool R600DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) { 2768 Subtarget = &MF.getSubtarget<R600Subtarget>(); 2769 return SelectionDAGISel::runOnMachineFunction(MF); 2770 } 2771 2772 bool R600DAGToDAGISel::isConstantLoad(const MemSDNode *N, int CbId) const { 2773 if (!N->readMem()) 2774 return false; 2775 if (CbId == -1) 2776 return N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || 2777 N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT; 2778 2779 return N->getAddressSpace() == AMDGPUAS::CONSTANT_BUFFER_0 + CbId; 2780 } 2781 2782 bool R600DAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr, 2783 SDValue& IntPtr) { 2784 if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) { 2785 IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, SDLoc(Addr), 2786 true); 2787 return true; 2788 } 2789 return false; 2790 } 2791 2792 bool R600DAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr, 2793 SDValue& BaseReg, SDValue &Offset) { 2794 if (!isa<ConstantSDNode>(Addr)) { 2795 BaseReg = Addr; 2796 Offset = CurDAG->getIntPtrConstant(0, SDLoc(Addr), true); 2797 return true; 2798 } 2799 return false; 2800 } 2801 2802 void R600DAGToDAGISel::Select(SDNode *N) { 2803 unsigned int Opc = N->getOpcode(); 2804 if (N->isMachineOpcode()) { 2805 N->setNodeId(-1); 2806 return; // Already selected. 2807 } 2808 2809 switch (Opc) { 2810 default: break; 2811 case AMDGPUISD::BUILD_VERTICAL_VECTOR: 2812 case ISD::SCALAR_TO_VECTOR: 2813 case ISD::BUILD_VECTOR: { 2814 EVT VT = N->getValueType(0); 2815 unsigned NumVectorElts = VT.getVectorNumElements(); 2816 unsigned RegClassID; 2817 // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG 2818 // that adds a 128 bits reg copy when going through TwoAddressInstructions 2819 // pass. We want to avoid 128 bits copies as much as possible because they 2820 // can't be bundled by our scheduler. 
2821 switch(NumVectorElts) { 2822 case 2: RegClassID = R600::R600_Reg64RegClassID; break; 2823 case 4: 2824 if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR) 2825 RegClassID = R600::R600_Reg128VerticalRegClassID; 2826 else 2827 RegClassID = R600::R600_Reg128RegClassID; 2828 break; 2829 default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR"); 2830 } 2831 SelectBuildVector(N, RegClassID); 2832 return; 2833 } 2834 } 2835 2836 SelectCode(N); 2837 } 2838 2839 bool R600DAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base, 2840 SDValue &Offset) { 2841 ConstantSDNode *C; 2842 SDLoc DL(Addr); 2843 2844 if ((C = dyn_cast<ConstantSDNode>(Addr))) { 2845 Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32); 2846 Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32); 2847 } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) && 2848 (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) { 2849 Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32); 2850 Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32); 2851 } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) && 2852 (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) { 2853 Base = Addr.getOperand(0); 2854 Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32); 2855 } else { 2856 Base = Addr; 2857 Offset = CurDAG->getTargetConstant(0, DL, MVT::i32); 2858 } 2859 2860 return true; 2861 } 2862 2863 bool R600DAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base, 2864 SDValue &Offset) { 2865 ConstantSDNode *IMMOffset; 2866 2867 if (Addr.getOpcode() == ISD::ADD 2868 && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) 2869 && isInt<16>(IMMOffset->getZExtValue())) { 2870 2871 Base = Addr.getOperand(0); 2872 Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr), 2873 MVT::i32); 2874 return true; 2875 // If the pointer address is constant, we can move it to the offset field. 2876 } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr)) 2877 && isInt<16>(IMMOffset->getZExtValue())) { 2878 Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), 2879 SDLoc(CurDAG->getEntryNode()), 2880 R600::ZERO, MVT::i32); 2881 Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr), 2882 MVT::i32); 2883 return true; 2884 } 2885 2886 // Default case, no offset 2887 Base = Addr; 2888 Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32); 2889 return true; 2890 } 2891