//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#ifdef _MSC_VER
// Provide M_PI.
#define _USE_MATH_DEFINES
#include <cmath>
#endif

#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"

using namespace llvm;

// Command-line override selecting GPR-indexing mode over movrel for dynamic
// vector indexing. Defaults off; consumed elsewhere in this file (not in the
// portion visible here).
static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

/// Return the lowest-numbered 32-bit SGPR that \p CCInfo has not yet
/// allocated. Aborts (llvm_unreachable) if every SGPR is taken.
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

/// Constructor: registers the legal register classes for each MVT, then
/// declares which DAG operations are Legal / Custom / Expand / Promote for
/// the SI target, and registers the target DAG combines.
SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const SISubtarget &STI)
    : AMDGPUTargetLowering(TM, STI) {
  // Map each simple value type to the register class used to hold it.
  // Integer types default to scalar (SGPR) classes; FP types to vector
  // (VGPR) classes.
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  // Must be called after all register classes are added.
  computeRegisterProperties(STI.getRegisterInfo());

  // We need to custom lower vector stores from local memory
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);

  // i1 selects are promoted; 64-bit selects are custom-lowered, and f64
  // selects are performed as i64 so only one 64-bit path is needed.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  // Everything else on these wide (and 64-bit-element) vector types is
  // expanded, except CONCAT_VECTORS which gets a custom lowering.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32, MVT::v2i64, MVT::v2f64}) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
  // and output demarshalling
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value,
  // let LLVM add the comparison
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (getSubtarget()->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // On SI this is s_memtime and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);

  setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);

  // f64 rounding instructions only exist from Sea Islands (CI) onward.
  if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  // Opcodes for which performDAGCombine should be consulted.
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);

  setSchedulingPreference(Sched::RegPressure);
}

/// Convenience accessor: the base class stores the subtarget as the generic
/// AMDGPUSubtarget; downcast it to the SI-specific type.
const SISubtarget *SITargetLowering::getSubtarget() const {
  return static_cast<const SISubtarget *>(Subtarget);
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

/// Describe the memory behavior of target intrinsics so the DAG builder can
/// attach a MachineMemOperand. Only the atomic inc/dec intrinsics are
/// memory-accessing here; both read and write through operand 0.
bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          unsigned IntrID) const {
  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0; // ABI alignment of the memory type.
    Info.vol = false;
    Info.readMem = true;
    Info.writeMem = true;
    return true;
  default:
    return false;
  }
}

bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &,
                                          EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

/// A flat address is just a register: no immediate offset and no scaled
/// index (scale 0 or the trivial scale of 1 with no extra register).
bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  // Flat instructions do not have offsets, and only have the register
  // address.
  return AM.BaseOffs == 0 && (AM.Scale == 0 || AM.Scale == 1);
}

/// Addressing-mode legality for buffer (MUBUF/MTBUF) accesses, also used
/// for scratch since private memory goes through a scratch buffer.
bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so slightly
  // different than the normal addr64.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r
    // Or  2 * r + i is allowed as r + r + i.
    return true;
  default: // Don't allow n * r
    return false;
  }
}

/// Top-level addressing-mode legality query; dispatches per address space to
/// the FLAT / MUBUF / SMRD / DS rules above and below.
bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  switch (AS) {
  case AMDGPUAS::GLOBAL_ADDRESS: {
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // Assume the we will use FLAT for all global memory accesses
      // on VI.
      // FIXME: This assumption is currently wrong. On VI we still use
      // MUBUF instructions for the r + i addressing mode. As currently
      // implemented, the MUBUF instructions only work on buffer < 4GB.
      // It may be possible to support > 4GB buffers with MUBUF instructions,
      // by setting the stride value in the resource descriptor which would
      // increase the size limit to (stride * 4GB). However, this is risky,
      // because it has never been validated.
      return isLegalFlatAddressingMode(AM);
    }

    return isLegalMUBUFAddressingMode(AM);
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (DL.getTypeStoreSize(Ty) < 4)
      return isLegalMUBUFAddressingMode(AM);

    // SMRD/SMEM immediate-offset widths differ per generation.
    if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8-bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is 20-bit in bytes.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  }

  case AMDGPUAS::PRIVATE_ADDRESS:
    return isLegalMUBUFAddressingMode(AM);

  case AMDGPUAS::LOCAL_ADDRESS:
  case AMDGPUAS::REGION_ADDRESS: {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  }
  case AMDGPUAS::FLAT_ADDRESS:
  case AMDGPUAS::UNKNOWN_ADDRESS_SPACE:
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);

  default:
    llvm_unreachable("unhandled address space");
  }
}

/// Report whether an unaligned access of \p VT in \p AddrSpace is allowed,
/// and via \p IsFast whether it is expected to be fast.
bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  // NOTE(review): the right-hand `VT != MVT::Other` is redundant (the || has
  // already excluded it); the size checks look intended to reject
  // overly-wide types — confirm against upstream.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch.  If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
    return false;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have an uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS) ?
        (Align % 4 == 0) : true;
    }

    return true;
  }

  // Smaller than dword value must be aligned.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

/// Pick a wide vector type for memset/memcpy expansion when the size and
/// destination alignment permit, instead of the default pointer-sized type.
EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                          unsigned SrcAlign, bool IsMemset,
                                          bool ZeroMemset,
                                          bool MemcpyStrSrc,
                                          MachineFunction &MF) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type to
  // use. Make sure we switch these to 64-bit accesses.

  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
    return MVT::v4i32;

  if (Size >= 8 && DstAlign >= 4)
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}

/// True for the address spaces that alias flat memory (global, flat,
/// constant), i.e. where an addrspacecast between them is a no-op.
static bool isFlatGlobalAddrSpace(unsigned AS) {
  return AS == AMDGPUAS::GLOBAL_ADDRESS ||
         AS == AMDGPUAS::FLAT_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS;
}

bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
}

/// Decide whether a memory operation's address is uniform (the same for all
/// lanes of a wave), based on the IR pointer value feeding it.
bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();

  // UndefValue means this is a load of a kernel input.  These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  // Otherwise rely on the !amdgpu.uniform metadata placed by earlier analysis.
  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

/// Prefer splitting vectors whose elements are smaller than 32 bits rather
/// than widening them, except for single-element vectors.
TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(EVT VT) const {
  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
    return TypeSplitVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                         Type *Ty) const {
  // FIXME: Could be smarter if called for vector constants.
  return true;
}

/// Tell generic combines which types are worth forming for an opcode.
bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {

  // i16 is not desirable unless it is a load or a store.
  if (VT == MVT::i16 && Op != ISD::LOAD && Op != ISD::STORE)
    return false;

  // SimplifySetCC uses this function to determine whether or not it should
  // create setcc with i1 operands.  We don't have instructions for i1 setcc.
  if (VT == MVT::i1 && Op == ISD::SETCC)
    return false;

  return TargetLowering::isTypeDesirableForOp(Op, VT);
}

/// Build a constant-address-space pointer to byte \p Offset within the
/// kernel argument segment (base = preloaded KERNARG_SEGMENT_PTR SGPR pair).
SDValue SITargetLowering::LowerParameterPtr(SelectionDAG &DAG,
                                            const SDLoc &SL, SDValue Chain,
                                            unsigned Offset) const {
  const DataLayout &DL = DAG.getDataLayout();
  MachineFunction &MF = DAG.getMachineFunction();
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
  unsigned InputPtrReg = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);

  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
                                       MRI.getLiveInVirtReg(InputPtrReg), PtrVT);
  return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                     DAG.getConstant(Offset, SL, PtrVT));
}

/// Load one kernel argument of memory type \p MemVT at byte \p Offset of the
/// kernarg segment, extending to \p VT (sext/zext per \p Signed, or an
/// any/fp-extend for FP types). The load is marked invariant & dereferenceable.
SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                         const SDLoc &SL, SDValue Chain,
                                         unsigned Offset, bool Signed) const {
  const DataLayout &DL = DAG.getDataLayout();
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
  MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
  SDValue PtrOffset = DAG.getUNDEF(PtrVT);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  unsigned Align = DL.getABITypeAlignment(Ty);

  ISD::LoadExtType ExtTy = Signed ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
  if (MemVT.isFloatingPoint())
    ExtTy = ISD::EXTLOAD;

  SDValue Ptr = LowerParameterPtr(DAG, SL, Chain, Offset);
  return DAG.getLoad(ISD::UNINDEXED, ExtTy, VT, SL, Chain, Ptr, PtrOffset,
                     PtrInfo, MemVT, Align,
                     MachineMemOperand::MONonTemporal |
                     MachineMemOperand::MODereferenceable |
                     MachineMemOperand::MOInvariant);
}

/// Lower incoming formal arguments: skips dead PS inputs, splits shader
/// vector arguments into scalar pieces, allocates the user/system SGPR and
/// VGPR inputs (dispatch ptr, kernarg ptr, workgroup/workitem IDs, scratch
/// setup), and materializes each argument either as a kernarg load (compute
/// kernels) or a register copy (shaders).
SDValue SITargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  MachineFunction &MF = DAG.getMachineFunction();
  FunctionType *FType = MF.getFunction()->getFunctionType();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
    const Function *Fn = MF.getFunction();
    DiagnosticInfoUnsupported NoGraphicsHSA(
        *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
    DAG.getContext()->diagnose(NoGraphicsHSA);
    return DAG.getEntryNode();
  }

  // Create stack objects that are used for emitting debugger prologue if
  // "amdgpu-debugger-emit-prologue" attribute was specified.
  if (ST.debuggerEmitPrologue())
    createDebuggerPrologueStackObjects(MF);

  SmallVector<ISD::InputArg, 16> Splits;
  BitVector Skipped(Ins.size());

  for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];

    // First check if it's a PS input addr
    if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
        !Arg.Flags.isByVal() && PSInputNum <= 15) {

      if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
        // We can safely skip PS inputs
        Skipped.set(i);
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (Arg.Used)
        Info->PSInputEna |= 1 << PSInputNum;

      ++PSInputNum;
    }

    if (AMDGPU::isShader(CallConv)) {
      // Second split vertices into their elements
      if (Arg.VT.isVector()) {
        ISD::InputArg NewArg = Arg;
        NewArg.Flags.setSplit();
        NewArg.VT = Arg.VT.getVectorElementType();

        // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
        // three or five element vertex only needs three or five registers,
        // NOT four or eight.
        Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
        unsigned NumElements = ParamType->getVectorNumElements();

        for (unsigned j = 0; j != NumElements; ++j) {
          Splits.push_back(NewArg);
          NewArg.PartOffset += NewArg.VT.getStoreSize();
        }
      } else {
        Splits.push_back(Arg);
      }
    }
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // At least one interpolation mode must be enabled or else the GPU will hang.
  //
  // Check PSInputAddr instead of PSInputEna. The idea is that if the user set
  // PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, so we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CallConv == CallingConv::AMDGPU_PS &&
      ((Info->getPSInputAddr() & 0x7F) == 0 ||
       ((Info->getPSInputAddr() & 0xF) == 0 && Info->isPSInputAllocated(11)))) {
    CCInfo.AllocateReg(AMDGPU::VGPR0);
    CCInfo.AllocateReg(AMDGPU::VGPR1);
    Info->markPSInputAllocated(0);
    Info->PSInputEna |= 1;
  }

  if (!AMDGPU::isShader(CallConv)) {
    assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
  } else {
    assert(!Info->hasPrivateSegmentBuffer() && !Info->hasDispatchPtr() &&
           !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
           !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
           !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
           !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
           !Info->hasWorkItemIDZ());
  }

  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  // User SGPRs are allocated in a fixed order; each add* call reserves the
  // next SGPR(s) in the function info and returns the physical register.
  if (Info->hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info->hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info->addDispatchPtr(*TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SReg_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info->hasQueuePtr()) {
    unsigned QueuePtrReg = Info->addQueuePtr(*TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SReg_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info->hasKernargSegmentPtr()) {
    unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI);
    MF.addLiveIn(InputPtrReg, &AMDGPU::SReg_64RegClass);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info->hasDispatchID()) {
    unsigned DispatchIDReg = Info->addDispatchID(*TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SReg_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info->hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SReg_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // Compute kernels take arguments from the kernarg segment; shaders take
  // the (possibly split) register arguments.
  if (!AMDGPU::isShader(CallConv))
    analyzeFormalArgumentsCompute(CCInfo, Ins);
  else
    AnalyzeFormalArguments(CCInfo, Splits);

  SmallVector<SDValue, 16> Chains;

  // NOTE: ArgLocs is consumed in lock-step with Ins; skipped PS inputs do not
  // advance ArgIdx, and vector shader args consume one location per element.
  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {

    const ISD::InputArg &Arg = Ins[i];
    if (Skipped[i]) {
      InVals.push_back(DAG.getUNDEF(Arg.VT));
      continue;
    }

    CCValAssign &VA = ArgLocs[ArgIdx++];
    MVT VT = VA.getLocVT();

    if (VA.isMemLoc()) {
      VT = Ins[i].VT;
      EVT MemVT = VA.getLocVT();
      const unsigned Offset = Subtarget->getExplicitKernelArgOffset() +
                              VA.getLocMemOffset();
      // The first 36 bytes of the input buffer contains information about
      // thread group and global sizes.
      SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, Chain,
                                   Offset, Ins[i].Flags.isSExt());
      Chains.push_back(Arg.getValue(1));

      auto *ParamTy =
        dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
      if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
          ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
        // On SI local pointers are just offsets into LDS, so they are always
        // less than 16-bits.  On CI and newer they could potentially be
        // real pointers, so we can't guarantee their size.
        Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
                          DAG.getValueType(MVT::i16));
      }

      InVals.push_back(Arg);
      Info->setABIArgOffset(Offset + MemVT.getStoreSize());
      continue;
    }
    assert(VA.isRegLoc() && "Parameter must be in a register!");

    unsigned Reg = VA.getLocReg();

    if (VT == MVT::i64) {
      // For now assume it is a pointer
      Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0,
                                     &AMDGPU::SReg_64RegClass);
      Reg = MF.addLiveIn(Reg, &AMDGPU::SReg_64RegClass);
      SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
      InVals.push_back(Copy);
      continue;
    }

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);

    Reg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);

    if (Arg.VT.isVector()) {

      // Build a vector from the registers
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      SmallVector<SDValue, 4> Regs;
      Regs.push_back(Val);
      for (unsigned j = 1; j != NumElements; ++j) {
        Reg = ArgLocs[ArgIdx++].getLocReg();
        Reg = MF.addLiveIn(Reg, RC);

        SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
        Regs.push_back(Copy);
      }

      // Fill up the missing vector elements
      NumElements = Arg.VT.getVectorNumElements() - NumElements;
      Regs.append(NumElements, DAG.getUNDEF(VT));

      InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
      continue;
    }

    InVals.push_back(Val);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.

  // Start adding system SGPRs.
  if (Info->hasWorkGroupIDX()) {
    unsigned Reg = Info->addWorkGroupIDX();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupIDY()) {
    unsigned Reg = Info->addWorkGroupIDY();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupIDZ()) {
    unsigned Reg = Info->addWorkGroupIDZ();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupInfo()) {
    unsigned Reg = Info->addWorkGroupInfo();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg;

    if (AMDGPU::isShader(CallConv)) {
      PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
      Info->setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
    } else
      PrivateSegmentWaveByteOffsetReg = Info->addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }

  // Now that we've figured out where the scratch register inputs are, see if
  // should reserve the arguments and use them directly.
  bool HasStackObjects = MF.getFrameInfo().hasStackObjects();
  // Record that we know we have non-spill stack objects so we don't need to
  // check all stack objects later.
  if (HasStackObjects)
    Info->setHasNonSpillStackObjects(true);

  // Everything live out of a block is spilled with fast regalloc, so it's
  // almost certain that spilling will be required.
  if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
    HasStackObjects = true;

  if (ST.isAmdCodeObjectV2()) {
    if (HasStackObjects) {
      // If we have stack objects, we unquestionably need the private buffer
      // resource. For the Code Object V2 ABI, this will be the first 4 user
      // SGPR inputs. We can reserve those and use them directly.

      unsigned PrivateSegmentBufferReg = TRI->getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
      Info->setScratchRSrcReg(PrivateSegmentBufferReg);

      unsigned PrivateSegmentWaveByteOffsetReg = TRI->getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info->setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
    } else {
      unsigned ReservedBufferReg
        = TRI->reservedPrivateSegmentBufferReg(MF);
      unsigned ReservedOffsetReg
        = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);

      // We tentatively reserve the last registers (skipping the last two
      // which may contain VCC). After register allocation, we'll replace
      // these with the ones immediately after those which were really
      // allocated. In the prologue copies will be inserted from the argument
      // to these reserved registers.
      Info->setScratchRSrcReg(ReservedBufferReg);
      Info->setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  } else {
    unsigned ReservedBufferReg = TRI->reservedPrivateSegmentBufferReg(MF);

    // Without HSA, relocations are used for the scratch pointer and the
    // buffer resource setup is always inserted in the prologue. Scratch wave
    // offset is still in an input SGPR.
    Info->setScratchRSrcReg(ReservedBufferReg);

    if (HasStackObjects) {
      unsigned ScratchWaveOffsetReg = TRI->getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info->setScratchWaveOffsetReg(ScratchWaveOffsetReg);
    } else {
      unsigned ReservedOffsetReg
        = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);
      Info->setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  }

  // Work-item IDs arrive in VGPRs.
  if (Info->hasWorkItemIDX()) {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkItemIDY()) {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkItemIDZ()) {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Chains.empty())
    return Chain;

  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}

/// Lower return values. Compute kernels defer to the AMDGPU base class;
/// shader returns are handled below (function continues past this chunk).
SDValue
SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                              bool isVarArg,
                              const SmallVectorImpl<ISD::OutputArg> &Outs,
                              const SmallVectorImpl<SDValue> &OutVals,
                              const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  if (!AMDGPU::isShader(CallConv))
    return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
                                             OutVals, DL, DAG);

  Info->setIfReturnsVoid(Outs.size() == 0);

  SmallVector<ISD::OutputArg, 48> Splits;
  SmallVector<SDValue, 48> SplitVals;

  // Split vectors into their elements.
987 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 988 const ISD::OutputArg &Out = Outs[i]; 989 990 if (Out.VT.isVector()) { 991 MVT VT = Out.VT.getVectorElementType(); 992 ISD::OutputArg NewOut = Out; 993 NewOut.Flags.setSplit(); 994 NewOut.VT = VT; 995 996 // We want the original number of vector elements here, e.g. 997 // three or five, not four or eight. 998 unsigned NumElements = Out.ArgVT.getVectorNumElements(); 999 1000 for (unsigned j = 0; j != NumElements; ++j) { 1001 SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i], 1002 DAG.getConstant(j, DL, MVT::i32)); 1003 SplitVals.push_back(Elem); 1004 Splits.push_back(NewOut); 1005 NewOut.PartOffset += NewOut.VT.getStoreSize(); 1006 } 1007 } else { 1008 SplitVals.push_back(OutVals[i]); 1009 Splits.push_back(Out); 1010 } 1011 } 1012 1013 // CCValAssign - represent the assignment of the return value to a location. 1014 SmallVector<CCValAssign, 48> RVLocs; 1015 1016 // CCState - Info about the registers and stack slots. 1017 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 1018 *DAG.getContext()); 1019 1020 // Analyze outgoing return values. 1021 AnalyzeReturn(CCInfo, Splits); 1022 1023 SDValue Flag; 1024 SmallVector<SDValue, 48> RetOps; 1025 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 1026 1027 // Copy the result values into the output registers. 1028 for (unsigned i = 0, realRVLocIdx = 0; 1029 i != RVLocs.size(); 1030 ++i, ++realRVLocIdx) { 1031 CCValAssign &VA = RVLocs[i]; 1032 assert(VA.isRegLoc() && "Can only return in registers!"); 1033 1034 SDValue Arg = SplitVals[realRVLocIdx]; 1035 1036 // Copied from other backends. 
1037 switch (VA.getLocInfo()) { 1038 default: llvm_unreachable("Unknown loc info!"); 1039 case CCValAssign::Full: 1040 break; 1041 case CCValAssign::BCvt: 1042 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); 1043 break; 1044 } 1045 1046 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag); 1047 Flag = Chain.getValue(1); 1048 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 1049 } 1050 1051 // Update chain and glue. 1052 RetOps[0] = Chain; 1053 if (Flag.getNode()) 1054 RetOps.push_back(Flag); 1055 1056 unsigned Opc = Info->returnsVoid() ? AMDGPUISD::ENDPGM : AMDGPUISD::RETURN; 1057 return DAG.getNode(Opc, DL, MVT::Other, RetOps); 1058 } 1059 1060 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT, 1061 SelectionDAG &DAG) const { 1062 unsigned Reg = StringSwitch<unsigned>(RegName) 1063 .Case("m0", AMDGPU::M0) 1064 .Case("exec", AMDGPU::EXEC) 1065 .Case("exec_lo", AMDGPU::EXEC_LO) 1066 .Case("exec_hi", AMDGPU::EXEC_HI) 1067 .Case("flat_scratch", AMDGPU::FLAT_SCR) 1068 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO) 1069 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI) 1070 .Default(AMDGPU::NoRegister); 1071 1072 if (Reg == AMDGPU::NoRegister) { 1073 report_fatal_error(Twine("invalid register name \"" 1074 + StringRef(RegName) + "\".")); 1075 1076 } 1077 1078 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS && 1079 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) { 1080 report_fatal_error(Twine("invalid register \"" 1081 + StringRef(RegName) + "\" for subtarget.")); 1082 } 1083 1084 switch (Reg) { 1085 case AMDGPU::M0: 1086 case AMDGPU::EXEC_LO: 1087 case AMDGPU::EXEC_HI: 1088 case AMDGPU::FLAT_SCR_LO: 1089 case AMDGPU::FLAT_SCR_HI: 1090 if (VT.getSizeInBits() == 32) 1091 return Reg; 1092 break; 1093 case AMDGPU::EXEC: 1094 case AMDGPU::FLAT_SCR: 1095 if (VT.getSizeInBits() == 64) 1096 return Reg; 1097 break; 1098 default: 1099 llvm_unreachable("missing register type checking"); 1100 } 
1101 1102 report_fatal_error(Twine("invalid type for register \"" 1103 + StringRef(RegName) + "\".")); 1104 } 1105 1106 // If kill is not the last instruction, split the block so kill is always a 1107 // proper terminator. 1108 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI, 1109 MachineBasicBlock *BB) const { 1110 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 1111 1112 MachineBasicBlock::iterator SplitPoint(&MI); 1113 ++SplitPoint; 1114 1115 if (SplitPoint == BB->end()) { 1116 // Don't bother with a new block. 1117 MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR)); 1118 return BB; 1119 } 1120 1121 MachineFunction *MF = BB->getParent(); 1122 MachineBasicBlock *SplitBB 1123 = MF->CreateMachineBasicBlock(BB->getBasicBlock()); 1124 1125 MF->insert(++MachineFunction::iterator(BB), SplitBB); 1126 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end()); 1127 1128 SplitBB->transferSuccessorsAndUpdatePHIs(BB); 1129 BB->addSuccessor(SplitBB); 1130 1131 MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR)); 1132 return SplitBB; 1133 } 1134 1135 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the 1136 // wavefront. If the value is uniform and just happens to be in a VGPR, this 1137 // will only do one iteration. In the worst case, this will loop 64 times. 1138 // 1139 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value. 
// Emit the body of the "waterfall" loop in \p LoopBB that serializes a
// possibly-divergent VGPR index (\p IdxReg): each iteration picks one lane's
// index value, sets M0 (or the GPR-index register when \p UseGPRIdxMode),
// and masks EXEC down to exactly the lanes that share that value. The loop
// repeats until every active lane's index has been covered.
//
// \p InitReg / \p InitSaveExecReg are the values flowing in from \p OrigBB for
// the result and saved-exec PHIs; \p ResultReg / \p PhiReg carry the result
// across iterations. \p Offset is a constant added to each lane's index.
// Returns an iterator at which the caller should insert the per-iteration
// work (just before the backwards branch).
static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
  const SIInstrInfo *TII,
  MachineRegisterInfo &MRI,
  MachineBasicBlock &OrigBB,
  MachineBasicBlock &LoopBB,
  const DebugLoc &DL,
  const MachineOperand &IdxReg,
  unsigned InitReg,
  unsigned ResultReg,
  unsigned PhiReg,
  unsigned InitSaveExecReg,
  int Offset,
  bool UseGPRIdxMode) {
  MachineBasicBlock::iterator I = LoopBB.begin();

  unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  // Result PHI: incoming InitReg from the preheader, ResultReg from the
  // previous loop iteration.
  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
    .addReg(InitReg)
    .addMBB(&OrigBB)
    .addReg(ResultReg)
    .addMBB(&LoopBB);

  // Exec-mask PHI tracking the lanes still left to process.
  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
    .addReg(InitSaveExecReg)
    .addMBB(&OrigBB)
    .addReg(NewExec)
    .addMBB(&LoopBB);

  // Read the next variant <- also loop target. Takes the index value from the
  // first still-active lane and makes it uniform in CurrentIdxReg.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
    .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));

  // Compare the just read index value to all possible Idx values, producing a
  // lane mask of the lanes that match in CondReg.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
    .addReg(CurrentIdxReg)
    .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());

  if (UseGPRIdxMode) {
    // GPR indexing mode: feed the (possibly offset-adjusted) index into
    // S_SET_GPR_IDX_IDX instead of M0.
    unsigned IdxReg;
    if (Offset == 0) {
      IdxReg = CurrentIdxReg;
    } else {
      IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
        .addReg(CurrentIdxReg, RegState::Kill)
        .addImm(Offset);
    }

    MachineInstr *SetIdx =
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_IDX))
      .addReg(IdxReg, RegState::Kill);
    // NOTE(review): presumably operand 2 is the instruction's implicit M0
    // input; marking it undef avoids requiring a prior M0 def — confirm
    // against the S_SET_GPR_IDX_IDX operand layout.
    SetIdx->getOperand(2).setIsUndef();
  } else {
    // Move the uniform index (plus constant offset, if any) into M0.
    if (Offset == 0) {
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(CurrentIdxReg, RegState::Kill);
    } else {
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(CurrentIdxReg, RegState::Kill)
        .addImm(Offset);
    }
  }

  // Restrict EXEC to the matching lanes; NewExec receives the pre-AND EXEC
  // value for this iteration.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
    .addReg(CondReg, RegState::Kill);

  // Hint the allocator to coalesce NewExec with CondReg.
  MRI.setSimpleHint(NewExec, CondReg);

  // Update EXEC, switch all done bits to 0 and all todo bits to 1.
  MachineInstr *InsertPt =
    BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(NewExec);

  // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
  // s_cbranch_scc0?

  // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&LoopBB);

  // Caller inserts the per-iteration payload at this point (before the
  // branch, after the EXEC update).
  return InsertPt->getIterator();
}

// This has slightly sub-optimal regalloc when the source vector is killed by
// the read.
// The register allocator does not understand that the kill is
// per-workitem, so is kept alive for the whole loop so we end up not re-using a
// subregister from it, using 1 more VGPR than necessary. This was saved when
// this was expanded after register allocation.
//
// Splits \p MBB at \p MI and inserts a waterfall loop (see
// emitLoadM0FromVGPRLoop) that serializes the VGPR 'idx' operand of \p MI.
// EXEC is saved before the loop and restored at the top of the remainder
// block. Returns the insertion point inside the loop body where the caller
// should emit the actual indexed access.
static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
                                                  MachineBasicBlock &MBB,
                                                  MachineInstr &MI,
                                                  unsigned InitResultReg,
                                                  unsigned PhiReg,
                                                  int Offset,
                                                  bool UseGPRIdxMode) {
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  // Dummy initial value for the saved-exec PHI's preheader input.
  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);

  // Save the EXEC mask so it can be restored after the loop.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
    .addReg(AMDGPU::EXEC);

  // To insert the loop we need to split the block. Move everything after this
  // point to a new block, and insert a new empty block between the two.
  MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
  MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF->insert(MBBI, LoopBB);
  MF->insert(MBBI, RemainderBB);

  // The loop both branches back to itself and falls through to the remainder.
  LoopBB->addSuccessor(LoopBB);
  LoopBB->addSuccessor(RemainderBB);

  // Move the rest of the block into a new block.
  RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
  RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());

  MBB.addSuccessor(LoopBB);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);

  auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
                                      InitResultReg, DstReg, PhiReg, TmpExec,
                                      Offset, UseGPRIdxMode);

  // Restore EXEC once the loop is done.
  MachineBasicBlock::iterator First = RemainderBB->begin();
  BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
    .addReg(SaveExec);

  return InsPt;
}

// Returns subreg index, offset.
// For a constant \p Offset into the vector register \p VecReg of class
// \p SuperRC, fold as much of the offset as possible into the subregister
// index: an in-bounds offset becomes (sub0 + Offset, 0); an out-of-bounds or
// negative offset is left entirely in the returned offset with sub0 as the
// subregister.
static std::pair<unsigned, int>
computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
                            const TargetRegisterClass *SuperRC,
                            unsigned VecReg,
                            int Offset) {
  // Number of 32-bit elements covered by the register class.
  int NumElts = SuperRC->getSize() / 4;

  // Skip out of bounds offsets, or else we would end up using an undefined
  // register.
  if (Offset >= NumElts || Offset < 0)
    return std::make_pair(AMDGPU::sub0, Offset);

  return std::make_pair(AMDGPU::sub0 + Offset, 0);
}

// Return true if the index is an SGPR and was set.
// When the 'idx' operand of \p MI is uniform (SGPR), set up the indexing
// state directly — either M0, or GPR-index mode via S_SET_GPR_IDX_ON when
// \p UseGPRIdxMode — adding the constant \p Offset if nonzero. Returns false
// (emitting nothing) if the index lives in a VGPR, in which case the caller
// must fall back to the waterfall loop.
static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
                                 MachineRegisterInfo &MRI,
                                 MachineInstr &MI,
                                 int Offset,
                                 bool UseGPRIdxMode,
                                 bool IsIndirectSrc) {
  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());

  assert(Idx->getReg() != AMDGPU::NoRegister);

  // Divergent index: the caller has to emit a waterfall loop instead.
  if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
    return false;

  if (UseGPRIdxMode) {
    // Enable indexing of src0 for reads, of the destination for writes.
    unsigned IdxMode = IsIndirectSrc ?
      VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
    if (Offset == 0) {
      MachineInstr *SetOn =
        BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
        .addOperand(*Idx)
        .addImm(IdxMode);

      // NOTE(review): presumably operand 3 is the implicit M0 input; marking
      // it undef avoids requiring a prior M0 def — confirm operand layout.
      SetOn->getOperand(3).setIsUndef();
    } else {
      // Fold the constant offset into a temporary before entering the mode.
      unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
        .addOperand(*Idx)
        .addImm(Offset);
      MachineInstr *SetOn =
        BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
        .addReg(Tmp, RegState::Kill)
        .addImm(IdxMode);

      SetOn->getOperand(3).setIsUndef();
    }

    return true;
  }

  // movrel path: the index (plus offset) goes into M0.
  if (Offset == 0) {
    BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addOperand(*Idx);
  } else {
    BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
      .addOperand(*Idx)
      .addImm(Offset);
  }

  return true;
}

// Control flow needs to be inserted if indexing with a VGPR.
// Expand SI_INDIRECT_SRC_*: read one dynamically-indexed 32-bit element out of
// the vector register 'src'. Uses a direct M0/GPR-index setup when the index
// is uniform (SGPR); otherwise builds a waterfall loop via loadM0FromVGPR.
// Returns the block in which subsequent expansion should continue.
static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
                                          MachineBasicBlock &MBB,
                                          const SISubtarget &ST) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();

  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);

  // Fold the constant part of the offset into the subregister index.
  unsigned SubReg;
  std::tie(SubReg, Offset)
    = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);

  bool UseGPRIdxMode = ST.hasVGPRIndexMode() && EnableVGPRIndexMode;

  // Fast path: uniform index — no control flow needed.
  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    if (UseGPRIdxMode) {
      // TODO: Look at the uses to avoid the copy. This may require rescheduling
      // to avoid interfering with other uses, so probably requires a new
      // optimization pass.
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
        .addReg(SrcReg, RegState::Undef, SubReg)
        .addReg(SrcReg, RegState::Implicit)
        .addReg(AMDGPU::M0, RegState::Implicit);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
    } else {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
        .addReg(SrcReg, RegState::Undef, SubReg)
        .addReg(SrcReg, RegState::Implicit);
    }

    MI.eraseFromParent();

    return &MBB;
  }

  // Slow path: divergent index — serialize with a waterfall loop.
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  // Dummy preheader value for the loop's result PHI.
  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);

  if (UseGPRIdxMode) {
    MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
      .addImm(0) // Reset inside loop.
      .addImm(VGPRIndexMode::SRC0_ENABLE);
    // NOTE(review): presumably operand 3 is the implicit M0 input; marking it
    // undef avoids requiring a prior M0 def — confirm operand layout.
    SetOn->getOperand(3).setIsUndef();

    // Disable again after the loop.
    BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
  }

  auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, UseGPRIdxMode);
  MachineBasicBlock *LoopBB = InsPt->getParent();

  // Emit the per-iteration indexed read inside the loop body.
  if (UseGPRIdxMode) {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
      .addReg(SrcReg, RegState::Undef, SubReg)
      .addReg(SrcReg, RegState::Implicit)
      .addReg(AMDGPU::M0, RegState::Implicit);
  } else {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
      .addReg(SrcReg, RegState::Undef, SubReg)
      .addReg(SrcReg, RegState::Implicit);
  }

  MI.eraseFromParent();

  return LoopBB;
}

// Expand SI_INDIRECT_DST_*: write 'val' into one dynamically-indexed 32-bit
// element of the vector register 'src', producing the updated vector in the
// destination. Mirrors emitIndirectSrc: direct expansion for a uniform index,
// waterfall loop for a divergent one, plus a constant-index INSERT_SUBREG
// special case.
static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
                                          MachineBasicBlock &MBB,
                                          const SISubtarget &ST) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Dst = MI.getOperand(0).getReg();
  const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());

  // This can be an immediate, but will be folded later.
  assert(Val->getReg());

  unsigned SubReg;
  std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
                                                         SrcVec->getReg(),
                                                         Offset);
  bool UseGPRIdxMode = ST.hasVGPRIndexMode() && EnableVGPRIndexMode;

  // Constant index: a plain subregister insert suffices.
  if (Idx->getReg() == AMDGPU::NoRegister) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    assert(Offset == 0);

    BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
      .addOperand(*SrcVec)
      .addOperand(*Val)
      .addImm(SubReg);

    MI.eraseFromParent();
    return &MBB;
  }

  // Fast path: uniform index — no control flow needed.
  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    if (UseGPRIdxMode) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
        .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
        .addOperand(*Val)
        .addReg(Dst, RegState::ImplicitDefine)
        .addReg(SrcVec->getReg(), RegState::Implicit)
        .addReg(AMDGPU::M0, RegState::Implicit);

      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
    } else {
      const MCInstrDesc &MovRelDesc = TII->get(AMDGPU::V_MOVRELD_B32_e32);

      MachineInstr *MovRel =
        BuildMI(MBB, I, DL, MovRelDesc)
        .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
        .addOperand(*Val)
        .addReg(Dst, RegState::ImplicitDefine)
        .addReg(SrcVec->getReg(), RegState::Implicit);

      // Tie the implicit vector def to the implicit vector use so the
      // allocator assigns them the same registers (read-modify-write).
      const int ImpDefIdx = MovRelDesc.getNumOperands() +
        MovRelDesc.getNumImplicitUses();
      const int ImpUseIdx = ImpDefIdx + 1;

      MovRel->tieOperands(ImpDefIdx, ImpUseIdx);
    }

    MI.eraseFromParent();
    return &MBB;
  }

  // Slow path: divergent index — serialize with a waterfall loop. Val is
  // re-read every iteration, so its kill flags are no longer accurate.
  if (Val->isReg())
    MRI.clearKillFlags(Val->getReg());

  const DebugLoc &DL = MI.getDebugLoc();

  if (UseGPRIdxMode) {
    MachineBasicBlock::iterator I(&MI);

    MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
      .addImm(0) // Reset inside loop.
      .addImm(VGPRIndexMode::DST_ENABLE);
    // NOTE(review): presumably operand 3 is the implicit M0 input; marking it
    // undef avoids requiring a prior M0 def — confirm operand layout.
    SetOn->getOperand(3).setIsUndef();

    // Disable again after the loop.
    BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
  }

  unsigned PhiReg = MRI.createVirtualRegister(VecRC);

  auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
                              Offset, UseGPRIdxMode);
  MachineBasicBlock *LoopBB = InsPt->getParent();

  // Emit the per-iteration indexed write inside the loop body.
  if (UseGPRIdxMode) {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
      .addReg(PhiReg, RegState::Undef, SubReg) // vdst
      .addOperand(*Val) // src0
      .addReg(Dst, RegState::ImplicitDefine)
      .addReg(PhiReg, RegState::Implicit)
      .addReg(AMDGPU::M0, RegState::Implicit);
  } else {
    const MCInstrDesc &MovRelDesc = TII->get(AMDGPU::V_MOVRELD_B32_e32);
    // vdst is not actually read and just provides the base register index.
    MachineInstr *MovRel =
      BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
      .addReg(PhiReg, RegState::Undef, SubReg) // vdst
      .addOperand(*Val)
      .addReg(Dst, RegState::ImplicitDefine)
      .addReg(PhiReg, RegState::Implicit);

    // Tie implicit def to implicit use (read-modify-write of the vector).
    const int ImpDefIdx = MovRelDesc.getNumOperands() +
      MovRelDesc.getNumImplicitUses();
    const int ImpUseIdx = ImpDefIdx + 1;

    MovRel->tieOperands(ImpDefIdx, ImpUseIdx);
  }

  MI.eraseFromParent();

  return LoopBB;
}

// Expand pseudo-instructions that need custom MachineInstr-level lowering;
// anything not handled here is delegated to the AMDGPU base class.
MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
  MachineInstr &MI, MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  case AMDGPU::SI_INIT_M0: {
    // Copy the pseudo's operand into M0.
    const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
    BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
            TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addOperand(MI.getOperand(0));
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::GET_GROUPSTATICSIZE: {
    // Materialize the function's statically-known LDS size as an immediate.
    const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

    MachineFunction *MF = BB->getParent();
    SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    DebugLoc DL = MI.getDebugLoc();
    BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
      .addOperand(MI.getOperand(0))
      .addImm(MFI->getLDSSize());
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::SI_INDIRECT_SRC_V1:
  case AMDGPU::SI_INDIRECT_SRC_V2:
  case AMDGPU::SI_INDIRECT_SRC_V4:
  case AMDGPU::SI_INDIRECT_SRC_V8:
  case AMDGPU::SI_INDIRECT_SRC_V16:
    return emitIndirectSrc(MI, *BB, *getSubtarget());
  case AMDGPU::SI_INDIRECT_DST_V1:
  case AMDGPU::SI_INDIRECT_DST_V2:
  case AMDGPU::SI_INDIRECT_DST_V4:
  case AMDGPU::SI_INDIRECT_DST_V8:
  case AMDGPU::SI_INDIRECT_DST_V16:
    return emitIndirectDst(MI, *BB, *getSubtarget());
  case AMDGPU::SI_KILL:
    return splitKillBlock(MI, BB);
  case AMDGPU::V_CNDMASK_B64_PSEUDO: {
    // Expand a 64-bit conditional select into two 32-bit V_CNDMASKs on the
    // low/high halves, then recombine with REG_SEQUENCE.
    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
    const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

    unsigned Dst = MI.getOperand(0).getReg();
    unsigned Src0 = MI.getOperand(1).getReg();
    unsigned Src1 = MI.getOperand(2).getReg();
    const DebugLoc &DL = MI.getDebugLoc();
    unsigned SrcCond = MI.getOperand(3).getReg();

    unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
      .addReg(Src0, 0, AMDGPU::sub0)
      .addReg(Src1, 0, AMDGPU::sub0)
      .addReg(SrcCond);
    BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
      .addReg(Src0, 0, AMDGPU::sub1)
      .addReg(Src1, 0, AMDGPU::sub1)
      .addReg(SrcCond);

    BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
      .addReg(DstLo)
      .addImm(AMDGPU::sub0)
      .addReg(DstHi)
      .addImm(AMDGPU::sub1);

    MI.eraseFromParent();
    return BB;
  }
  default:
    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  }
}

bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  // This currently forces unfolding various combinations of fsub into fma with
  // free fneg'd operands. As long as we have fast FMA (controlled by
  // isFMAFasterThanFMulAndFAdd), we should perform these.

  // When fma is quarter rate, for f64 where add / sub are at best half rate,
  // most of these combines appear to be cycle neutral but save on instruction
  // count / code size.
  return true;
}

// setcc results are i1 scalars, or vectors of i1 with the input's element
// count.
EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
                                         EVT VT) const {
  if (!VT.isVector()) {
    return MVT::i1;
  }
  return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
}

// Shift amounts are always 32-bit on this target.
MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT) const {
  return MVT::i32;
}

// Answering this is somewhat tricky and depends on the specific device which
// have different rates for fma or all f64 operations.
//
// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
// regardless of which device (although the number of cycles differs between
// devices), so it is always profitable for f64.
//
// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
// only on full rate devices. Normally, we should prefer selecting v_mad_f32
// which we can always do even without fused FP ops since it returns the same
// result as the separate operations and since it is always full
// rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
// however does not support denormals, so we do report fma as faster if we have
// a fast fma device and require denormals.
//
// Report whether fma is faster than fmul+fadd for the given (scalarized) type.
bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
    // This is as fast on some subtargets. However, we always have full rate f32
    // mad available which returns the same result as the separate operations
    // which we should prefer over fma. We can't use this if we want to support
    // denormals, so only report this in these cases.
    return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32();
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//

SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::LOAD: {
    SDValue Result = LowerLOAD(Op, DAG);
    assert((!Result.getNode() ||
            Result.getNode()->getNumValues() == 2) &&
           "Load should return a value and a chain");
    return Result;
  }

  case ISD::FSIN:
  case ISD::FCOS:
    return LowerTrig(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::FDIV: return LowerFDIV(Op, DAG);
  case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::GlobalAddress: {
    MachineFunction &MF = DAG.getMachineFunction();
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    return LowerGlobalAddress(MFI, Op, DAG);
  }
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
  case ISD::TRAP: return lowerTRAP(Op, DAG);
  }
  return SDValue();
}

/// \brief Helper function for LowerBRCOND
///
/// Returns the first user of \p Value whose opcode is \p Opcode, or nullptr
/// if no such user exists. Only uses of the exact SDValue (same result
/// number) are considered.
static SDNode *findUser(SDValue Value, unsigned Opcode) {

  SDNode *Parent = Value.getNode();
  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
       I != E; ++I) {

    if (I.getUse().get() != Value)
      continue;

    if (I->getOpcode() == Opcode)
      return *I;
  }
  return nullptr;
}

/// \brief Returns true if \p Intr is one of the amdgcn structured control flow
/// intrinsics (if/else/end_cf/loop and the *break family) that LowerBRCOND
/// must rewrite to carry the branch destination.
bool SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
  if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
    case AMDGPUIntrinsic::amdgcn_if:
    case AMDGPUIntrinsic::amdgcn_else:
    case AMDGPUIntrinsic::amdgcn_end_cf:
    case AMDGPUIntrinsic::amdgcn_loop:
      return true;
    default:
      return false;
    }
  }

  if (Intr->getOpcode() == ISD::INTRINSIC_WO_CHAIN) {
    switch (cast<ConstantSDNode>(Intr->getOperand(0))->getZExtValue()) {
    case AMDGPUIntrinsic::amdgcn_break:
    case AMDGPUIntrinsic::amdgcn_if_break:
    case AMDGPUIntrinsic::amdgcn_else_break:
      return true;
    default:
      return false;
    }
  }

  return false;
}

void SITargetLowering::createDebuggerPrologueStackObjects(
    MachineFunction &MF) const {
  // Create stack objects that are used for emitting debugger prologue.
  //
  // Debugger prologue writes work group IDs and work item IDs to scratch memory
  // at fixed location in the following format:
  //   offset 0:  work group ID x
  //   offset 4:  work group ID y
  //   offset 8:  work group ID z
  //   offset 16: work item ID x
  //   offset 20: work item ID y
  //   offset 24: work item ID z
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  int ObjectIdx = 0;

  // For each dimension:
  for (unsigned i = 0; i < 3; ++i) {
    // Create fixed stack object for work group ID.
    ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true);
    Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx);
    // Create fixed stack object for work item ID.
    ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true);
    Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx);
  }
}

/// This transforms the control flow intrinsics to get the branch destination as
/// last parameter, also switches branch target with BR if the need arise
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
                                      SelectionDAG &DAG) const {

  SDLoc DL(BRCOND);

  SDNode *Intr = BRCOND.getOperand(1).getNode();
  SDValue Target = BRCOND.getOperand(2);
  SDNode *BR = nullptr;
  SDNode *SetCC = nullptr;

  if (Intr->getOpcode() == ISD::SETCC) {
    // As long as we negate the condition everything is fine
    SetCC = Intr;
    Intr = SetCC->getOperand(0).getNode();

  } else {
    // Get the target from BR if we don't negate the condition
    // NOTE(review): findUser may return nullptr; BR is dereferenced
    // unconditionally below — presumably a BR user always exists in this
    // shape of DAG, but confirm.
    BR = findUser(BRCOND, ISD::BR);
    Target = BR->getOperand(1);
  }

  // FIXME: This changes the types of the intrinsics instead of introducing new
  // nodes with the correct types.
  // e.g. llvm.amdgcn.loop

  // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
  // =>     t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>

  if (!isCFIntrinsic(Intr)) {
    // This is a uniform branch so we don't need to legalize.
    return BRCOND;
  }

  bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
                   Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;

  assert(!SetCC ||
        (SetCC->getConstantOperandVal(1) == 1 &&
         cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
                              ISD::SETNE));

  // operands of the new intrinsic call
  SmallVector<SDValue, 4> Ops;
  if (HaveChain)
    Ops.push_back(BRCOND.getOperand(0));

  Ops.append(Intr->op_begin() + (HaveChain ? 1 : 0), Intr->op_end());
  Ops.push_back(Target);

  ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());

  // build the new intrinsic call
  SDNode *Result = DAG.getNode(
    Res.size() > 1 ? ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL,
    DAG.getVTList(Res), Ops).getNode();

  if (!HaveChain) {
    // The original intrinsic had no chain; merge in BRCOND's incoming chain
    // so the rewritten node is properly ordered.
    SDValue Ops[] = {
      SDValue(Result, 0),
      BRCOND.getOperand(0)
    };

    Result = DAG.getMergeValues(Ops, DL).getNode();
  }

  if (BR) {
    // Give the branch instruction our target
    SDValue Ops[] = {
      BR->getOperand(0),
      BRCOND.getOperand(2)
    };
    SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
    DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
    BR = NewBR.getNode();
  }

  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);

  // Copy the intrinsic results to registers
  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
    SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
    if (!CopyToReg)
      continue;

    Chain = DAG.getCopyToReg(
      Chain, DL,
      CopyToReg->getOperand(1),
      SDValue(Result, i - 1),
      SDValue());

    DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
  }

  // Remove the old intrinsic from the chain
  DAG.ReplaceAllUsesOfValueWith(
    SDValue(Intr, Intr->getNumValues() - 1),
    Intr->getOperand(0));

  return Chain;
}

/// \brief Load the high 32 bits of the aperture base for the given segment
/// (local or private) from the amd_queue_t pointed to by the queue-pointer
/// user SGPR.
SDValue SITargetLowering::getSegmentAperture(unsigned AS,
                                             SelectionDAG &DAG) const {
  SDLoc SL;
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
  assert(UserSGPR != AMDGPU::NoRegister);

  SDValue QueuePtr = CreateLiveInRegister(
    DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);

  // Offset into amd_queue_t for group_segment_aperture_base_hi /
  // private_segment_aperture_base_hi.
  uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;

  SDValue Ptr = DAG.getNode(ISD::ADD, SL, MVT::i64, QueuePtr,
                            DAG.getConstant(StructOffset, SL, MVT::i64));

  // TODO: Use custom target PseudoSourceValue.
  // TODO: We should use the value from the IR intrinsic call, but it might not
  // be available and how do we get it?
  Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
                                              AMDGPUAS::CONSTANT_ADDRESS));

  MachinePointerInfo PtrInfo(V, StructOffset);
  return DAG.getLoad(MVT::i32, SL, QueuePtr.getValue(1), Ptr, PtrInfo,
                     MinAlign(64, StructOffset),
                     MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant);
}

/// \brief Lower addrspacecast between flat and local/private segments.
/// Null pointers are mapped between the segment null (-1) and flat null (0);
/// any other source/destination pair is diagnosed as unsupported.
SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc SL(Op);
  const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);

  SDValue Src = ASC->getOperand(0);

  // FIXME: Really support non-0 null pointers.
  SDValue SegmentNullPtr = DAG.getConstant(-1, SL, MVT::i32);
  SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);

  // flat -> local/private
  if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
    if (ASC->getDestAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
        ASC->getDestAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) {
      SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
      SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);

      return DAG.getNode(ISD::SELECT, SL, MVT::i32,
                         NonNull, Ptr, SegmentNullPtr);
    }
  }

  // local/private -> flat
  if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
    if (ASC->getSrcAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
        ASC->getSrcAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) {
      SDValue NonNull
        = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);

      // Widen to 64 bits by pairing the 32-bit segment offset with the
      // aperture base (high half) for this segment.
      SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), DAG);
      SDValue CvtPtr
        = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);

      return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
                         DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
                         FlatNullPtr);
    }
  }

  // global <-> flat are no-ops and never emitted.

  const MachineFunction &MF = DAG.getMachineFunction();
  DiagnosticInfoUnsupported InvalidAddrSpaceCast(
    *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
  DAG.getContext()->diagnose(InvalidAddrSpaceCast);

  return DAG.getUNDEF(ASC->getValueType(0));
}

static bool shouldEmitFixup(const GlobalValue *GV,
                            const TargetMachine &TM) {
  // FIXME: We need to emit global variables in constant address space in a
  // separate section, and use relocations.
  return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS;
}

static bool shouldEmitGOTReloc(const GlobalValue *GV,
                               const TargetMachine &TM) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
         !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
}

static bool shouldEmitPCReloc(const GlobalValue *GV,
                              const TargetMachine &TM) {
  return !shouldEmitFixup(GV, TM) && !shouldEmitGOTReloc(GV, TM);
}

bool
SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // We can fold offsets for anything that doesn't require a GOT relocation.
  return GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
         !shouldEmitGOTReloc(GA->getGlobal(), getTargetMachine());
}

static SDValue buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
                                       SDLoc DL, unsigned Offset, EVT PtrVT,
                                       unsigned GAFlags = SIInstrInfo::MO_NONE) {
  // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
  // lowered to the following code sequence:
  //
  // For constant address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol
  //   s_addc_u32 s1, s1, 0
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   a fixup or relocation is emitted to replace $symbol with a literal
  //   constant, which is a pc-relative offset from the encoding of the $symbol
  //   operand to the global variable.
  //
  // For global address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
  //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   fixups or relocations are emitted to replace $symbol@*@lo and
  //   $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
  //   which is a 64-bit pc-relative offset from the encoding of the $symbol
  //   operand to the global variable.
  //
  // What we want here is an offset from the value returned by s_getpc
  // (which is the address of the s_add_u32 instruction) to the global
  // variable, but since the encoding of $symbol starts 4 bytes after the start
  // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
  // small. This requires us to add 4 to the global variable offset in order to
  // compute the correct address.
  SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
                                             GAFlags);
  SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
                                             GAFlags == SIInstrInfo::MO_NONE ?
                                             GAFlags : GAFlags + 1);
  return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
}

/// \brief Lower a constant/global address-space GlobalAddress to either a
/// direct pc-relative fixup, a REL32 relocation, or a load from the GOT.
SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                             SDValue Op,
                                             SelectionDAG &DAG) const {
  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);

  if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS &&
      GSD->getAddressSpace() != AMDGPUAS::GLOBAL_ADDRESS)
    return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);

  SDLoc DL(GSD);
  const GlobalValue *GV = GSD->getGlobal();
  EVT PtrVT = Op.getValueType();

  if (shouldEmitFixup(GV, getTargetMachine()))
    return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
  else if (shouldEmitPCReloc(GV, getTargetMachine()))
    return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
                                   SIInstrInfo::MO_REL32);

  // GOT case: materialize the GOT entry's address pc-relatively, then load
  // the actual global's address through it.
  SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
                                            SIInstrInfo::MO_GOTPCREL32);

  Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
  const DataLayout &DataLayout = DAG.getDataLayout();
  unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
  // FIXME: Use a PseudoSourceValue once those can be assigned an address space.
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
                     MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant);
}

/// \brief Lower ISD::TRAP: warn that a trap handler is unsupported and emit
/// s_endpgm instead.
SDValue SITargetLowering::lowerTRAP(SDValue Op,
                                    SelectionDAG &DAG) const {
  const MachineFunction &MF = DAG.getMachineFunction();
  DiagnosticInfoUnsupported NoTrap(*MF.getFunction(),
                                   "trap handler not supported",
                                   Op.getDebugLoc(),
                                   DS_Warning);
  DAG.getContext()->diagnose(NoTrap);

  // Emit s_endpgm.

  // FIXME: This should really be selected to s_trap, but that requires
  // setting up the trap handler for it to do anything.
  return DAG.getNode(AMDGPUISD::ENDPGM, SDLoc(Op), MVT::Other,
                     Op.getOperand(0));
}

SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
                                   const SDLoc &DL, SDValue V) const {
  // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
  // the destination register.
  //
  // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
  // so we will end up with redundant moves to m0.
  //
  // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.

  // A Null SDValue creates a glue result.
  SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
                                  V, Chain);
  return SDValue(M0, 0);
}

/// \brief Load a 32-bit kernel parameter and assert that only the low bits of
/// \p VT are significant (the rest are known zero).
SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
                                                 SDValue Op,
                                                 MVT VT,
                                                 unsigned Offset) const {
  SDLoc SL(Op);
  SDValue Param = LowerParameter(DAG, MVT::i32, MVT::i32, SL,
                                 DAG.getEntryNode(), Offset, false);
  // The local size values will have the hi 16-bits as zero.
  return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
                     DAG.getValueType(VT));
}

// Diagnose use of an HSA-only intrinsic on a non-HSA target and return undef.
static SDValue emitNonHSAIntrinsicError(SelectionDAG& DAG, SDLoc DL, EVT VT) {
  DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
                                      "non-hsa intrinsic with hsa target",
                                      DL.getDebugLoc());
  DAG.getContext()->diagnose(BadIntrin);
  return DAG.getUNDEF(VT);
}

// Diagnose use of an intrinsic removed on this subtarget and return undef.
static SDValue emitRemovedIntrinsicError(SelectionDAG& DAG, SDLoc DL, EVT VT) {
  DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
                                      "intrinsic not supported on subtarget",
                                      DL.getDebugLoc());
  DAG.getContext()->diagnose(BadIntrin);
  return DAG.getUNDEF(VT);
}

SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto MFI = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // TODO: Should this propagate fast-math-flags?

  switch (IntrinsicID) {
  case Intrinsic::amdgcn_dispatch_ptr:
  case Intrinsic::amdgcn_queue_ptr: {
    if (!Subtarget->isAmdCodeObjectV2()) {
      DiagnosticInfoUnsupported BadIntrin(
          *MF.getFunction(), "unsupported hsa intrinsic without hsa target",
          DL.getDebugLoc());
      DAG.getContext()->diagnose(BadIntrin);
      return DAG.getUNDEF(VT);
    }

    auto Reg = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
      SIRegisterInfo::DISPATCH_PTR : SIRegisterInfo::QUEUE_PTR;
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass,
                                TRI->getPreloadedValue(MF, Reg), VT);
  }
  case Intrinsic::amdgcn_implicitarg_ptr: {
    unsigned offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
    return LowerParameterPtr(DAG, DL, DAG.getEntryNode(), offset);
  }
  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    unsigned Reg
      = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
  }
  case Intrinsic::amdgcn_dispatch_id: {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::DISPATCH_ID);
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
  }
  case Intrinsic::amdgcn_rcp:
    return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq:
  case AMDGPUIntrinsic::AMDGPU_rsq: // Legacy name
    return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq_legacy: {
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);

    return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
  }
  case Intrinsic::amdgcn_rcp_legacy: {
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);
    return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
  }
  case Intrinsic::amdgcn_rsq_clamp: {
    if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
      return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));

    // No dedicated instruction on VI+; emulate by clamping rsq to the
    // largest finite value of the type on both sides.
    Type *Type = VT.getTypeForEVT(*DAG.getContext());
    APFloat Max = APFloat::getLargest(Type->getFltSemantics());
    APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);

    SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
    SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
                              DAG.getConstantFP(Max, DL, VT));
    return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
                       DAG.getConstantFP(Min, DL, VT));
  }
  case Intrinsic::r600_read_ngroups_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                          SI::KernelInputOffsets::NGROUPS_X, false);
  case Intrinsic::r600_read_ngroups_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                          SI::KernelInputOffsets::NGROUPS_Y, false);
  case Intrinsic::r600_read_ngroups_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                          SI::KernelInputOffsets::NGROUPS_Z, false);
  case Intrinsic::r600_read_global_size_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                          SI::KernelInputOffsets::GLOBAL_SIZE_X, false);
  case Intrinsic::r600_read_global_size_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                          SI::KernelInputOffsets::GLOBAL_SIZE_Y, false);
  case Intrinsic::r600_read_global_size_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                          SI::KernelInputOffsets::GLOBAL_SIZE_Z, false);
  case Intrinsic::r600_read_local_size_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_X);
  case Intrinsic::r600_read_local_size_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Y);
  case Intrinsic::r600_read_local_size_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Z);
  case Intrinsic::amdgcn_workgroup_id_x:
  case Intrinsic::r600_read_tgid_x:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_X), VT);
  case Intrinsic::amdgcn_workgroup_id_y:
  case Intrinsic::r600_read_tgid_y:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Y), VT);
  case Intrinsic::amdgcn_workgroup_id_z:
  case Intrinsic::r600_read_tgid_z:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Z), VT);
  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::r600_read_tidig_x:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X), VT);
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::r600_read_tidig_y:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y), VT);
  case Intrinsic::amdgcn_workitem_id_z:
  case Intrinsic::r600_read_tidig_z:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z), VT);
  case AMDGPUIntrinsic::SI_load_const: {
    SDValue Ops[] = {
      Op.getOperand(1),
      Op.getOperand(2)
    };

    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
      MachineMemOperand::MOInvariant,
      VT.getStoreSize(), 4);
    return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }
  case AMDGPUIntrinsic::amdgcn_fdiv_fast: {
    return lowerFDIV_FAST(Op, DAG);
  }
  case AMDGPUIntrinsic::SI_vs_load_input:
    return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::SI_fs_constant: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3));
    SDValue Glue = M0.getValue(1);
    return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32,
                       DAG.getConstant(2, DL, MVT::i32), // P0
                       Op.getOperand(1), Op.getOperand(2), Glue);
  }
  case AMDGPUIntrinsic::SI_packf16:
    if (Op.getOperand(1).isUndef() && Op.getOperand(2).isUndef())
      return DAG.getUNDEF(MVT::i32);
    return Op;
  case AMDGPUIntrinsic::SI_fs_interp: {
    SDValue IJ = Op.getOperand(4);
    SDValue I = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ,
                            DAG.getConstant(0, DL, MVT::i32));
    SDValue J = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ,
                            DAG.getConstant(1, DL, MVT::i32));
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3));
    SDValue Glue = M0.getValue(1);
    SDValue P1 = DAG.getNode(AMDGPUISD::INTERP_P1, DL,
                             DAG.getVTList(MVT::f32, MVT::Glue),
                             I, Op.getOperand(1), Op.getOperand(2), Glue);
    Glue = SDValue(P1.getNode(), 1);
    return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, P1, J,
                       Op.getOperand(1), Op.getOperand(2), Glue);
  }
  case Intrinsic::amdgcn_interp_p1: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
    SDValue Glue = M0.getValue(1);
    return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Glue);
  }
  case Intrinsic::amdgcn_interp_p2: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
    SDValue Glue = SDValue(M0.getNode(), 1);
    return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
                       Glue);
  }
  case Intrinsic::amdgcn_sin:
    return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_cos:
    return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_log_clamp: {
    if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
      return SDValue();

    DiagnosticInfoUnsupported BadIntrin(
      *MF.getFunction(), "intrinsic not supported on subtarget",
      DL.getDebugLoc());
      DAG.getContext()->diagnose(BadIntrin);
    return DAG.getUNDEF(VT);
  }
  case Intrinsic::amdgcn_ldexp:
    return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));

  case Intrinsic::amdgcn_fract:
    return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_class:
    return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_div_fmas:
    return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(4));

  case Intrinsic::amdgcn_div_fixup:
    return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::amdgcn_trig_preop:
    return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_div_scale: {
    // 3rd parameter required to be a constant.
    const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!Param)
      return DAG.getUNDEF(VT);

    // Translate to the operands expected by the machine instruction. The
    // first parameter must be the same as the first instruction.
    SDValue Numerator = Op.getOperand(1);
    SDValue Denominator = Op.getOperand(2);

    // Note this order is opposite of the machine instruction's operations,
    // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
    // intrinsic has the numerator as the first operand to match a normal
    // division operation.

    SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;

    return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
                       Denominator, Numerator);
  }
  case Intrinsic::amdgcn_icmp: {
    // NOTE(review): CD is not null-checked before use; a non-constant
    // condition-code operand would crash here — TODO confirm callers
    // guarantee an immediate.
    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    int CondCode = CD->getSExtValue();

    if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
        CondCode >= ICmpInst::Predicate::BAD_ICMP_PREDICATE)
      return DAG.getUNDEF(VT);

    ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
    ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
    return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
                       Op.getOperand(2), DAG.getCondCode(CCOpcode));
  }
  case Intrinsic::amdgcn_fcmp: {
    // NOTE(review): same missing null check on CD as amdgcn_icmp above.
    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    int CondCode = CD->getSExtValue();

    if (CondCode <= FCmpInst::Predicate::FCMP_FALSE ||
        CondCode >= FCmpInst::Predicate::FCMP_TRUE)
      return DAG.getUNDEF(VT);

    FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
    ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
    return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
                       Op.getOperand(2), DAG.getCondCode(CCOpcode));
  }
  case Intrinsic::amdgcn_fmul_legacy:
    return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_sffbh:
  case AMDGPUIntrinsic::AMDGPU_flbit_i32: // Legacy name.
    return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
  default:
    return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  }
}

SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                                                 SelectionDAG &DAG) const {
  unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    MemSDNode *M = cast<MemSDNode>(Op);
    unsigned Opc = (IntrID == Intrinsic::amdgcn_atomic_inc) ?
      AMDGPUISD::ATOMIC_INC : AMDGPUISD::ATOMIC_DEC;
    SDValue Ops[] = {
      M->getOperand(0), // Chain
      M->getOperand(2), // Ptr
      M->getOperand(3) // Value
    };

    return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }
  default:
    return SDValue();
  }
}

SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();

  switch (IntrinsicID) {
  case AMDGPUIntrinsic::SI_sendmsg: {
    Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
    SDValue Glue = Chain.getValue(1);
    return DAG.getNode(AMDGPUISD::SENDMSG, DL, MVT::Other, Chain,
                       Op.getOperand(2), Glue);
  }
  case AMDGPUIntrinsic::SI_tbuffer_store: {
    SDValue Ops[] = {
      Chain,
      Op.getOperand(2),
      Op.getOperand(3),
      Op.getOperand(4),
      Op.getOperand(5),
      Op.getOperand(6),
      Op.getOperand(7),
      Op.getOperand(8),
      Op.getOperand(9),
      Op.getOperand(10),
      Op.getOperand(11),
      Op.getOperand(12),
      Op.getOperand(13),
      Op.getOperand(14)
    };

    EVT VT = Op.getOperand(3).getValueType();

    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOStore,
      VT.getStoreSize(), 4);
    return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }
  case AMDGPUIntrinsic::AMDGPU_kill: {
    SDValue Src = Op.getOperand(2);
    if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) {
      // A non-negative constant condition never kills; drop the node.
      if (!K->isNegative())
        return Chain;

      SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32);
      return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne);
    }

    SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src);
    return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast);
  }
  default:
    return SDValue();
  }
}

SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
    assert(MemVT == MVT::i1 && "Only i1 non-extloads expected");
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, MVT::i8, MMO);

    SDValue Ops[] = {
      DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
      NewLD.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }

  if (!MemVT.isVector())
    return SDValue();

  assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
         "Custom lowering for non-i32 vectors hasn't been implemented.");

  unsigned AS = Load->getAddressSpace();
  if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
                          AS, Load->getAlignment())) {
    SDValue Ops[2];
    std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
    return DAG.getMergeValues(Ops, DL);
  }

  unsigned NumElements = MemVT.getVectorNumElements();
  switch (AS) {
  case AMDGPUAS::CONSTANT_ADDRESS:
    if (isMemOpUniform(Load))
      return SDValue();
    // Non-uniform loads will be selected to MUBUF instructions, so they
    // have the same legalization requirements as global and private
    // loads.
    //
    LLVM_FALLTHROUGH;
  case AMDGPUAS::GLOBAL_ADDRESS:
  case AMDGPUAS::FLAT_ADDRESS:
    if (NumElements > 4)
      return SplitVectorLoad(Op, DAG);
    // v4 loads are supported for private and global memory.
    return SDValue();
  case AMDGPUAS::PRIVATE_ADDRESS: {
    // Depending on the setting of the private_element_size field in the
    // resource descriptor, we can only make private accesses up to a certain
    // size.
    switch (Subtarget->getMaxPrivateElementSize()) {
    case 4:
      return scalarizeVectorLoad(Load, DAG);
    case 8:
      if (NumElements > 2)
        return SplitVectorLoad(Op, DAG);
      return SDValue();
    case 16:
      // Same as global/flat
      if (NumElements > 4)
        return SplitVectorLoad(Op, DAG);
      return SDValue();
    default:
      llvm_unreachable("unsupported private_element_size");
    }
  }
  case AMDGPUAS::LOCAL_ADDRESS: {
    if (NumElements > 2)
      return SplitVectorLoad(Op, DAG);

    if (NumElements == 2)
      return SDValue();

    // If properly aligned, if we split we might be able to use ds_read_b64.
    return SplitVectorLoad(Op, DAG);
  }
  default:
    return SDValue();
  }
}

// Lower an i64 select as two i32 selects on the bitcast low/high halves.
SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getValueType() != MVT::i64)
    return SDValue();

  SDLoc DL(Op);
  SDValue Cond = Op.getOperand(0);

  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
  SDValue One = DAG.getConstant(1, DL, MVT::i32);

  SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
  SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));

  SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
  SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);

  SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);

  SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
  SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);

  SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);

  SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
  return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res);
}

// Catch division cases where we can use shortcuts with rcp and rsq
// instructions.
2662 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op, 2663 SelectionDAG &DAG) const { 2664 SDLoc SL(Op); 2665 SDValue LHS = Op.getOperand(0); 2666 SDValue RHS = Op.getOperand(1); 2667 EVT VT = Op.getValueType(); 2668 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath; 2669 2670 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { 2671 if ((Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals()))) { 2672 2673 if (CLHS->isExactlyValue(1.0)) { 2674 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to 2675 // the CI documentation has a worst case error of 1 ulp. 2676 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to 2677 // use it as long as we aren't trying to use denormals. 2678 2679 // 1.0 / sqrt(x) -> rsq(x) 2680 // 2681 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP 2682 // error seems really high at 2^29 ULP. 2683 if (RHS.getOpcode() == ISD::FSQRT) 2684 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); 2685 2686 // 1.0 / x -> rcp(x) 2687 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 2688 } 2689 2690 // Same as for 1.0, but expand the sign out of the constant. 2691 if (CLHS->isExactlyValue(-1.0)) { 2692 // -1.0 / x -> rcp (fneg x) 2693 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 2694 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS); 2695 } 2696 } 2697 } 2698 2699 const SDNodeFlags *Flags = Op->getFlags(); 2700 2701 if (Unsafe || Flags->hasAllowReciprocal()) { 2702 // Turn into multiply by the reciprocal. 2703 // x / y -> x * (1.0 / y) 2704 SDNodeFlags Flags; 2705 Flags.setUnsafeAlgebra(true); 2706 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 2707 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, &Flags); 2708 } 2709 2710 return SDValue(); 2711 } 2712 2713 // Faster 2.5 ULP division that does not support denormals. 
2714 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const { 2715 SDLoc SL(Op); 2716 SDValue LHS = Op.getOperand(1); 2717 SDValue RHS = Op.getOperand(2); 2718 2719 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); 2720 2721 const APFloat K0Val(BitsToFloat(0x6f800000)); 2722 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32); 2723 2724 const APFloat K1Val(BitsToFloat(0x2f800000)); 2725 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32); 2726 2727 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 2728 2729 EVT SetCCVT = 2730 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); 2731 2732 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); 2733 2734 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); 2735 2736 // TODO: Should this propagate fast-math-flags? 2737 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); 2738 2739 // rcp does not support denormals. 2740 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); 2741 2742 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); 2743 2744 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); 2745 } 2746 2747 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { 2748 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 2749 return FastLowered; 2750 2751 SDLoc SL(Op); 2752 SDValue LHS = Op.getOperand(0); 2753 SDValue RHS = Op.getOperand(1); 2754 2755 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 2756 2757 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1); 2758 2759 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, RHS, RHS, LHS); 2760 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, LHS, RHS, LHS); 2761 2762 // Denominator is scaled to not be denormal, so using rcp is ok. 
2763 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, DenominatorScaled); 2764 2765 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, DenominatorScaled); 2766 2767 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f32, NegDivScale0, ApproxRcp, One); 2768 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, ApproxRcp); 2769 2770 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, NumeratorScaled, Fma1); 2771 2772 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, NumeratorScaled); 2773 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul); 2774 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, NumeratorScaled); 2775 2776 SDValue Scale = NumeratorScaled.getValue(1); 2777 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32, Fma4, Fma1, Fma3, Scale); 2778 2779 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS); 2780 } 2781 2782 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { 2783 if (DAG.getTarget().Options.UnsafeFPMath) 2784 return lowerFastUnsafeFDIV(Op, DAG); 2785 2786 SDLoc SL(Op); 2787 SDValue X = Op.getOperand(0); 2788 SDValue Y = Op.getOperand(1); 2789 2790 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); 2791 2792 SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1); 2793 2794 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X); 2795 2796 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0); 2797 2798 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0); 2799 2800 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One); 2801 2802 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp); 2803 2804 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One); 2805 2806 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X); 2807 2808 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, 
Fma2, Fma1); 2809 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3); 2810 2811 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64, 2812 NegDivScale0, Mul, DivScale1); 2813 2814 SDValue Scale; 2815 2816 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) { 2817 // Workaround a hardware bug on SI where the condition output from div_scale 2818 // is not usable. 2819 2820 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32); 2821 2822 // Figure out if the scale to use for div_fmas. 2823 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); 2824 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y); 2825 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0); 2826 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1); 2827 2828 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi); 2829 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi); 2830 2831 SDValue Scale0Hi 2832 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi); 2833 SDValue Scale1Hi 2834 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi); 2835 2836 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ); 2837 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ); 2838 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen); 2839 } else { 2840 Scale = DivScale1.getValue(1); 2841 } 2842 2843 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64, 2844 Fma4, Fma3, Mul, Scale); 2845 2846 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X); 2847 } 2848 2849 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { 2850 EVT VT = Op.getValueType(); 2851 2852 if (VT == MVT::f32) 2853 return LowerFDIV32(Op, DAG); 2854 2855 if (VT == MVT::f64) 2856 return LowerFDIV64(Op, DAG); 2857 2858 llvm_unreachable("Unexpected type for fdiv"); 2859 } 2860 2861 SDValue 
SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 2862 SDLoc DL(Op); 2863 StoreSDNode *Store = cast<StoreSDNode>(Op); 2864 EVT VT = Store->getMemoryVT(); 2865 2866 if (VT == MVT::i1) { 2867 return DAG.getTruncStore(Store->getChain(), DL, 2868 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32), 2869 Store->getBasePtr(), MVT::i1, Store->getMemOperand()); 2870 } 2871 2872 assert(VT.isVector() && 2873 Store->getValue().getValueType().getScalarType() == MVT::i32); 2874 2875 unsigned AS = Store->getAddressSpace(); 2876 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT, 2877 AS, Store->getAlignment())) { 2878 return expandUnalignedStore(Store, DAG); 2879 } 2880 2881 unsigned NumElements = VT.getVectorNumElements(); 2882 switch (AS) { 2883 case AMDGPUAS::GLOBAL_ADDRESS: 2884 case AMDGPUAS::FLAT_ADDRESS: 2885 if (NumElements > 4) 2886 return SplitVectorStore(Op, DAG); 2887 return SDValue(); 2888 case AMDGPUAS::PRIVATE_ADDRESS: { 2889 switch (Subtarget->getMaxPrivateElementSize()) { 2890 case 4: 2891 return scalarizeVectorStore(Store, DAG); 2892 case 8: 2893 if (NumElements > 2) 2894 return SplitVectorStore(Op, DAG); 2895 return SDValue(); 2896 case 16: 2897 if (NumElements > 4) 2898 return SplitVectorStore(Op, DAG); 2899 return SDValue(); 2900 default: 2901 llvm_unreachable("unsupported private_element_size"); 2902 } 2903 } 2904 case AMDGPUAS::LOCAL_ADDRESS: { 2905 if (NumElements > 2) 2906 return SplitVectorStore(Op, DAG); 2907 2908 if (NumElements == 2) 2909 return Op; 2910 2911 // If properly aligned, if we split we might be able to use ds_write_b64. 2912 return SplitVectorStore(Op, DAG); 2913 } 2914 default: 2915 llvm_unreachable("unhandled address space"); 2916 } 2917 } 2918 2919 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { 2920 SDLoc DL(Op); 2921 EVT VT = Op.getValueType(); 2922 SDValue Arg = Op.getOperand(0); 2923 // TODO: Should this propagate fast-math-flags? 
2924 SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT, 2925 DAG.getNode(ISD::FMUL, DL, VT, Arg, 2926 DAG.getConstantFP(0.5/M_PI, DL, 2927 VT))); 2928 2929 switch (Op.getOpcode()) { 2930 case ISD::FCOS: 2931 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart); 2932 case ISD::FSIN: 2933 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart); 2934 default: 2935 llvm_unreachable("Wrong trig opcode"); 2936 } 2937 } 2938 2939 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const { 2940 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op); 2941 assert(AtomicNode->isCompareAndSwap()); 2942 unsigned AS = AtomicNode->getAddressSpace(); 2943 2944 // No custom lowering required for local address space 2945 if (!isFlatGlobalAddrSpace(AS)) 2946 return Op; 2947 2948 // Non-local address space requires custom lowering for atomic compare 2949 // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2 2950 SDLoc DL(Op); 2951 SDValue ChainIn = Op.getOperand(0); 2952 SDValue Addr = Op.getOperand(1); 2953 SDValue Old = Op.getOperand(2); 2954 SDValue New = Op.getOperand(3); 2955 EVT VT = Op.getValueType(); 2956 MVT SimpleVT = VT.getSimpleVT(); 2957 MVT VecType = MVT::getVectorVT(SimpleVT, 2); 2958 2959 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old}); 2960 SDValue Ops[] = { ChainIn, Addr, NewOld }; 2961 2962 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(), 2963 Ops, VT, AtomicNode->getMemOperand()); 2964 } 2965 2966 //===----------------------------------------------------------------------===// 2967 // Custom DAG optimizations 2968 //===----------------------------------------------------------------------===// 2969 2970 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, 2971 DAGCombinerInfo &DCI) const { 2972 EVT VT = N->getValueType(0); 2973 EVT ScalarVT = VT.getScalarType(); 2974 if (ScalarVT != MVT::f32) 2975 return SDValue(); 2976 2977 SelectionDAG &DAG = 
DCI.DAG; 2978 SDLoc DL(N); 2979 2980 SDValue Src = N->getOperand(0); 2981 EVT SrcVT = Src.getValueType(); 2982 2983 // TODO: We could try to match extracting the higher bytes, which would be 2984 // easier if i8 vectors weren't promoted to i32 vectors, particularly after 2985 // types are legalized. v4i8 -> v4f32 is probably the only case to worry 2986 // about in practice. 2987 if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) { 2988 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { 2989 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src); 2990 DCI.AddToWorklist(Cvt.getNode()); 2991 return Cvt; 2992 } 2993 } 2994 2995 return SDValue(); 2996 } 2997 2998 /// \brief Return true if the given offset Size in bytes can be folded into 2999 /// the immediate offsets of a memory instruction for the given address space. 3000 static bool canFoldOffset(unsigned OffsetSize, unsigned AS, 3001 const SISubtarget &STI) { 3002 switch (AS) { 3003 case AMDGPUAS::GLOBAL_ADDRESS: { 3004 // MUBUF instructions a 12-bit offset in bytes. 3005 return isUInt<12>(OffsetSize); 3006 } 3007 case AMDGPUAS::CONSTANT_ADDRESS: { 3008 // SMRD instructions have an 8-bit offset in dwords on SI and 3009 // a 20-bit offset in bytes on VI. 3010 if (STI.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) 3011 return isUInt<20>(OffsetSize); 3012 else 3013 return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4); 3014 } 3015 case AMDGPUAS::LOCAL_ADDRESS: 3016 case AMDGPUAS::REGION_ADDRESS: { 3017 // The single offset versions have a 16-bit offset in bytes. 3018 return isUInt<16>(OffsetSize); 3019 } 3020 case AMDGPUAS::PRIVATE_ADDRESS: 3021 // Indirect register addressing does not use any offsets. 
3022 default: 3023 return 0; 3024 } 3025 } 3026 3027 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2) 3028 3029 // This is a variant of 3030 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2), 3031 // 3032 // The normal DAG combiner will do this, but only if the add has one use since 3033 // that would increase the number of instructions. 3034 // 3035 // This prevents us from seeing a constant offset that can be folded into a 3036 // memory instruction's addressing mode. If we know the resulting add offset of 3037 // a pointer can be folded into an addressing offset, we can replace the pointer 3038 // operand with the add of new constant offset. This eliminates one of the uses, 3039 // and may allow the remaining use to also be simplified. 3040 // 3041 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N, 3042 unsigned AddrSpace, 3043 DAGCombinerInfo &DCI) const { 3044 SDValue N0 = N->getOperand(0); 3045 SDValue N1 = N->getOperand(1); 3046 3047 if (N0.getOpcode() != ISD::ADD) 3048 return SDValue(); 3049 3050 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1); 3051 if (!CN1) 3052 return SDValue(); 3053 3054 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 3055 if (!CAdd) 3056 return SDValue(); 3057 3058 // If the resulting offset is too large, we can't fold it into the addressing 3059 // mode offset. 
3060 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); 3061 if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *getSubtarget())) 3062 return SDValue(); 3063 3064 SelectionDAG &DAG = DCI.DAG; 3065 SDLoc SL(N); 3066 EVT VT = N->getValueType(0); 3067 3068 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); 3069 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32); 3070 3071 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset); 3072 } 3073 3074 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) { 3075 return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) || 3076 (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) || 3077 (Opc == ISD::XOR && Val == 0); 3078 } 3079 3080 // Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This 3081 // will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit 3082 // integer combine opportunities since most 64-bit operations are decomposed 3083 // this way. TODO: We won't want this for SALU especially if it is an inline 3084 // immediate. 3085 SDValue SITargetLowering::splitBinaryBitConstantOp( 3086 DAGCombinerInfo &DCI, 3087 const SDLoc &SL, 3088 unsigned Opc, SDValue LHS, 3089 const ConstantSDNode *CRHS) const { 3090 uint64_t Val = CRHS->getZExtValue(); 3091 uint32_t ValLo = Lo_32(Val); 3092 uint32_t ValHi = Hi_32(Val); 3093 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3094 3095 if ((bitOpWithConstantIsReducible(Opc, ValLo) || 3096 bitOpWithConstantIsReducible(Opc, ValHi)) || 3097 (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) { 3098 // If we need to materialize a 64-bit immediate, it will be split up later 3099 // anyway. Avoid creating the harder to understand 64-bit immediate 3100 // materialization. 
3101 return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi); 3102 } 3103 3104 return SDValue(); 3105 } 3106 3107 SDValue SITargetLowering::performAndCombine(SDNode *N, 3108 DAGCombinerInfo &DCI) const { 3109 if (DCI.isBeforeLegalize()) 3110 return SDValue(); 3111 3112 SelectionDAG &DAG = DCI.DAG; 3113 EVT VT = N->getValueType(0); 3114 SDValue LHS = N->getOperand(0); 3115 SDValue RHS = N->getOperand(1); 3116 3117 3118 if (VT == MVT::i64) { 3119 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); 3120 if (CRHS) { 3121 if (SDValue Split 3122 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS)) 3123 return Split; 3124 } 3125 } 3126 3127 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> 3128 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) 3129 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) { 3130 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 3131 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); 3132 3133 SDValue X = LHS.getOperand(0); 3134 SDValue Y = RHS.getOperand(0); 3135 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) 3136 return SDValue(); 3137 3138 if (LCC == ISD::SETO) { 3139 if (X != LHS.getOperand(1)) 3140 return SDValue(); 3141 3142 if (RCC == ISD::SETUNE) { 3143 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); 3144 if (!C1 || !C1->isInfinity() || C1->isNegative()) 3145 return SDValue(); 3146 3147 const uint32_t Mask = SIInstrFlags::N_NORMAL | 3148 SIInstrFlags::N_SUBNORMAL | 3149 SIInstrFlags::N_ZERO | 3150 SIInstrFlags::P_ZERO | 3151 SIInstrFlags::P_SUBNORMAL | 3152 SIInstrFlags::P_NORMAL; 3153 3154 static_assert(((~(SIInstrFlags::S_NAN | 3155 SIInstrFlags::Q_NAN | 3156 SIInstrFlags::N_INFINITY | 3157 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, 3158 "mask not equal"); 3159 3160 SDLoc DL(N); 3161 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 3162 X, DAG.getConstant(Mask, DL, MVT::i32)); 3163 } 3164 } 3165 } 
3166 3167 return SDValue(); 3168 } 3169 3170 SDValue SITargetLowering::performOrCombine(SDNode *N, 3171 DAGCombinerInfo &DCI) const { 3172 SelectionDAG &DAG = DCI.DAG; 3173 SDValue LHS = N->getOperand(0); 3174 SDValue RHS = N->getOperand(1); 3175 3176 EVT VT = N->getValueType(0); 3177 if (VT == MVT::i1) { 3178 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) 3179 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && 3180 RHS.getOpcode() == AMDGPUISD::FP_CLASS) { 3181 SDValue Src = LHS.getOperand(0); 3182 if (Src != RHS.getOperand(0)) 3183 return SDValue(); 3184 3185 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); 3186 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 3187 if (!CLHS || !CRHS) 3188 return SDValue(); 3189 3190 // Only 10 bits are used. 3191 static const uint32_t MaxMask = 0x3ff; 3192 3193 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; 3194 SDLoc DL(N); 3195 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 3196 Src, DAG.getConstant(NewMask, DL, MVT::i32)); 3197 } 3198 3199 return SDValue(); 3200 } 3201 3202 if (VT != MVT::i64) 3203 return SDValue(); 3204 3205 // TODO: This could be a generic combine with a predicate for extracting the 3206 // high half of an integer being free. 
3207 3208 // (or i64:x, (zero_extend i32:y)) -> 3209 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x))) 3210 if (LHS.getOpcode() == ISD::ZERO_EXTEND && 3211 RHS.getOpcode() != ISD::ZERO_EXTEND) 3212 std::swap(LHS, RHS); 3213 3214 if (RHS.getOpcode() == ISD::ZERO_EXTEND) { 3215 SDValue ExtSrc = RHS.getOperand(0); 3216 EVT SrcVT = ExtSrc.getValueType(); 3217 if (SrcVT == MVT::i32) { 3218 SDLoc SL(N); 3219 SDValue LowLHS, HiBits; 3220 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG); 3221 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc); 3222 3223 DCI.AddToWorklist(LowOr.getNode()); 3224 DCI.AddToWorklist(HiBits.getNode()); 3225 3226 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 3227 LowOr, HiBits); 3228 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 3229 } 3230 } 3231 3232 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 3233 if (CRHS) { 3234 if (SDValue Split 3235 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS)) 3236 return Split; 3237 } 3238 3239 return SDValue(); 3240 } 3241 3242 SDValue SITargetLowering::performXorCombine(SDNode *N, 3243 DAGCombinerInfo &DCI) const { 3244 EVT VT = N->getValueType(0); 3245 if (VT != MVT::i64) 3246 return SDValue(); 3247 3248 SDValue LHS = N->getOperand(0); 3249 SDValue RHS = N->getOperand(1); 3250 3251 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); 3252 if (CRHS) { 3253 if (SDValue Split 3254 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS)) 3255 return Split; 3256 } 3257 3258 return SDValue(); 3259 } 3260 3261 SDValue SITargetLowering::performClassCombine(SDNode *N, 3262 DAGCombinerInfo &DCI) const { 3263 SelectionDAG &DAG = DCI.DAG; 3264 SDValue Mask = N->getOperand(1); 3265 3266 // fp_class x, 0 -> false 3267 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { 3268 if (CMask->isNullValue()) 3269 return DAG.getConstant(0, SDLoc(N), MVT::i1); 3270 } 3271 3272 if (N->getOperand(0).isUndef()) 
3273 return DAG.getUNDEF(MVT::i1); 3274 3275 return SDValue(); 3276 } 3277 3278 // Constant fold canonicalize. 3279 SDValue SITargetLowering::performFCanonicalizeCombine( 3280 SDNode *N, 3281 DAGCombinerInfo &DCI) const { 3282 ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); 3283 if (!CFP) 3284 return SDValue(); 3285 3286 SelectionDAG &DAG = DCI.DAG; 3287 const APFloat &C = CFP->getValueAPF(); 3288 3289 // Flush denormals to 0 if not enabled. 3290 if (C.isDenormal()) { 3291 EVT VT = N->getValueType(0); 3292 if (VT == MVT::f32 && !Subtarget->hasFP32Denormals()) 3293 return DAG.getConstantFP(0.0, SDLoc(N), VT); 3294 3295 if (VT == MVT::f64 && !Subtarget->hasFP64Denormals()) 3296 return DAG.getConstantFP(0.0, SDLoc(N), VT); 3297 } 3298 3299 if (C.isNaN()) { 3300 EVT VT = N->getValueType(0); 3301 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics()); 3302 if (C.isSignaling()) { 3303 // Quiet a signaling NaN. 3304 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT); 3305 } 3306 3307 // Make sure it is the canonical NaN bitpattern. 3308 // 3309 // TODO: Can we use -1 as the canonical NaN value since it's an inline 3310 // immediate? 
3311 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt()) 3312 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT); 3313 } 3314 3315 return SDValue(CFP, 0); 3316 } 3317 3318 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { 3319 switch (Opc) { 3320 case ISD::FMAXNUM: 3321 return AMDGPUISD::FMAX3; 3322 case ISD::SMAX: 3323 return AMDGPUISD::SMAX3; 3324 case ISD::UMAX: 3325 return AMDGPUISD::UMAX3; 3326 case ISD::FMINNUM: 3327 return AMDGPUISD::FMIN3; 3328 case ISD::SMIN: 3329 return AMDGPUISD::SMIN3; 3330 case ISD::UMIN: 3331 return AMDGPUISD::UMIN3; 3332 default: 3333 llvm_unreachable("Not a min/max opcode"); 3334 } 3335 } 3336 3337 static SDValue performIntMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL, 3338 SDValue Op0, SDValue Op1, bool Signed) { 3339 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1); 3340 if (!K1) 3341 return SDValue(); 3342 3343 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); 3344 if (!K0) 3345 return SDValue(); 3346 3347 if (Signed) { 3348 if (K0->getAPIntValue().sge(K1->getAPIntValue())) 3349 return SDValue(); 3350 } else { 3351 if (K0->getAPIntValue().uge(K1->getAPIntValue())) 3352 return SDValue(); 3353 } 3354 3355 EVT VT = K0->getValueType(0); 3356 return DAG.getNode(Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3, SL, VT, 3357 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0)); 3358 } 3359 3360 static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) { 3361 if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions()) 3362 return true; 3363 3364 return DAG.isKnownNeverNaN(Op); 3365 } 3366 3367 static SDValue performFPMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL, 3368 SDValue Op0, SDValue Op1) { 3369 ConstantFPSDNode *K1 = dyn_cast<ConstantFPSDNode>(Op1); 3370 if (!K1) 3371 return SDValue(); 3372 3373 ConstantFPSDNode *K0 = dyn_cast<ConstantFPSDNode>(Op0.getOperand(1)); 3374 if (!K0) 3375 return SDValue(); 3376 3377 // Ordered >= (although NaN inputs should have folded away by now). 
3378 APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF()); 3379 if (Cmp == APFloat::cmpGreaterThan) 3380 return SDValue(); 3381 3382 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a 3383 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would then 3384 // give the other result, which is different from med3 with a NaN input. 3385 SDValue Var = Op0.getOperand(0); 3386 if (!isKnownNeverSNan(DAG, Var)) 3387 return SDValue(); 3388 3389 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0), 3390 Var, SDValue(K0, 0), SDValue(K1, 0)); 3391 } 3392 3393 SDValue SITargetLowering::performMinMaxCombine(SDNode *N, 3394 DAGCombinerInfo &DCI) const { 3395 SelectionDAG &DAG = DCI.DAG; 3396 3397 unsigned Opc = N->getOpcode(); 3398 SDValue Op0 = N->getOperand(0); 3399 SDValue Op1 = N->getOperand(1); 3400 3401 // Only do this if the inner op has one use since this will just increases 3402 // register pressure for no benefit. 3403 3404 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY) { 3405 // max(max(a, b), c) -> max3(a, b, c) 3406 // min(min(a, b), c) -> min3(a, b, c) 3407 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { 3408 SDLoc DL(N); 3409 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 3410 DL, 3411 N->getValueType(0), 3412 Op0.getOperand(0), 3413 Op0.getOperand(1), 3414 Op1); 3415 } 3416 3417 // Try commuted. 
3418 // max(a, max(b, c)) -> max3(a, b, c) 3419 // min(a, min(b, c)) -> min3(a, b, c) 3420 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { 3421 SDLoc DL(N); 3422 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 3423 DL, 3424 N->getValueType(0), 3425 Op0, 3426 Op1.getOperand(0), 3427 Op1.getOperand(1)); 3428 } 3429 } 3430 3431 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1) 3432 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) { 3433 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true)) 3434 return Med3; 3435 } 3436 3437 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) { 3438 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false)) 3439 return Med3; 3440 } 3441 3442 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1) 3443 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) || 3444 (Opc == AMDGPUISD::FMIN_LEGACY && 3445 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) && 3446 N->getValueType(0) == MVT::f32 && Op0.hasOneUse()) { 3447 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1)) 3448 return Res; 3449 } 3450 3451 return SDValue(); 3452 } 3453 3454 SDValue SITargetLowering::performSetCCCombine(SDNode *N, 3455 DAGCombinerInfo &DCI) const { 3456 SelectionDAG &DAG = DCI.DAG; 3457 SDLoc SL(N); 3458 3459 SDValue LHS = N->getOperand(0); 3460 SDValue RHS = N->getOperand(1); 3461 EVT VT = LHS.getValueType(); 3462 3463 if (VT != MVT::f32 && VT != MVT::f64) 3464 return SDValue(); 3465 3466 // Match isinf pattern 3467 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) 3468 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 3469 if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) { 3470 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); 3471 if (!CRHS) 3472 return SDValue(); 3473 3474 const APFloat &APF = CRHS->getValueAPF(); 3475 if (APF.isInfinity() && !APF.isNegative()) { 3476 unsigned 
Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY; 3477 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0), 3478 DAG.getConstant(Mask, SL, MVT::i32)); 3479 } 3480 } 3481 3482 return SDValue(); 3483 } 3484 3485 SDValue SITargetLowering::PerformDAGCombine(SDNode *N, 3486 DAGCombinerInfo &DCI) const { 3487 SelectionDAG &DAG = DCI.DAG; 3488 SDLoc DL(N); 3489 3490 switch (N->getOpcode()) { 3491 default: 3492 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 3493 case ISD::SETCC: 3494 return performSetCCCombine(N, DCI); 3495 case ISD::FMAXNUM: 3496 case ISD::FMINNUM: 3497 case ISD::SMAX: 3498 case ISD::SMIN: 3499 case ISD::UMAX: 3500 case ISD::UMIN: 3501 case AMDGPUISD::FMIN_LEGACY: 3502 case AMDGPUISD::FMAX_LEGACY: { 3503 if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG && 3504 N->getValueType(0) != MVT::f64 && 3505 getTargetMachine().getOptLevel() > CodeGenOpt::None) 3506 return performMinMaxCombine(N, DCI); 3507 break; 3508 } 3509 3510 case AMDGPUISD::CVT_F32_UBYTE0: 3511 case AMDGPUISD::CVT_F32_UBYTE1: 3512 case AMDGPUISD::CVT_F32_UBYTE2: 3513 case AMDGPUISD::CVT_F32_UBYTE3: { 3514 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; 3515 SDValue Src = N->getOperand(0); 3516 3517 // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero. 
3518 if (Src.getOpcode() == ISD::SRL) { 3519 // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x 3520 // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x 3521 // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x 3522 3523 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src.getOperand(1))) { 3524 unsigned SrcOffset = C->getZExtValue() + 8 * Offset; 3525 if (SrcOffset < 32 && SrcOffset % 8 == 0) { 3526 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, DL, 3527 MVT::f32, Src.getOperand(0)); 3528 } 3529 } 3530 } 3531 3532 APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8); 3533 3534 APInt KnownZero, KnownOne; 3535 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 3536 !DCI.isBeforeLegalizeOps()); 3537 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3538 if (TLO.ShrinkDemandedConstant(Src, Demanded) || 3539 TLI.SimplifyDemandedBits(Src, Demanded, KnownZero, KnownOne, TLO)) { 3540 DCI.CommitTargetLoweringOpt(TLO); 3541 } 3542 3543 break; 3544 } 3545 3546 case ISD::UINT_TO_FP: { 3547 return performUCharToFloatCombine(N, DCI); 3548 } 3549 case ISD::FADD: { 3550 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 3551 break; 3552 3553 EVT VT = N->getValueType(0); 3554 if (VT != MVT::f32) 3555 break; 3556 3557 // Only do this if we are not trying to support denormals. v_mad_f32 does 3558 // not support denormals ever. 3559 if (Subtarget->hasFP32Denormals()) 3560 break; 3561 3562 SDValue LHS = N->getOperand(0); 3563 SDValue RHS = N->getOperand(1); 3564 3565 // These should really be instruction patterns, but writing patterns with 3566 // source modiifiers is a pain. 
3567 3568 // fadd (fadd (a, a), b) -> mad 2.0, a, b 3569 if (LHS.getOpcode() == ISD::FADD) { 3570 SDValue A = LHS.getOperand(0); 3571 if (A == LHS.getOperand(1)) { 3572 const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32); 3573 return DAG.getNode(ISD::FMAD, DL, VT, Two, A, RHS); 3574 } 3575 } 3576 3577 // fadd (b, fadd (a, a)) -> mad 2.0, a, b 3578 if (RHS.getOpcode() == ISD::FADD) { 3579 SDValue A = RHS.getOperand(0); 3580 if (A == RHS.getOperand(1)) { 3581 const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32); 3582 return DAG.getNode(ISD::FMAD, DL, VT, Two, A, LHS); 3583 } 3584 } 3585 3586 return SDValue(); 3587 } 3588 case ISD::FSUB: { 3589 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 3590 break; 3591 3592 EVT VT = N->getValueType(0); 3593 3594 // Try to get the fneg to fold into the source modifier. This undoes generic 3595 // DAG combines and folds them into the mad. 3596 // 3597 // Only do this if we are not trying to support denormals. v_mad_f32 does 3598 // not support denormals ever. 
3599 if (VT == MVT::f32 && 3600 !Subtarget->hasFP32Denormals()) { 3601 SDValue LHS = N->getOperand(0); 3602 SDValue RHS = N->getOperand(1); 3603 if (LHS.getOpcode() == ISD::FADD) { 3604 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c) 3605 3606 SDValue A = LHS.getOperand(0); 3607 if (A == LHS.getOperand(1)) { 3608 const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32); 3609 SDValue NegRHS = DAG.getNode(ISD::FNEG, DL, VT, RHS); 3610 3611 return DAG.getNode(ISD::FMAD, DL, VT, Two, A, NegRHS); 3612 } 3613 } 3614 3615 if (RHS.getOpcode() == ISD::FADD) { 3616 // (fsub c, (fadd a, a)) -> mad -2.0, a, c 3617 3618 SDValue A = RHS.getOperand(0); 3619 if (A == RHS.getOperand(1)) { 3620 const SDValue NegTwo = DAG.getConstantFP(-2.0, DL, MVT::f32); 3621 return DAG.getNode(ISD::FMAD, DL, VT, NegTwo, A, LHS); 3622 } 3623 } 3624 3625 return SDValue(); 3626 } 3627 3628 break; 3629 } 3630 case ISD::LOAD: 3631 case ISD::STORE: 3632 case ISD::ATOMIC_LOAD: 3633 case ISD::ATOMIC_STORE: 3634 case ISD::ATOMIC_CMP_SWAP: 3635 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 3636 case ISD::ATOMIC_SWAP: 3637 case ISD::ATOMIC_LOAD_ADD: 3638 case ISD::ATOMIC_LOAD_SUB: 3639 case ISD::ATOMIC_LOAD_AND: 3640 case ISD::ATOMIC_LOAD_OR: 3641 case ISD::ATOMIC_LOAD_XOR: 3642 case ISD::ATOMIC_LOAD_NAND: 3643 case ISD::ATOMIC_LOAD_MIN: 3644 case ISD::ATOMIC_LOAD_MAX: 3645 case ISD::ATOMIC_LOAD_UMIN: 3646 case ISD::ATOMIC_LOAD_UMAX: 3647 case AMDGPUISD::ATOMIC_INC: 3648 case AMDGPUISD::ATOMIC_DEC: { // TODO: Target mem intrinsics. 3649 if (DCI.isBeforeLegalize()) 3650 break; 3651 3652 MemSDNode *MemNode = cast<MemSDNode>(N); 3653 SDValue Ptr = MemNode->getBasePtr(); 3654 3655 // TODO: We could also do this for multiplies. 
    unsigned AS = MemNode->getAddressSpace();
    if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUAS::PRIVATE_ADDRESS) {
      SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI);
      if (NewPtr) {
        SmallVector<SDValue, 8> NewOps(MemNode->op_begin(), MemNode->op_end());

        // Stores keep the pointer at operand 2 (chain, value, ptr, ...);
        // loads and atomics keep it at operand 1 (chain, ptr, ...).
        NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
        return SDValue(DAG.UpdateNodeOperands(MemNode, NewOps), 0);
      }
    }
    break;
  }
  case ISD::AND:
    return performAndCombine(N, DCI);
  case ISD::OR:
    return performOrCombine(N, DCI);
  case ISD::XOR:
    return performXorCombine(N, DCI);
  case AMDGPUISD::FP_CLASS:
    return performClassCombine(N, DCI);
  case ISD::FCANONICALIZE:
    return performFCanonicalizeCombine(N, DCI);
  case AMDGPUISD::FRACT:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RSQ_LEGACY:
  case AMDGPUISD::RSQ_CLAMP:
  case AMDGPUISD::LDEXP: {
    // These nodes produce undef when their source is undef; fold them away.
    SDValue Src = N->getOperand(0);
    if (Src.isUndef())
      return Src;
    break;
  }
  }
  return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}

/// \brief Helper function for adjustWritemask
///
/// Map an EXTRACT_SUBREG subregister index to the lane (0-3) it selects.
/// Unknown indices map to lane 0.
static unsigned SubIdx2Lane(unsigned Idx) {
  switch (Idx) {
  default: return 0;
  case AMDGPU::sub0: return 0;
  case AMDGPU::sub1: return 1;
  case AMDGPU::sub2: return 2;
  case AMDGPU::sub3: return 3;
  }
}

/// \brief Adjust the writemask of MIMG instructions
///
/// Shrinks the dmask of a MIMG node to cover only the result components that
/// are actually extracted by its users, then rewrites the users to index into
/// the now-packed result. Bails out (leaving the node untouched) whenever a
/// use is not a simple EXTRACT_SUBREG or a component has multiple users.
void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
                                       SelectionDAG &DAG) const {
  SDNode *Users[4] = { };
  unsigned Lane = 0;
  // The dmask operand position depends on the instruction form; the operand
  // count (minus result values) distinguishes the two encodings here.
  // NOTE(review): assumes only these two MIMG operand layouts reach this
  // point — confirm against the MIMG instruction definitions.
  unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 2 : 3;
  unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
  unsigned NewDmask = 0;

  // Try to figure out the used register components
  for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
       I != E; ++I) {

    // Abort if we can't understand the usage
    if (!I->isMachineOpcode() ||
        I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
      return;

    // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used.
    // Note that subregs are packed, i.e. Lane==0 is the first bit set
    // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
    // set, etc.
    Lane = SubIdx2Lane(I->getConstantOperandVal(1));

    // Set which texture component corresponds to the lane.
    // Walk the set bits of OldDmask: the (Lane+1)-th set bit is the
    // component this lane reads.
    unsigned Comp;
    for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
      assert(Dmask);
      Comp = countTrailingZeros(Dmask);
      Dmask &= ~(1 << Comp);
    }

    // Abort if we have more than one user per component
    if (Users[Lane])
      return;

    Users[Lane] = *I;
    NewDmask |= 1 << Comp;
  }

  // Abort if there's no change
  if (NewDmask == OldDmask)
    return;

  // Adjust the writemask in the node: rebuild the operand list with the
  // shrunk dmask spliced in at DmaskIdx.
  std::vector<SDValue> Ops;
  Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
  Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
  Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
  Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops);

  // If we only got one lane, replace it with a copy
  // (if NewDmask has only one bit set...)
  if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
    SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, SDLoc(),
                                       MVT::i32);
    SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                      SDLoc(), Users[Lane]->getValueType(0),
                                      SDValue(Node, 0), RC);
    DAG.ReplaceAllUsesWith(Users[Lane], Copy);
    return;
  }

  // Update the users of the node with the new indices: surviving users are
  // renumbered to consecutive subregisters sub0, sub1, ...
  for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {

    SDNode *User = Users[i];
    if (!User)
      continue;

    SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
    DAG.UpdateNodeOperands(User, User->getOperand(0), Op);

    // Advance to the next packed subregister index for the next user.
    switch (Idx) {
    default: break;
    case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
    case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
    case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
    }
  }
}

/// Return true if \p Op is (possibly behind an AssertZext) a frame index.
static bool isFrameIndexOp(SDValue Op) {
  if (Op.getOpcode() == ISD::AssertZext)
    Op = Op.getOperand(0);

  return isa<FrameIndexSDNode>(Op);
}

/// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
/// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers, so wrap
/// each frame-index operand in an S_MOV_B32.
void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
    if (!isFrameIndexOp(Node->getOperand(i))) {
      Ops.push_back(Node->getOperand(i));
      continue;
    }

    SDLoc DL(Node);
    Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
                                             Node->getOperand(i).getValueType(),
                                             Node->getOperand(i)), 0));
  }

  DAG.UpdateNodeOperands(Node, Ops);
}

/// \brief Fold the instructions after selecting them.
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
                                          SelectionDAG &DAG) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  unsigned Opcode = Node->getMachineOpcode();

  // Shrink the dmask of image loads/samples (but not stores or gather4,
  // which always produce four components).
  if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
      !TII->isGather4(Opcode))
    adjustWritemask(Node, DAG);

  if (Opcode == AMDGPU::INSERT_SUBREG ||
      Opcode == AMDGPU::REG_SEQUENCE) {
    legalizeTargetIndependentNode(Node, DAG);
    return Node;
  }
  return Node;
}

/// \brief Assign the register class depending on the number of
/// bits set in the writemask
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                     SDNode *Node) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  if (TII->isVOP3(MI.getOpcode())) {
    // Make sure constant bus requirements are respected.
    TII->legalizeOperandsVOP3(MRI, MI);
    return;
  }

  if (TII->isMIMG(MI)) {
    unsigned VReg = MI.getOperand(0).getReg();
    // Locate the dmask operand; its position depends on the instruction
    // form. NOTE(review): assumes the 12-operand layout puts dmask at index
    // 3 and all others at index 4 — confirm against the MIMG definitions.
    unsigned DmaskIdx = MI.getNumOperands() == 12 ? 3 : 4;
    unsigned Writemask = MI.getOperand(DmaskIdx).getImm();
    // Count the enabled components; that determines the result width.
    unsigned BitsSet = 0;
    for (unsigned i = 0; i < 4; ++i)
      BitsSet += Writemask & (1 << i) ? 1 : 0;

    const TargetRegisterClass *RC;
    switch (BitsSet) {
    default: return;  // 0 or 4 components: leave the instruction as-is.
    case 1:  RC = &AMDGPU::VGPR_32RegClass; break;
    case 2:  RC = &AMDGPU::VReg_64RegClass; break;
    case 3:  RC = &AMDGPU::VReg_96RegClass; break;
    }

    // Switch to the variant of the opcode that writes only BitsSet
    // components and narrow the destination register class to match.
    unsigned NewOpcode = TII->getMaskedMIMGOp(MI.getOpcode(), BitsSet);
    MI.setDesc(TII->get(NewOpcode));
    MRI.setRegClass(VReg, RC);
    return;
  }

  // Replace unused atomics with the no return version.
  int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
  if (NoRetAtomicOp != -1) {
    if (!Node->hasAnyUseOfValue(0)) {
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);
      return;
    }

    // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
    // instruction, because the return type of these instructions is a vec2 of
    // the memory type, so it can be tied to the input operand.
    // This means these instructions always have a use, so we need to add a
    // special case to check if the atomic has only one extract_subreg use,
    // which itself has no uses.
    if ((Node->hasNUsesOfValue(1, 0) &&
         Node->use_begin()->isMachineOpcode() &&
         Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
         !Node->use_begin()->hasAnyUseOfValue(0))) {
      unsigned Def = MI.getOperand(0).getReg();

      // Change this into a noret atomic.
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);

      // If we only remove the def operand from the atomic instruction, the
      // extract_subreg will be left with a use of a vreg without a def.
      // So we need to insert an implicit_def to avoid machine verifier
      // errors.
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
              TII->get(AMDGPU::IMPLICIT_DEF), Def);
    }
    return;
  }
}

/// Materialize a 32-bit immediate into an SGPR via S_MOV_B32.
static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
                              uint64_t Val) {
  SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
  return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
}

/// Wrap a 64-bit pointer into a 128-bit buffer resource descriptor whose
/// upper half holds the default resource data format.
MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
                                                const SDLoc &DL,
                                                SDValue Ptr) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  // Build the half of the subregister with the constants before building the
  // full 128-bit register. If we are building multiple resource descriptors,
  // this will allow CSEing of the 2-component register.
  const SDValue Ops0[] = {
    DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
    buildSMovImm32(DAG, DL, 0),
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
  };

  SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                                MVT::v2i32, Ops0), 0);

  // Combine the constants and the pointer.
  const SDValue Ops1[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    Ptr,
    DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
    SubRegHi,
    DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
}

/// \brief Return a resource descriptor with the 'Add TID' bit enabled
///        The TID (Thread ID) is multiplied by the stride value (bits [61:48]
///        of the resource descriptor) to create an offset, which is added to
///        the resource pointer.
MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
                                           SDValue Ptr, uint32_t RsrcDword1,
                                           uint64_t RsrcDword2And3) const {
  // Split the 64-bit pointer into its two 32-bit dwords.
  SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
  SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
  if (RsrcDword1) {
    // Merge the extra descriptor bits into the high dword of the pointer.
    PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
                                       DAG.getConstant(RsrcDword1, DL, MVT::i32)),
                    0);
  }

  SDValue DataLo = buildSMovImm32(DAG, DL,
                                  RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
  SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);

  // Assemble the four dwords into a 128-bit SGPR resource descriptor.
  const SDValue Ops[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    PtrLo,
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    PtrHi,
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
    DataLo,
    DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
    DataHi,
    DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
}

/// Create a live-in virtual register and return a CopyFromReg of it rooted
/// at the entry node.
SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                               const TargetRegisterClass *RC,
                                               unsigned Reg, EVT VT) const {
  SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT);

  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()),
                            cast<RegisterSDNode>(VReg)->getReg(), VT);
}

//===----------------------------------------------------------------------===//
// SI Inline Assembly Support
//===----------------------------------------------------------------------===//

/// Map inline-asm register constraints to register classes:
/// 's'/'r' select scalar (SGPR) classes, 'v' selects vector (VGPR) classes,
/// each sized by the value type. Multi-character constraints of the form
/// "{vN}"/"{sN}" select a specific register by index.
std::pair<unsigned, const TargetRegisterClass *>
SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                               StringRef Constraint,
                                               MVT VT) const {

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 's':
    case 'r':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
        return std::make_pair(0U, &AMDGPU::SReg_32RegClass);
      case 64:
        return std::make_pair(0U, &AMDGPU::SGPR_64RegClass);
      case 128:
        return std::make_pair(0U, &AMDGPU::SReg_128RegClass);
      case 256:
        return std::make_pair(0U, &AMDGPU::SReg_256RegClass);
      }

    case 'v':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
        return std::make_pair(0U, &AMDGPU::VGPR_32RegClass);
      case 64:
        return std::make_pair(0U, &AMDGPU::VReg_64RegClass);
      case 96:
        return std::make_pair(0U, &AMDGPU::VReg_96RegClass);
      case 128:
        return std::make_pair(0U, &AMDGPU::VReg_128RegClass);
      case 256:
        return std::make_pair(0U, &AMDGPU::VReg_256RegClass);
      case 512:
        return std::make_pair(0U, &AMDGPU::VReg_512RegClass);
      }
    }
  }

  if (Constraint.size() > 1) {
    // Parse a specific-register constraint, e.g. "{v5}" or "{s2}".
    const TargetRegisterClass *RC = nullptr;
    if (Constraint[1] == 'v') {
      RC = &AMDGPU::VGPR_32RegClass;
    } else if (Constraint[1] == 's') {
      RC = &AMDGPU::SGPR_32RegClass;
    }

    if (RC) {
      uint32_t Idx;
      bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
      if (!Failed && Idx < RC->getNumRegs())
        return std::make_pair(RC->getRegister(Idx), RC);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

/// Classify 's' and 'v' as register-class constraints; everything else is
/// handled by the generic TargetLowering implementation.
SITargetLowering::ConstraintType
SITargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 's':
    case 'v':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}