1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 /// \file 11 /// \brief Custom DAG lowering for SI 12 // 13 //===----------------------------------------------------------------------===// 14 15 #ifdef _MSC_VER 16 // Provide M_PI. 17 #define _USE_MATH_DEFINES 18 #include <cmath> 19 #endif 20 21 #include "AMDGPU.h" 22 #include "AMDGPUIntrinsicInfo.h" 23 #include "AMDGPUSubtarget.h" 24 #include "SIISelLowering.h" 25 #include "SIInstrInfo.h" 26 #include "SIMachineFunctionInfo.h" 27 #include "SIRegisterInfo.h" 28 #include "llvm/ADT/BitVector.h" 29 #include "llvm/ADT/StringSwitch.h" 30 #include "llvm/CodeGen/CallingConvLower.h" 31 #include "llvm/CodeGen/MachineInstrBuilder.h" 32 #include "llvm/CodeGen/MachineRegisterInfo.h" 33 #include "llvm/CodeGen/SelectionDAG.h" 34 #include "llvm/CodeGen/Analysis.h" 35 #include "llvm/IR/DiagnosticInfo.h" 36 #include "llvm/IR/Function.h" 37 38 using namespace llvm; 39 40 static cl::opt<bool> EnableVGPRIndexMode( 41 "amdgpu-vgpr-index-mode", 42 cl::desc("Use GPR indexing mode instead of movrel for vector indexing"), 43 cl::init(false)); 44 45 46 static unsigned findFirstFreeSGPR(CCState &CCInfo) { 47 unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs(); 48 for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) { 49 if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) { 50 return AMDGPU::SGPR0 + Reg; 51 } 52 } 53 llvm_unreachable("Cannot allocate sgpr"); 54 } 55 56 SITargetLowering::SITargetLowering(const TargetMachine &TM, 57 const SISubtarget &STI) 58 : AMDGPUTargetLowering(TM, STI) { 59 addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass); 60 addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass); 61 62 addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass); 63 addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass); 64 65 addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass); 66 addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass); 67 addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass); 68 69 addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass); 70 addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass); 71 72 addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass); 73 addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass); 74 75 addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass); 76 addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass); 77 78 addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass); 79 addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass); 80 81 computeRegisterProperties(STI.getRegisterInfo()); 82 83 // We need to custom lower vector stores from local memory 84 setOperationAction(ISD::LOAD, MVT::v2i32, Custom); 85 setOperationAction(ISD::LOAD, MVT::v4i32, Custom); 86 setOperationAction(ISD::LOAD, MVT::v8i32, Custom); 87 setOperationAction(ISD::LOAD, MVT::v16i32, Custom); 88 setOperationAction(ISD::LOAD, MVT::i1, Custom); 89 90 setOperationAction(ISD::STORE, MVT::v2i32, Custom); 91 setOperationAction(ISD::STORE, MVT::v4i32, Custom); 92 setOperationAction(ISD::STORE, MVT::v8i32, Custom); 93 setOperationAction(ISD::STORE, MVT::v16i32, Custom); 94 setOperationAction(ISD::STORE, MVT::i1, Custom); 95 96 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 97 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); 98 
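  // Note: nodes marked Custom here are handled in
  // SITargetLowering::LowerOperation later in this file; for example, an i64
  // GlobalAddress node reaches LowerGlobalAddress through the
  // ISD::GlobalAddress case there.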
setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand); 99 100 setOperationAction(ISD::SELECT, MVT::i1, Promote); 101 setOperationAction(ISD::SELECT, MVT::i64, Custom); 102 setOperationAction(ISD::SELECT, MVT::f64, Promote); 103 AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64); 104 105 setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); 106 setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); 107 setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); 108 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); 109 setOperationAction(ISD::SELECT_CC, MVT::i1, Expand); 110 111 setOperationAction(ISD::SETCC, MVT::i1, Promote); 112 setOperationAction(ISD::SETCC, MVT::v2i1, Expand); 113 setOperationAction(ISD::SETCC, MVT::v4i1, Expand); 114 115 setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand); 116 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand); 117 118 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom); 119 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom); 120 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom); 121 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom); 122 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom); 123 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom); 124 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom); 125 126 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom); 127 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom); 128 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); 129 130 setOperationAction(ISD::BRCOND, MVT::Other, Custom); 131 setOperationAction(ISD::BR_CC, MVT::i1, Expand); 132 setOperationAction(ISD::BR_CC, MVT::i32, Expand); 133 setOperationAction(ISD::BR_CC, MVT::i64, Expand); 134 setOperationAction(ISD::BR_CC, MVT::f32, Expand); 135 setOperationAction(ISD::BR_CC, MVT::f64, Expand); 136 137 // We only support LOAD/STORE and vector manipulation ops for vectors 138 // with > 4 elements. 139 for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32, MVT::v2i64, MVT::v2f64}) { 140 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { 141 switch (Op) { 142 case ISD::LOAD: 143 case ISD::STORE: 144 case ISD::BUILD_VECTOR: 145 case ISD::BITCAST: 146 case ISD::EXTRACT_VECTOR_ELT: 147 case ISD::INSERT_VECTOR_ELT: 148 case ISD::INSERT_SUBVECTOR: 149 case ISD::EXTRACT_SUBVECTOR: 150 case ISD::SCALAR_TO_VECTOR: 151 break; 152 case ISD::CONCAT_VECTORS: 153 setOperationAction(Op, VT, Custom); 154 break; 155 default: 156 setOperationAction(Op, VT, Expand); 157 break; 158 } 159 } 160 } 161 162 // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that 163 // is expanded to avoid having two separate loops in case the index is a VGPR. 164 165 // Most operations are naturally 32-bit vector operations. We only support 166 // load and store of i64 vectors, so promote v2i64 vector operations to v4i32. 
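  // Roughly, this means these v2i64/v2f64 operations are legalized by
  // performing the equivalent v4i32 operation on the bitcast value and
  // bitcasting the result back to the 64-bit element vector type.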
167 for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) { 168 setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); 169 AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32); 170 171 setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); 172 AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32); 173 174 setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); 175 AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32); 176 177 setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); 178 AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32); 179 } 180 181 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand); 182 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand); 183 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand); 184 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand); 185 186 // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling, 187 // and output demarshalling 188 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); 189 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); 190 191 // We can't return success/failure, only the old value, 192 // let LLVM add the comparison 193 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand); 194 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand); 195 196 if (getSubtarget()->hasFlatAddressSpace()) { 197 setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom); 198 setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom); 199 } 200 201 setOperationAction(ISD::BSWAP, MVT::i32, Legal); 202 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); 203 204 // On SI this is s_memtime and s_memrealtime on VI. 205 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal); 206 setOperationAction(ISD::TRAP, MVT::Other, Custom); 207 208 setOperationAction(ISD::FMINNUM, MVT::f64, Legal); 209 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); 210 211 if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) { 212 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 213 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 214 setOperationAction(ISD::FRINT, MVT::f64, Legal); 215 } 216 217 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 218 219 setOperationAction(ISD::FSIN, MVT::f32, Custom); 220 setOperationAction(ISD::FCOS, MVT::f32, Custom); 221 setOperationAction(ISD::FDIV, MVT::f32, Custom); 222 setOperationAction(ISD::FDIV, MVT::f64, Custom); 223 224 setTargetDAGCombine(ISD::FADD); 225 setTargetDAGCombine(ISD::FSUB); 226 setTargetDAGCombine(ISD::FMINNUM); 227 setTargetDAGCombine(ISD::FMAXNUM); 228 setTargetDAGCombine(ISD::SMIN); 229 setTargetDAGCombine(ISD::SMAX); 230 setTargetDAGCombine(ISD::UMIN); 231 setTargetDAGCombine(ISD::UMAX); 232 setTargetDAGCombine(ISD::SETCC); 233 setTargetDAGCombine(ISD::AND); 234 setTargetDAGCombine(ISD::OR); 235 setTargetDAGCombine(ISD::XOR); 236 setTargetDAGCombine(ISD::UINT_TO_FP); 237 setTargetDAGCombine(ISD::FCANONICALIZE); 238 239 // All memory operations. Some folding on the pointer operand is done to help 240 // matching the constant offsets in the addressing modes. 
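  // For example, the combines below may keep the constant in a
  // (load (add ptr, const)) so it can be selected as the instruction's
  // immediate offset when the addressing-mode checks above accept it for that
  // address space.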
241 setTargetDAGCombine(ISD::LOAD); 242 setTargetDAGCombine(ISD::STORE); 243 setTargetDAGCombine(ISD::ATOMIC_LOAD); 244 setTargetDAGCombine(ISD::ATOMIC_STORE); 245 setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP); 246 setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 247 setTargetDAGCombine(ISD::ATOMIC_SWAP); 248 setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD); 249 setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB); 250 setTargetDAGCombine(ISD::ATOMIC_LOAD_AND); 251 setTargetDAGCombine(ISD::ATOMIC_LOAD_OR); 252 setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR); 253 setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND); 254 setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN); 255 setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX); 256 setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN); 257 setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX); 258 259 setSchedulingPreference(Sched::RegPressure); 260 } 261 262 const SISubtarget *SITargetLowering::getSubtarget() const { 263 return static_cast<const SISubtarget *>(Subtarget); 264 } 265 266 //===----------------------------------------------------------------------===// 267 // TargetLowering queries 268 //===----------------------------------------------------------------------===// 269 270 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 271 const CallInst &CI, 272 unsigned IntrID) const { 273 switch (IntrID) { 274 case Intrinsic::amdgcn_atomic_inc: 275 case Intrinsic::amdgcn_atomic_dec: 276 Info.opc = ISD::INTRINSIC_W_CHAIN; 277 Info.memVT = MVT::getVT(CI.getType()); 278 Info.ptrVal = CI.getOperand(0); 279 Info.align = 0; 280 Info.vol = false; 281 Info.readMem = true; 282 Info.writeMem = true; 283 return true; 284 default: 285 return false; 286 } 287 } 288 289 bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &, 290 EVT) const { 291 // SI has some legal vector types, but no legal vector operations. Say no 292 // shuffles are legal in order to prefer scalarizing some vector operations. 293 return false; 294 } 295 296 bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const { 297 // Flat instructions do not have offsets, and only have the register 298 // address. 299 return AM.BaseOffs == 0 && (AM.Scale == 0 || AM.Scale == 1); 300 } 301 302 bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const { 303 // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and 304 // additionally can do r + r + i with addr64. 32-bit has more addressing 305 // mode options. Depending on the resource constant, it can also do 306 // (i64 r0) + (i32 r1) * (i14 i). 307 // 308 // Private arrays end up using a scratch buffer most of the time, so also 309 // assume those use MUBUF instructions. Scratch loads / stores are currently 310 // implemented as mubuf instructions with offen bit set, so slightly 311 // different than the normal addr64. 312 if (!isUInt<12>(AM.BaseOffs)) 313 return false; 314 315 // FIXME: Since we can split immediate into soffset and immediate offset, 316 // would it make sense to allow any immediate? 317 318 switch (AM.Scale) { 319 case 0: // r + i or just i, depending on HasBaseReg. 320 return true; 321 case 1: 322 return true; // We have r + r or r + i. 323 case 2: 324 if (AM.HasBaseReg) { 325 // Reject 2 * r + r. 326 return false; 327 } 328 329 // Allow 2 * r as r + r 330 // Or 2 * r + i is allowed as r + r + i. 
    return true;
  default: // Don't allow n * r
    return false;
  }
}

bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  switch (AS) {
  case AMDGPUAS::GLOBAL_ADDRESS: {
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // Assume we will use FLAT for all global memory accesses
      // on VI.
      // FIXME: This assumption is currently wrong. On VI we still use
      // MUBUF instructions for the r + i addressing mode. As currently
      // implemented, the MUBUF instructions only work on buffers < 4GB.
      // It may be possible to support > 4GB buffers with MUBUF instructions,
      // by setting the stride value in the resource descriptor which would
      // increase the size limit to (stride * 4GB). However, this is risky,
      // because it has never been validated.
      return isLegalFlatAddressingMode(AM);
    }

    return isLegalMUBUFAddressingMode(AM);
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (DL.getTypeStoreSize(Ty) < 4)
      return isLegalMUBUFAddressingMode(AM);

    if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8 bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is a 20-bit byte
      // offset.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  }

  case AMDGPUAS::PRIVATE_ADDRESS:
    return isLegalMUBUFAddressingMode(AM);

  case AMDGPUAS::LOCAL_ADDRESS:
  case AMDGPUAS::REGION_ADDRESS: {
    // Basic, single-offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset, but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  }
  case AMDGPUAS::FLAT_ADDRESS:
  case AMDGPUAS::UNKNOWN_ADDRESS_SPACE:
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);

  default:
    llvm_unreachable("unhandled address space");
  }
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4-byte
    // aligned, 8-byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch. If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
    return false;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have a uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS) ?
        (Align % 4 == 0) : true;
    }

    return true;
  }

  // Values smaller than a dword must be aligned.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                          unsigned SrcAlign, bool IsMemset,
                                          bool ZeroMemset,
                                          bool MemcpyStrSrc,
                                          MachineFunction &MF) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type
  // to use. Make sure we switch these to 64-bit accesses.

  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
    return MVT::v4i32;

  if (Size >= 8 && DstAlign >= 4)
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}

static bool isFlatGlobalAddrSpace(unsigned AS) {
  return AS == AMDGPUAS::GLOBAL_ADDRESS ||
         AS == AMDGPUAS::FLAT_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS;
}

bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
}

bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();

  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
532 // If Ptr is null, then that means this mem operand contains a 533 // PseudoSourceValue like GOT. 534 if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) || 535 isa<Constant>(Ptr) || isa<GlobalValue>(Ptr)) 536 return true; 537 538 const Instruction *I = dyn_cast<Instruction>(Ptr); 539 return I && I->getMetadata("amdgpu.uniform"); 540 } 541 542 TargetLoweringBase::LegalizeTypeAction 543 SITargetLowering::getPreferredVectorAction(EVT VT) const { 544 if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16)) 545 return TypeSplitVector; 546 547 return TargetLoweringBase::getPreferredVectorAction(VT); 548 } 549 550 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 551 Type *Ty) const { 552 // FIXME: Could be smarter if called for vector constants. 553 return true; 554 } 555 556 bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const { 557 558 // i16 is not desirable unless it is a load or a store. 559 if (VT == MVT::i16 && Op != ISD::LOAD && Op != ISD::STORE) 560 return false; 561 562 // SimplifySetCC uses this function to determine whether or not it should 563 // create setcc with i1 operands. We don't have instructions for i1 setcc. 564 if (VT == MVT::i1 && Op == ISD::SETCC) 565 return false; 566 567 return TargetLowering::isTypeDesirableForOp(Op, VT); 568 } 569 570 SDValue SITargetLowering::LowerParameterPtr(SelectionDAG &DAG, 571 const SDLoc &SL, SDValue Chain, 572 unsigned Offset) const { 573 const DataLayout &DL = DAG.getDataLayout(); 574 MachineFunction &MF = DAG.getMachineFunction(); 575 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 576 unsigned InputPtrReg = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR); 577 578 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 579 MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS); 580 SDValue BasePtr = DAG.getCopyFromReg(Chain, SL, 581 MRI.getLiveInVirtReg(InputPtrReg), PtrVT); 582 return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr, 583 DAG.getConstant(Offset, SL, PtrVT)); 584 } 585 SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT, 586 const SDLoc &SL, SDValue Chain, 587 unsigned Offset, bool Signed) const { 588 const DataLayout &DL = DAG.getDataLayout(); 589 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 590 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); 591 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); 592 593 unsigned Align = DL.getABITypeAlignment(Ty); 594 595 SDValue Ptr = LowerParameterPtr(DAG, SL, Chain, Offset); 596 SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align, 597 MachineMemOperand::MONonTemporal | 598 MachineMemOperand::MODereferenceable | 599 MachineMemOperand::MOInvariant); 600 601 SDValue Val; 602 if (MemVT.isFloatingPoint()) 603 Val = DAG.getNode(ISD::FP_EXTEND, SL, VT, Load); 604 else if (Signed) 605 Val = DAG.getSExtOrTrunc(Load, SL, VT); 606 else 607 Val = DAG.getZExtOrTrunc(Load, SL, VT); 608 609 SDValue Ops[] = { 610 Val, 611 Load.getValue(1) 612 }; 613 614 return DAG.getMergeValues(Ops, SL); 615 } 616 617 SDValue SITargetLowering::LowerFormalArguments( 618 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 619 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 620 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 621 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 622 623 MachineFunction &MF = DAG.getMachineFunction(); 624 FunctionType *FType = MF.getFunction()->getFunctionType(); 625 
SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 626 const SISubtarget &ST = MF.getSubtarget<SISubtarget>(); 627 628 if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) { 629 const Function *Fn = MF.getFunction(); 630 DiagnosticInfoUnsupported NoGraphicsHSA( 631 *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc()); 632 DAG.getContext()->diagnose(NoGraphicsHSA); 633 return DAG.getEntryNode(); 634 } 635 636 // Create stack objects that are used for emitting debugger prologue if 637 // "amdgpu-debugger-emit-prologue" attribute was specified. 638 if (ST.debuggerEmitPrologue()) 639 createDebuggerPrologueStackObjects(MF); 640 641 SmallVector<ISD::InputArg, 16> Splits; 642 BitVector Skipped(Ins.size()); 643 644 for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) { 645 const ISD::InputArg &Arg = Ins[i]; 646 647 // First check if it's a PS input addr 648 if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() && 649 !Arg.Flags.isByVal() && PSInputNum <= 15) { 650 651 if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) { 652 // We can safely skip PS inputs 653 Skipped.set(i); 654 ++PSInputNum; 655 continue; 656 } 657 658 Info->markPSInputAllocated(PSInputNum); 659 if (Arg.Used) 660 Info->PSInputEna |= 1 << PSInputNum; 661 662 ++PSInputNum; 663 } 664 665 if (AMDGPU::isShader(CallConv)) { 666 // Second split vertices into their elements 667 if (Arg.VT.isVector()) { 668 ISD::InputArg NewArg = Arg; 669 NewArg.Flags.setSplit(); 670 NewArg.VT = Arg.VT.getVectorElementType(); 671 672 // We REALLY want the ORIGINAL number of vertex elements here, e.g. a 673 // three or five element vertex only needs three or five registers, 674 // NOT four or eight. 675 Type *ParamType = FType->getParamType(Arg.getOrigArgIndex()); 676 unsigned NumElements = ParamType->getVectorNumElements(); 677 678 for (unsigned j = 0; j != NumElements; ++j) { 679 Splits.push_back(NewArg); 680 NewArg.PartOffset += NewArg.VT.getStoreSize(); 681 } 682 } else { 683 Splits.push_back(Arg); 684 } 685 } 686 } 687 688 SmallVector<CCValAssign, 16> ArgLocs; 689 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 690 *DAG.getContext()); 691 692 // At least one interpolation mode must be enabled or else the GPU will hang. 693 // 694 // Check PSInputAddr instead of PSInputEna. The idea is that if the user set 695 // PSInputAddr, the user wants to enable some bits after the compilation 696 // based on run-time states. Since we can't know what the final PSInputEna 697 // will look like, so we shouldn't do anything here and the user should take 698 // responsibility for the correct programming. 699 // 700 // Otherwise, the following restrictions apply: 701 // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled. 702 // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be 703 // enabled too. 
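  // When these conditions are not met, the code below force-enables the first
  // PERSP input (bit 0) and marks VGPR0/VGPR1 as allocated for it, so that at
  // least one interpolation mode is always enabled.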
704 if (CallConv == CallingConv::AMDGPU_PS && 705 ((Info->getPSInputAddr() & 0x7F) == 0 || 706 ((Info->getPSInputAddr() & 0xF) == 0 && Info->isPSInputAllocated(11)))) { 707 CCInfo.AllocateReg(AMDGPU::VGPR0); 708 CCInfo.AllocateReg(AMDGPU::VGPR1); 709 Info->markPSInputAllocated(0); 710 Info->PSInputEna |= 1; 711 } 712 713 if (!AMDGPU::isShader(CallConv)) { 714 assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX()); 715 } else { 716 assert(!Info->hasPrivateSegmentBuffer() && !Info->hasDispatchPtr() && 717 !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() && 718 !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() && 719 !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() && 720 !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() && 721 !Info->hasWorkItemIDZ()); 722 } 723 724 // FIXME: How should these inputs interact with inreg / custom SGPR inputs? 725 if (Info->hasPrivateSegmentBuffer()) { 726 unsigned PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI); 727 MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass); 728 CCInfo.AllocateReg(PrivateSegmentBufferReg); 729 } 730 731 if (Info->hasDispatchPtr()) { 732 unsigned DispatchPtrReg = Info->addDispatchPtr(*TRI); 733 MF.addLiveIn(DispatchPtrReg, &AMDGPU::SReg_64RegClass); 734 CCInfo.AllocateReg(DispatchPtrReg); 735 } 736 737 if (Info->hasQueuePtr()) { 738 unsigned QueuePtrReg = Info->addQueuePtr(*TRI); 739 MF.addLiveIn(QueuePtrReg, &AMDGPU::SReg_64RegClass); 740 CCInfo.AllocateReg(QueuePtrReg); 741 } 742 743 if (Info->hasKernargSegmentPtr()) { 744 unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI); 745 MF.addLiveIn(InputPtrReg, &AMDGPU::SReg_64RegClass); 746 CCInfo.AllocateReg(InputPtrReg); 747 } 748 749 if (Info->hasDispatchID()) { 750 unsigned DispatchIDReg = Info->addDispatchID(*TRI); 751 MF.addLiveIn(DispatchIDReg, &AMDGPU::SReg_64RegClass); 752 CCInfo.AllocateReg(DispatchIDReg); 753 } 754 755 if (Info->hasFlatScratchInit()) { 756 unsigned FlatScratchInitReg = Info->addFlatScratchInit(*TRI); 757 MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SReg_64RegClass); 758 CCInfo.AllocateReg(FlatScratchInitReg); 759 } 760 761 if (!AMDGPU::isShader(CallConv)) 762 analyzeFormalArgumentsCompute(CCInfo, Ins); 763 else 764 AnalyzeFormalArguments(CCInfo, Splits); 765 766 SmallVector<SDValue, 16> Chains; 767 768 for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) { 769 770 const ISD::InputArg &Arg = Ins[i]; 771 if (Skipped[i]) { 772 InVals.push_back(DAG.getUNDEF(Arg.VT)); 773 continue; 774 } 775 776 CCValAssign &VA = ArgLocs[ArgIdx++]; 777 MVT VT = VA.getLocVT(); 778 779 if (VA.isMemLoc()) { 780 VT = Ins[i].VT; 781 EVT MemVT = VA.getLocVT(); 782 const unsigned Offset = Subtarget->getExplicitKernelArgOffset() + 783 VA.getLocMemOffset(); 784 // The first 36 bytes of the input buffer contains information about 785 // thread group and global sizes. 786 SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, Chain, 787 Offset, Ins[i].Flags.isSExt()); 788 Chains.push_back(Arg.getValue(1)); 789 790 auto *ParamTy = 791 dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex())); 792 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS && 793 ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { 794 // On SI local pointers are just offsets into LDS, so they are always 795 // less than 16-bits. On CI and newer they could potentially be 796 // real pointers, so we can't guarantee their size. 
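        // The AssertZext emitted below records that the bits above 15 are
        // known to be zero, which later DAG combines can exploit.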
        Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
                          DAG.getValueType(MVT::i16));
      }

      InVals.push_back(Arg);
      Info->setABIArgOffset(Offset + MemVT.getStoreSize());
      continue;
    }
    assert(VA.isRegLoc() && "Parameter must be in a register!");

    unsigned Reg = VA.getLocReg();

    if (VT == MVT::i64) {
      // For now assume it is a pointer.
      Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0,
                                     &AMDGPU::SReg_64RegClass);
      Reg = MF.addLiveIn(Reg, &AMDGPU::SReg_64RegClass);
      SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
      InVals.push_back(Copy);
      continue;
    }

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);

    Reg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);

    if (Arg.VT.isVector()) {
      // Build a vector from the registers.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      SmallVector<SDValue, 4> Regs;
      Regs.push_back(Val);
      for (unsigned j = 1; j != NumElements; ++j) {
        Reg = ArgLocs[ArgIdx++].getLocReg();
        Reg = MF.addLiveIn(Reg, RC);

        SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
        Regs.push_back(Copy);
      }

      // Fill up the missing vector elements.
      NumElements = Arg.VT.getVectorNumElements() - NumElements;
      Regs.append(NumElements, DAG.getUNDEF(VT));

      InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
      continue;
    }

    InVals.push_back(Val);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we
  // read these from the dispatch pointer.

  // Start adding system SGPRs.
  if (Info->hasWorkGroupIDX()) {
    unsigned Reg = Info->addWorkGroupIDX();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupIDY()) {
    unsigned Reg = Info->addWorkGroupIDY();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupIDZ()) {
    unsigned Reg = Info->addWorkGroupIDZ();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupInfo()) {
    unsigned Reg = Info->addWorkGroupInfo();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg;

    if (AMDGPU::isShader(CallConv)) {
      PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
      Info->setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
    } else
      PrivateSegmentWaveByteOffsetReg = Info->addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }

  // Now that we've figured out where the scratch register inputs are, see if
  // we should reserve the arguments and use them directly.
  bool HasStackObjects = MF.getFrameInfo().hasStackObjects();
  // Record that we know we have non-spill stack objects so we don't need to
  // check all stack objects later.
  if (HasStackObjects)
    Info->setHasNonSpillStackObjects(true);

  // Everything live out of a block is spilled with fast regalloc, so it's
  // almost certain that spilling will be required.
903 if (getTargetMachine().getOptLevel() == CodeGenOpt::None) 904 HasStackObjects = true; 905 906 if (ST.isAmdCodeObjectV2()) { 907 if (HasStackObjects) { 908 // If we have stack objects, we unquestionably need the private buffer 909 // resource. For the Code Object V2 ABI, this will be the first 4 user 910 // SGPR inputs. We can reserve those and use them directly. 911 912 unsigned PrivateSegmentBufferReg = TRI->getPreloadedValue( 913 MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER); 914 Info->setScratchRSrcReg(PrivateSegmentBufferReg); 915 916 unsigned PrivateSegmentWaveByteOffsetReg = TRI->getPreloadedValue( 917 MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); 918 Info->setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg); 919 } else { 920 unsigned ReservedBufferReg 921 = TRI->reservedPrivateSegmentBufferReg(MF); 922 unsigned ReservedOffsetReg 923 = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF); 924 925 // We tentatively reserve the last registers (skipping the last two 926 // which may contain VCC). After register allocation, we'll replace 927 // these with the ones immediately after those which were really 928 // allocated. In the prologue copies will be inserted from the argument 929 // to these reserved registers. 930 Info->setScratchRSrcReg(ReservedBufferReg); 931 Info->setScratchWaveOffsetReg(ReservedOffsetReg); 932 } 933 } else { 934 unsigned ReservedBufferReg = TRI->reservedPrivateSegmentBufferReg(MF); 935 936 // Without HSA, relocations are used for the scratch pointer and the 937 // buffer resource setup is always inserted in the prologue. Scratch wave 938 // offset is still in an input SGPR. 939 Info->setScratchRSrcReg(ReservedBufferReg); 940 941 if (HasStackObjects) { 942 unsigned ScratchWaveOffsetReg = TRI->getPreloadedValue( 943 MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); 944 Info->setScratchWaveOffsetReg(ScratchWaveOffsetReg); 945 } else { 946 unsigned ReservedOffsetReg 947 = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF); 948 Info->setScratchWaveOffsetReg(ReservedOffsetReg); 949 } 950 } 951 952 if (Info->hasWorkItemIDX()) { 953 unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X); 954 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 955 CCInfo.AllocateReg(Reg); 956 } 957 958 if (Info->hasWorkItemIDY()) { 959 unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y); 960 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 961 CCInfo.AllocateReg(Reg); 962 } 963 964 if (Info->hasWorkItemIDZ()) { 965 unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z); 966 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 967 CCInfo.AllocateReg(Reg); 968 } 969 970 if (Chains.empty()) 971 return Chain; 972 973 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 974 } 975 976 SDValue 977 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 978 bool isVarArg, 979 const SmallVectorImpl<ISD::OutputArg> &Outs, 980 const SmallVectorImpl<SDValue> &OutVals, 981 const SDLoc &DL, SelectionDAG &DAG) const { 982 MachineFunction &MF = DAG.getMachineFunction(); 983 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 984 985 if (!AMDGPU::isShader(CallConv)) 986 return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs, 987 OutVals, DL, DAG); 988 989 Info->setIfReturnsVoid(Outs.size() == 0); 990 991 SmallVector<ISD::OutputArg, 48> Splits; 992 SmallVector<SDValue, 48> SplitVals; 993 994 // Split vectors into their elements. 
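  // For example, a <3 x float> return value becomes three scalar outgoing
  // values; Out.ArgVT keeps the original element count, so nothing is padded
  // out to four elements.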
995 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 996 const ISD::OutputArg &Out = Outs[i]; 997 998 if (Out.VT.isVector()) { 999 MVT VT = Out.VT.getVectorElementType(); 1000 ISD::OutputArg NewOut = Out; 1001 NewOut.Flags.setSplit(); 1002 NewOut.VT = VT; 1003 1004 // We want the original number of vector elements here, e.g. 1005 // three or five, not four or eight. 1006 unsigned NumElements = Out.ArgVT.getVectorNumElements(); 1007 1008 for (unsigned j = 0; j != NumElements; ++j) { 1009 SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i], 1010 DAG.getConstant(j, DL, MVT::i32)); 1011 SplitVals.push_back(Elem); 1012 Splits.push_back(NewOut); 1013 NewOut.PartOffset += NewOut.VT.getStoreSize(); 1014 } 1015 } else { 1016 SplitVals.push_back(OutVals[i]); 1017 Splits.push_back(Out); 1018 } 1019 } 1020 1021 // CCValAssign - represent the assignment of the return value to a location. 1022 SmallVector<CCValAssign, 48> RVLocs; 1023 1024 // CCState - Info about the registers and stack slots. 1025 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 1026 *DAG.getContext()); 1027 1028 // Analyze outgoing return values. 1029 AnalyzeReturn(CCInfo, Splits); 1030 1031 SDValue Flag; 1032 SmallVector<SDValue, 48> RetOps; 1033 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 1034 1035 // Copy the result values into the output registers. 1036 for (unsigned i = 0, realRVLocIdx = 0; 1037 i != RVLocs.size(); 1038 ++i, ++realRVLocIdx) { 1039 CCValAssign &VA = RVLocs[i]; 1040 assert(VA.isRegLoc() && "Can only return in registers!"); 1041 1042 SDValue Arg = SplitVals[realRVLocIdx]; 1043 1044 // Copied from other backends. 1045 switch (VA.getLocInfo()) { 1046 default: llvm_unreachable("Unknown loc info!"); 1047 case CCValAssign::Full: 1048 break; 1049 case CCValAssign::BCvt: 1050 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); 1051 break; 1052 } 1053 1054 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag); 1055 Flag = Chain.getValue(1); 1056 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 1057 } 1058 1059 // Update chain and glue. 1060 RetOps[0] = Chain; 1061 if (Flag.getNode()) 1062 RetOps.push_back(Flag); 1063 1064 unsigned Opc = Info->returnsVoid() ? 
AMDGPUISD::ENDPGM : AMDGPUISD::RETURN; 1065 return DAG.getNode(Opc, DL, MVT::Other, RetOps); 1066 } 1067 1068 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT, 1069 SelectionDAG &DAG) const { 1070 unsigned Reg = StringSwitch<unsigned>(RegName) 1071 .Case("m0", AMDGPU::M0) 1072 .Case("exec", AMDGPU::EXEC) 1073 .Case("exec_lo", AMDGPU::EXEC_LO) 1074 .Case("exec_hi", AMDGPU::EXEC_HI) 1075 .Case("flat_scratch", AMDGPU::FLAT_SCR) 1076 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO) 1077 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI) 1078 .Default(AMDGPU::NoRegister); 1079 1080 if (Reg == AMDGPU::NoRegister) { 1081 report_fatal_error(Twine("invalid register name \"" 1082 + StringRef(RegName) + "\".")); 1083 1084 } 1085 1086 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS && 1087 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) { 1088 report_fatal_error(Twine("invalid register \"" 1089 + StringRef(RegName) + "\" for subtarget.")); 1090 } 1091 1092 switch (Reg) { 1093 case AMDGPU::M0: 1094 case AMDGPU::EXEC_LO: 1095 case AMDGPU::EXEC_HI: 1096 case AMDGPU::FLAT_SCR_LO: 1097 case AMDGPU::FLAT_SCR_HI: 1098 if (VT.getSizeInBits() == 32) 1099 return Reg; 1100 break; 1101 case AMDGPU::EXEC: 1102 case AMDGPU::FLAT_SCR: 1103 if (VT.getSizeInBits() == 64) 1104 return Reg; 1105 break; 1106 default: 1107 llvm_unreachable("missing register type checking"); 1108 } 1109 1110 report_fatal_error(Twine("invalid type for register \"" 1111 + StringRef(RegName) + "\".")); 1112 } 1113 1114 // If kill is not the last instruction, split the block so kill is always a 1115 // proper terminator. 1116 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI, 1117 MachineBasicBlock *BB) const { 1118 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 1119 1120 MachineBasicBlock::iterator SplitPoint(&MI); 1121 ++SplitPoint; 1122 1123 if (SplitPoint == BB->end()) { 1124 // Don't bother with a new block. 1125 MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR)); 1126 return BB; 1127 } 1128 1129 MachineFunction *MF = BB->getParent(); 1130 MachineBasicBlock *SplitBB 1131 = MF->CreateMachineBasicBlock(BB->getBasicBlock()); 1132 1133 MF->insert(++MachineFunction::iterator(BB), SplitBB); 1134 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end()); 1135 1136 SplitBB->transferSuccessorsAndUpdatePHIs(BB); 1137 BB->addSuccessor(SplitBB); 1138 1139 MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR)); 1140 return SplitBB; 1141 } 1142 1143 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the 1144 // wavefront. If the value is uniform and just happens to be in a VGPR, this 1145 // will only do one iteration. In the worst case, this will loop 64 times. 1146 // 1147 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value. 
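//
// The emitted loop looks roughly like this (sketch only; the PHIs and exact
// operands are omitted, and the register names are illustrative):
//
//   loop:
//     v_readfirstlane_b32 s_idx, v_idx         ; pick the index from one lane
//     v_cmp_eq_u32        s_cond, s_idx, v_idx ; lanes using that same index
//     <write s_idx (+ offset) to m0 or the GPR index register>
//     s_and_saveexec_b64  s_save, s_cond       ; restrict exec to those lanes
//     <the indexed access is inserted here, at the returned iterator>
//     s_xor_b64           exec, exec, s_save   ; clear the lanes just handled
//     s_cbranch_execnz    loop                 ; repeat until all lanes done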
1148 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop( 1149 const SIInstrInfo *TII, 1150 MachineRegisterInfo &MRI, 1151 MachineBasicBlock &OrigBB, 1152 MachineBasicBlock &LoopBB, 1153 const DebugLoc &DL, 1154 const MachineOperand &IdxReg, 1155 unsigned InitReg, 1156 unsigned ResultReg, 1157 unsigned PhiReg, 1158 unsigned InitSaveExecReg, 1159 int Offset, 1160 bool UseGPRIdxMode) { 1161 MachineBasicBlock::iterator I = LoopBB.begin(); 1162 1163 unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1164 unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1165 unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 1166 unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1167 1168 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) 1169 .addReg(InitReg) 1170 .addMBB(&OrigBB) 1171 .addReg(ResultReg) 1172 .addMBB(&LoopBB); 1173 1174 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec) 1175 .addReg(InitSaveExecReg) 1176 .addMBB(&OrigBB) 1177 .addReg(NewExec) 1178 .addMBB(&LoopBB); 1179 1180 // Read the next variant <- also loop target. 1181 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg) 1182 .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef())); 1183 1184 // Compare the just read M0 value to all possible Idx values. 1185 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg) 1186 .addReg(CurrentIdxReg) 1187 .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg()); 1188 1189 if (UseGPRIdxMode) { 1190 unsigned IdxReg; 1191 if (Offset == 0) { 1192 IdxReg = CurrentIdxReg; 1193 } else { 1194 IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 1195 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg) 1196 .addReg(CurrentIdxReg, RegState::Kill) 1197 .addImm(Offset); 1198 } 1199 1200 MachineInstr *SetIdx = 1201 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_IDX)) 1202 .addReg(IdxReg, RegState::Kill); 1203 SetIdx->getOperand(2).setIsUndef(); 1204 } else { 1205 // Move index from VCC into M0 1206 if (Offset == 0) { 1207 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 1208 .addReg(CurrentIdxReg, RegState::Kill); 1209 } else { 1210 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 1211 .addReg(CurrentIdxReg, RegState::Kill) 1212 .addImm(Offset); 1213 } 1214 } 1215 1216 // Update EXEC, save the original EXEC value to VCC. 1217 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec) 1218 .addReg(CondReg, RegState::Kill); 1219 1220 MRI.setSimpleHint(NewExec, CondReg); 1221 1222 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 1223 MachineInstr *InsertPt = 1224 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC) 1225 .addReg(AMDGPU::EXEC) 1226 .addReg(NewExec); 1227 1228 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use 1229 // s_cbranch_scc0? 1230 1231 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover. 1232 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) 1233 .addMBB(&LoopBB); 1234 1235 return InsertPt->getIterator(); 1236 } 1237 1238 // This has slightly sub-optimal regalloc when the source vector is killed by 1239 // the read. The register allocator does not understand that the kill is 1240 // per-workitem, so is kept alive for the whole loop so we end up not re-using a 1241 // subregister from it, using 1 more VGPR than necessary. This was saved when 1242 // this was expanded after register allocation. 
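//
// Block structure produced by loadM0FromVGPR (sketch):
//
//   MBB -----> LoopBB -----> RemainderBB
//                ^  |
//                +--+     LoopBB branches back to itself while lanes remain.
//
// EXEC is saved into an SGPR pair in MBB and restored at the start of
// RemainderBB.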
1243 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII, 1244 MachineBasicBlock &MBB, 1245 MachineInstr &MI, 1246 unsigned InitResultReg, 1247 unsigned PhiReg, 1248 int Offset, 1249 bool UseGPRIdxMode) { 1250 MachineFunction *MF = MBB.getParent(); 1251 MachineRegisterInfo &MRI = MF->getRegInfo(); 1252 const DebugLoc &DL = MI.getDebugLoc(); 1253 MachineBasicBlock::iterator I(&MI); 1254 1255 unsigned DstReg = MI.getOperand(0).getReg(); 1256 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1257 unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1258 1259 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec); 1260 1261 // Save the EXEC mask 1262 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec) 1263 .addReg(AMDGPU::EXEC); 1264 1265 // To insert the loop we need to split the block. Move everything after this 1266 // point to a new block, and insert a new empty block between the two. 1267 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock(); 1268 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock(); 1269 MachineFunction::iterator MBBI(MBB); 1270 ++MBBI; 1271 1272 MF->insert(MBBI, LoopBB); 1273 MF->insert(MBBI, RemainderBB); 1274 1275 LoopBB->addSuccessor(LoopBB); 1276 LoopBB->addSuccessor(RemainderBB); 1277 1278 // Move the rest of the block into a new block. 1279 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 1280 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); 1281 1282 MBB.addSuccessor(LoopBB); 1283 1284 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 1285 1286 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx, 1287 InitResultReg, DstReg, PhiReg, TmpExec, 1288 Offset, UseGPRIdxMode); 1289 1290 MachineBasicBlock::iterator First = RemainderBB->begin(); 1291 BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) 1292 .addReg(SaveExec); 1293 1294 return InsPt; 1295 } 1296 1297 // Returns subreg index, offset 1298 static std::pair<unsigned, int> 1299 computeIndirectRegAndOffset(const SIRegisterInfo &TRI, 1300 const TargetRegisterClass *SuperRC, 1301 unsigned VecReg, 1302 int Offset) { 1303 int NumElts = SuperRC->getSize() / 4; 1304 1305 // Skip out of bounds offsets, or else we would end up using an undefined 1306 // register. 1307 if (Offset >= NumElts || Offset < 0) 1308 return std::make_pair(AMDGPU::sub0, Offset); 1309 1310 return std::make_pair(AMDGPU::sub0 + Offset, 0); 1311 } 1312 1313 // Return true if the index is an SGPR and was set. 1314 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII, 1315 MachineRegisterInfo &MRI, 1316 MachineInstr &MI, 1317 int Offset, 1318 bool UseGPRIdxMode, 1319 bool IsIndirectSrc) { 1320 MachineBasicBlock *MBB = MI.getParent(); 1321 const DebugLoc &DL = MI.getDebugLoc(); 1322 MachineBasicBlock::iterator I(&MI); 1323 1324 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 1325 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); 1326 1327 assert(Idx->getReg() != AMDGPU::NoRegister); 1328 1329 if (!TII->getRegisterInfo().isSGPRClass(IdxRC)) 1330 return false; 1331 1332 if (UseGPRIdxMode) { 1333 unsigned IdxMode = IsIndirectSrc ? 
1334 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE; 1335 if (Offset == 0) { 1336 MachineInstr *SetOn = 1337 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 1338 .addOperand(*Idx) 1339 .addImm(IdxMode); 1340 1341 SetOn->getOperand(3).setIsUndef(); 1342 } else { 1343 unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 1344 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) 1345 .addOperand(*Idx) 1346 .addImm(Offset); 1347 MachineInstr *SetOn = 1348 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 1349 .addReg(Tmp, RegState::Kill) 1350 .addImm(IdxMode); 1351 1352 SetOn->getOperand(3).setIsUndef(); 1353 } 1354 1355 return true; 1356 } 1357 1358 if (Offset == 0) { 1359 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 1360 .addOperand(*Idx); 1361 } else { 1362 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 1363 .addOperand(*Idx) 1364 .addImm(Offset); 1365 } 1366 1367 return true; 1368 } 1369 1370 // Control flow needs to be inserted if indexing with a VGPR. 1371 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI, 1372 MachineBasicBlock &MBB, 1373 const SISubtarget &ST) { 1374 const SIInstrInfo *TII = ST.getInstrInfo(); 1375 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 1376 MachineFunction *MF = MBB.getParent(); 1377 MachineRegisterInfo &MRI = MF->getRegInfo(); 1378 1379 unsigned Dst = MI.getOperand(0).getReg(); 1380 unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg(); 1381 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 1382 1383 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg); 1384 1385 unsigned SubReg; 1386 std::tie(SubReg, Offset) 1387 = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset); 1388 1389 bool UseGPRIdxMode = ST.hasVGPRIndexMode() && EnableVGPRIndexMode; 1390 1391 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) { 1392 MachineBasicBlock::iterator I(&MI); 1393 const DebugLoc &DL = MI.getDebugLoc(); 1394 1395 if (UseGPRIdxMode) { 1396 // TODO: Look at the uses to avoid the copy. This may require rescheduling 1397 // to avoid interfering with other uses, so probably requires a new 1398 // optimization pass. 1399 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) 1400 .addReg(SrcReg, RegState::Undef, SubReg) 1401 .addReg(SrcReg, RegState::Implicit) 1402 .addReg(AMDGPU::M0, RegState::Implicit); 1403 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 1404 } else { 1405 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 1406 .addReg(SrcReg, RegState::Undef, SubReg) 1407 .addReg(SrcReg, RegState::Implicit); 1408 } 1409 1410 MI.eraseFromParent(); 1411 1412 return &MBB; 1413 } 1414 1415 1416 const DebugLoc &DL = MI.getDebugLoc(); 1417 MachineBasicBlock::iterator I(&MI); 1418 1419 unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1420 unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1421 1422 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg); 1423 1424 if (UseGPRIdxMode) { 1425 MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 1426 .addImm(0) // Reset inside loop. 1427 .addImm(VGPRIndexMode::SRC0_ENABLE); 1428 SetOn->getOperand(3).setIsUndef(); 1429 1430 // Disable again after the loop. 
1431 BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 1432 } 1433 1434 auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, UseGPRIdxMode); 1435 MachineBasicBlock *LoopBB = InsPt->getParent(); 1436 1437 if (UseGPRIdxMode) { 1438 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) 1439 .addReg(SrcReg, RegState::Undef, SubReg) 1440 .addReg(SrcReg, RegState::Implicit) 1441 .addReg(AMDGPU::M0, RegState::Implicit); 1442 } else { 1443 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 1444 .addReg(SrcReg, RegState::Undef, SubReg) 1445 .addReg(SrcReg, RegState::Implicit); 1446 } 1447 1448 MI.eraseFromParent(); 1449 1450 return LoopBB; 1451 } 1452 1453 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, 1454 MachineBasicBlock &MBB, 1455 const SISubtarget &ST) { 1456 const SIInstrInfo *TII = ST.getInstrInfo(); 1457 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 1458 MachineFunction *MF = MBB.getParent(); 1459 MachineRegisterInfo &MRI = MF->getRegInfo(); 1460 1461 unsigned Dst = MI.getOperand(0).getReg(); 1462 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); 1463 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 1464 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); 1465 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 1466 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg()); 1467 1468 // This can be an immediate, but will be folded later. 1469 assert(Val->getReg()); 1470 1471 unsigned SubReg; 1472 std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC, 1473 SrcVec->getReg(), 1474 Offset); 1475 bool UseGPRIdxMode = ST.hasVGPRIndexMode() && EnableVGPRIndexMode; 1476 1477 if (Idx->getReg() == AMDGPU::NoRegister) { 1478 MachineBasicBlock::iterator I(&MI); 1479 const DebugLoc &DL = MI.getDebugLoc(); 1480 1481 assert(Offset == 0); 1482 1483 BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst) 1484 .addOperand(*SrcVec) 1485 .addOperand(*Val) 1486 .addImm(SubReg); 1487 1488 MI.eraseFromParent(); 1489 return &MBB; 1490 } 1491 1492 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) { 1493 MachineBasicBlock::iterator I(&MI); 1494 const DebugLoc &DL = MI.getDebugLoc(); 1495 1496 if (UseGPRIdxMode) { 1497 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) 1498 .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst 1499 .addOperand(*Val) 1500 .addReg(Dst, RegState::ImplicitDefine) 1501 .addReg(SrcVec->getReg(), RegState::Implicit) 1502 .addReg(AMDGPU::M0, RegState::Implicit); 1503 1504 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 1505 } else { 1506 const MCInstrDesc &MovRelDesc = TII->get(AMDGPU::V_MOVRELD_B32_e32); 1507 1508 MachineInstr *MovRel = 1509 BuildMI(MBB, I, DL, MovRelDesc) 1510 .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst 1511 .addOperand(*Val) 1512 .addReg(Dst, RegState::ImplicitDefine) 1513 .addReg(SrcVec->getReg(), RegState::Implicit); 1514 1515 const int ImpDefIdx = MovRelDesc.getNumOperands() + 1516 MovRelDesc.getNumImplicitUses(); 1517 const int ImpUseIdx = ImpDefIdx + 1; 1518 1519 MovRel->tieOperands(ImpDefIdx, ImpUseIdx); 1520 } 1521 1522 MI.eraseFromParent(); 1523 return &MBB; 1524 } 1525 1526 if (Val->isReg()) 1527 MRI.clearKillFlags(Val->getReg()); 1528 1529 const DebugLoc &DL = MI.getDebugLoc(); 1530 1531 if (UseGPRIdxMode) { 1532 MachineBasicBlock::iterator I(&MI); 1533 1534 MachineInstr *SetOn = BuildMI(MBB, I, 
DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 1535 .addImm(0) // Reset inside loop. 1536 .addImm(VGPRIndexMode::DST_ENABLE); 1537 SetOn->getOperand(3).setIsUndef(); 1538 1539 // Disable again after the loop. 1540 BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 1541 } 1542 1543 unsigned PhiReg = MRI.createVirtualRegister(VecRC); 1544 1545 auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, 1546 Offset, UseGPRIdxMode); 1547 MachineBasicBlock *LoopBB = InsPt->getParent(); 1548 1549 if (UseGPRIdxMode) { 1550 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) 1551 .addReg(PhiReg, RegState::Undef, SubReg) // vdst 1552 .addOperand(*Val) // src0 1553 .addReg(Dst, RegState::ImplicitDefine) 1554 .addReg(PhiReg, RegState::Implicit) 1555 .addReg(AMDGPU::M0, RegState::Implicit); 1556 } else { 1557 const MCInstrDesc &MovRelDesc = TII->get(AMDGPU::V_MOVRELD_B32_e32); 1558 // vdst is not actually read and just provides the base register index. 1559 MachineInstr *MovRel = 1560 BuildMI(*LoopBB, InsPt, DL, MovRelDesc) 1561 .addReg(PhiReg, RegState::Undef, SubReg) // vdst 1562 .addOperand(*Val) 1563 .addReg(Dst, RegState::ImplicitDefine) 1564 .addReg(PhiReg, RegState::Implicit); 1565 1566 const int ImpDefIdx = MovRelDesc.getNumOperands() + 1567 MovRelDesc.getNumImplicitUses(); 1568 const int ImpUseIdx = ImpDefIdx + 1; 1569 1570 MovRel->tieOperands(ImpDefIdx, ImpUseIdx); 1571 } 1572 1573 MI.eraseFromParent(); 1574 1575 return LoopBB; 1576 } 1577 1578 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( 1579 MachineInstr &MI, MachineBasicBlock *BB) const { 1580 switch (MI.getOpcode()) { 1581 case AMDGPU::SI_INIT_M0: { 1582 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 1583 BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(), 1584 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 1585 .addOperand(MI.getOperand(0)); 1586 MI.eraseFromParent(); 1587 return BB; 1588 } 1589 case AMDGPU::GET_GROUPSTATICSIZE: { 1590 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 1591 1592 MachineFunction *MF = BB->getParent(); 1593 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1594 DebugLoc DL = MI.getDebugLoc(); 1595 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32)) 1596 .addOperand(MI.getOperand(0)) 1597 .addImm(MFI->getLDSSize()); 1598 MI.eraseFromParent(); 1599 return BB; 1600 } 1601 case AMDGPU::SI_INDIRECT_SRC_V1: 1602 case AMDGPU::SI_INDIRECT_SRC_V2: 1603 case AMDGPU::SI_INDIRECT_SRC_V4: 1604 case AMDGPU::SI_INDIRECT_SRC_V8: 1605 case AMDGPU::SI_INDIRECT_SRC_V16: 1606 return emitIndirectSrc(MI, *BB, *getSubtarget()); 1607 case AMDGPU::SI_INDIRECT_DST_V1: 1608 case AMDGPU::SI_INDIRECT_DST_V2: 1609 case AMDGPU::SI_INDIRECT_DST_V4: 1610 case AMDGPU::SI_INDIRECT_DST_V8: 1611 case AMDGPU::SI_INDIRECT_DST_V16: 1612 return emitIndirectDst(MI, *BB, *getSubtarget()); 1613 case AMDGPU::SI_KILL: 1614 return splitKillBlock(MI, BB); 1615 case AMDGPU::V_CNDMASK_B64_PSEUDO: { 1616 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 1617 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 1618 1619 unsigned Dst = MI.getOperand(0).getReg(); 1620 unsigned Src0 = MI.getOperand(1).getReg(); 1621 unsigned Src1 = MI.getOperand(2).getReg(); 1622 const DebugLoc &DL = MI.getDebugLoc(); 1623 unsigned SrcCond = MI.getOperand(3).getReg(); 1624 1625 unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1626 unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1627 1628 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), 
DstLo) 1629 .addReg(Src0, 0, AMDGPU::sub0) 1630 .addReg(Src1, 0, AMDGPU::sub0) 1631 .addReg(SrcCond); 1632 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi) 1633 .addReg(Src0, 0, AMDGPU::sub1) 1634 .addReg(Src1, 0, AMDGPU::sub1) 1635 .addReg(SrcCond); 1636 1637 BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst) 1638 .addReg(DstLo) 1639 .addImm(AMDGPU::sub0) 1640 .addReg(DstHi) 1641 .addImm(AMDGPU::sub1); 1642 MI.eraseFromParent(); 1643 return BB; 1644 } 1645 default: 1646 return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); 1647 } 1648 } 1649 1650 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const { 1651 // This currently forces unfolding various combinations of fsub into fma with 1652 // free fneg'd operands. As long as we have fast FMA (controlled by 1653 // isFMAFasterThanFMulAndFAdd), we should perform these. 1654 1655 // When fma is quarter rate, for f64 where add / sub are at best half rate, 1656 // most of these combines appear to be cycle neutral but save on instruction 1657 // count / code size. 1658 return true; 1659 } 1660 1661 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, 1662 EVT VT) const { 1663 if (!VT.isVector()) { 1664 return MVT::i1; 1665 } 1666 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements()); 1667 } 1668 1669 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT) const { 1670 return MVT::i32; 1671 } 1672 1673 // Answering this is somewhat tricky and depends on the specific device which 1674 // have different rates for fma or all f64 operations. 1675 // 1676 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other 1677 // regardless of which device (although the number of cycles differs between 1678 // devices), so it is always profitable for f64. 1679 // 1680 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable 1681 // only on full rate devices. Normally, we should prefer selecting v_mad_f32 1682 // which we can always do even without fused FP ops since it returns the same 1683 // result as the separate operations and since it is always full 1684 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32 1685 // however does not support denormals, so we do report fma as faster if we have 1686 // a fast fma device and require denormals. 1687 // 1688 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 1689 VT = VT.getScalarType(); 1690 1691 if (!VT.isSimple()) 1692 return false; 1693 1694 switch (VT.getSimpleVT().SimpleTy) { 1695 case MVT::f32: 1696 // This is as fast on some subtargets. However, we always have full rate f32 1697 // mad available which returns the same result as the separate operations 1698 // which we should prefer over fma. We can't use this if we want to support 1699 // denormals, so only report this in these cases. 
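// In other words, only report fma as faster when f32 denormals are requested
// (so v_mad_f32 is unusable) and the subtarget has full-rate v_fma_f32.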
1700 return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32(); 1701 case MVT::f64: 1702 return true; 1703 default: 1704 break; 1705 } 1706 1707 return false; 1708 } 1709 1710 //===----------------------------------------------------------------------===// 1711 // Custom DAG Lowering Operations 1712 //===----------------------------------------------------------------------===// 1713 1714 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 1715 switch (Op.getOpcode()) { 1716 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); 1717 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 1718 case ISD::LOAD: { 1719 SDValue Result = LowerLOAD(Op, DAG); 1720 assert((!Result.getNode() || 1721 Result.getNode()->getNumValues() == 2) && 1722 "Load should return a value and a chain"); 1723 return Result; 1724 } 1725 1726 case ISD::FSIN: 1727 case ISD::FCOS: 1728 return LowerTrig(Op, DAG); 1729 case ISD::SELECT: return LowerSELECT(Op, DAG); 1730 case ISD::FDIV: return LowerFDIV(Op, DAG); 1731 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG); 1732 case ISD::STORE: return LowerSTORE(Op, DAG); 1733 case ISD::GlobalAddress: { 1734 MachineFunction &MF = DAG.getMachineFunction(); 1735 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1736 return LowerGlobalAddress(MFI, Op, DAG); 1737 } 1738 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 1739 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 1740 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); 1741 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG); 1742 case ISD::TRAP: return lowerTRAP(Op, DAG); 1743 } 1744 return SDValue(); 1745 } 1746 1747 /// \brief Helper function for LowerBRCOND 1748 static SDNode *findUser(SDValue Value, unsigned Opcode) { 1749 1750 SDNode *Parent = Value.getNode(); 1751 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); 1752 I != E; ++I) { 1753 1754 if (I.getUse().get() != Value) 1755 continue; 1756 1757 if (I->getOpcode() == Opcode) 1758 return *I; 1759 } 1760 return nullptr; 1761 } 1762 1763 bool SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { 1764 if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 1765 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) { 1766 case AMDGPUIntrinsic::amdgcn_if: 1767 case AMDGPUIntrinsic::amdgcn_else: 1768 case AMDGPUIntrinsic::amdgcn_end_cf: 1769 case AMDGPUIntrinsic::amdgcn_loop: 1770 return true; 1771 default: 1772 return false; 1773 } 1774 } 1775 1776 if (Intr->getOpcode() == ISD::INTRINSIC_WO_CHAIN) { 1777 switch (cast<ConstantSDNode>(Intr->getOperand(0))->getZExtValue()) { 1778 case AMDGPUIntrinsic::amdgcn_break: 1779 case AMDGPUIntrinsic::amdgcn_if_break: 1780 case AMDGPUIntrinsic::amdgcn_else_break: 1781 return true; 1782 default: 1783 return false; 1784 } 1785 } 1786 1787 return false; 1788 } 1789 1790 void SITargetLowering::createDebuggerPrologueStackObjects( 1791 MachineFunction &MF) const { 1792 // Create stack objects that are used for emitting debugger prologue. 
1793 // 1794 // Debugger prologue writes work group IDs and work item IDs to scratch memory 1795 // at fixed location in the following format: 1796 // offset 0: work group ID x 1797 // offset 4: work group ID y 1798 // offset 8: work group ID z 1799 // offset 16: work item ID x 1800 // offset 20: work item ID y 1801 // offset 24: work item ID z 1802 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 1803 int ObjectIdx = 0; 1804 1805 // For each dimension: 1806 for (unsigned i = 0; i < 3; ++i) { 1807 // Create fixed stack object for work group ID. 1808 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true); 1809 Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx); 1810 // Create fixed stack object for work item ID. 1811 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true); 1812 Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx); 1813 } 1814 } 1815 1816 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const { 1817 const Triple &TT = getTargetMachine().getTargetTriple(); 1818 return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS && 1819 AMDGPU::shouldEmitConstantsToTextSection(TT); 1820 } 1821 1822 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const { 1823 return (GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS || 1824 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) && 1825 !shouldEmitFixup(GV) && 1826 !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); 1827 } 1828 1829 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const { 1830 return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV); 1831 } 1832 1833 /// This transforms the control flow intrinsics to get the branch destination as 1834 /// last parameter, also switches branch target with BR if the need arise 1835 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, 1836 SelectionDAG &DAG) const { 1837 1838 SDLoc DL(BRCOND); 1839 1840 SDNode *Intr = BRCOND.getOperand(1).getNode(); 1841 SDValue Target = BRCOND.getOperand(2); 1842 SDNode *BR = nullptr; 1843 SDNode *SetCC = nullptr; 1844 1845 if (Intr->getOpcode() == ISD::SETCC) { 1846 // As long as we negate the condition everything is fine 1847 SetCC = Intr; 1848 Intr = SetCC->getOperand(0).getNode(); 1849 1850 } else { 1851 // Get the target from BR if we don't negate the condition 1852 BR = findUser(BRCOND, ISD::BR); 1853 Target = BR->getOperand(1); 1854 } 1855 1856 // FIXME: This changes the types of the intrinsics instead of introducing new 1857 // nodes with the correct types. 1858 // e.g. llvm.amdgcn.loop 1859 1860 // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3 1861 // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088> 1862 1863 if (!isCFIntrinsic(Intr)) { 1864 // This is a uniform branch so we don't need to legalize. 1865 return BRCOND; 1866 } 1867 1868 bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID || 1869 Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN; 1870 1871 assert(!SetCC || 1872 (SetCC->getConstantOperandVal(1) == 1 && 1873 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() == 1874 ISD::SETNE)); 1875 1876 // operands of the new intrinsic call 1877 SmallVector<SDValue, 4> Ops; 1878 if (HaveChain) 1879 Ops.push_back(BRCOND.getOperand(0)); 1880 1881 Ops.append(Intr->op_begin() + (HaveChain ? 
1 : 0), Intr->op_end()); 1882 Ops.push_back(Target); 1883 1884 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end()); 1885 1886 // build the new intrinsic call 1887 SDNode *Result = DAG.getNode( 1888 Res.size() > 1 ? ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL, 1889 DAG.getVTList(Res), Ops).getNode(); 1890 1891 if (!HaveChain) { 1892 SDValue Ops[] = { 1893 SDValue(Result, 0), 1894 BRCOND.getOperand(0) 1895 }; 1896 1897 Result = DAG.getMergeValues(Ops, DL).getNode(); 1898 } 1899 1900 if (BR) { 1901 // Give the branch instruction our target 1902 SDValue Ops[] = { 1903 BR->getOperand(0), 1904 BRCOND.getOperand(2) 1905 }; 1906 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops); 1907 DAG.ReplaceAllUsesWith(BR, NewBR.getNode()); 1908 BR = NewBR.getNode(); 1909 } 1910 1911 SDValue Chain = SDValue(Result, Result->getNumValues() - 1); 1912 1913 // Copy the intrinsic results to registers 1914 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) { 1915 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg); 1916 if (!CopyToReg) 1917 continue; 1918 1919 Chain = DAG.getCopyToReg( 1920 Chain, DL, 1921 CopyToReg->getOperand(1), 1922 SDValue(Result, i - 1), 1923 SDValue()); 1924 1925 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0)); 1926 } 1927 1928 // Remove the old intrinsic from the chain 1929 DAG.ReplaceAllUsesOfValueWith( 1930 SDValue(Intr, Intr->getNumValues() - 1), 1931 Intr->getOperand(0)); 1932 1933 return Chain; 1934 } 1935 1936 SDValue SITargetLowering::getSegmentAperture(unsigned AS, 1937 SelectionDAG &DAG) const { 1938 SDLoc SL; 1939 MachineFunction &MF = DAG.getMachineFunction(); 1940 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 1941 unsigned UserSGPR = Info->getQueuePtrUserSGPR(); 1942 assert(UserSGPR != AMDGPU::NoRegister); 1943 1944 SDValue QueuePtr = CreateLiveInRegister( 1945 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); 1946 1947 // Offset into amd_queue_t for group_segment_aperture_base_hi / 1948 // private_segment_aperture_base_hi. 1949 uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44; 1950 1951 SDValue Ptr = DAG.getNode(ISD::ADD, SL, MVT::i64, QueuePtr, 1952 DAG.getConstant(StructOffset, SL, MVT::i64)); 1953 1954 // TODO: Use custom target PseudoSourceValue. 1955 // TODO: We should use the value from the IR intrinsic call, but it might not 1956 // be available and how do we get it? 1957 Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()), 1958 AMDGPUAS::CONSTANT_ADDRESS)); 1959 1960 MachinePointerInfo PtrInfo(V, StructOffset); 1961 return DAG.getLoad(MVT::i32, SL, QueuePtr.getValue(1), Ptr, PtrInfo, 1962 MinAlign(64, StructOffset), 1963 MachineMemOperand::MODereferenceable | 1964 MachineMemOperand::MOInvariant); 1965 } 1966 1967 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, 1968 SelectionDAG &DAG) const { 1969 SDLoc SL(Op); 1970 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op); 1971 1972 SDValue Src = ASC->getOperand(0); 1973 1974 // FIXME: Really support non-0 null pointers. 
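// For now the segment (local/private) null pointer is taken to be all-ones
// and the flat null pointer to be zero; the selects below pick between the
// converted pointer and the corresponding null value.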
1975 SDValue SegmentNullPtr = DAG.getConstant(-1, SL, MVT::i32); 1976 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64); 1977 1978 // flat -> local/private 1979 if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { 1980 if (ASC->getDestAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || 1981 ASC->getDestAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { 1982 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE); 1983 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); 1984 1985 return DAG.getNode(ISD::SELECT, SL, MVT::i32, 1986 NonNull, Ptr, SegmentNullPtr); 1987 } 1988 } 1989 1990 // local/private -> flat 1991 if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { 1992 if (ASC->getSrcAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || 1993 ASC->getSrcAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { 1994 SDValue NonNull 1995 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE); 1996 1997 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), DAG); 1998 SDValue CvtPtr 1999 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture); 2000 2001 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, 2002 DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr), 2003 FlatNullPtr); 2004 } 2005 } 2006 2007 // global <-> flat are no-ops and never emitted. 2008 2009 const MachineFunction &MF = DAG.getMachineFunction(); 2010 DiagnosticInfoUnsupported InvalidAddrSpaceCast( 2011 *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc()); 2012 DAG.getContext()->diagnose(InvalidAddrSpaceCast); 2013 2014 return DAG.getUNDEF(ASC->getValueType(0)); 2015 } 2016 2017 bool 2018 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 2019 // We can fold offsets for anything that doesn't require a GOT relocation. 2020 return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS || 2021 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) && 2022 !shouldEmitGOTReloc(GA->getGlobal()); 2023 } 2024 2025 static SDValue buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV, 2026 SDLoc DL, unsigned Offset, EVT PtrVT, 2027 unsigned GAFlags = SIInstrInfo::MO_NONE) { 2028 // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is 2029 // lowered to the following code sequence: 2030 // 2031 // For constant address space: 2032 // s_getpc_b64 s[0:1] 2033 // s_add_u32 s0, s0, $symbol 2034 // s_addc_u32 s1, s1, 0 2035 // 2036 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 2037 // a fixup or relocation is emitted to replace $symbol with a literal 2038 // constant, which is a pc-relative offset from the encoding of the $symbol 2039 // operand to the global variable. 2040 // 2041 // For global address space: 2042 // s_getpc_b64 s[0:1] 2043 // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo 2044 // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi 2045 // 2046 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 2047 // fixups or relocations are emitted to replace $symbol@*@lo and 2048 // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant, 2049 // which is a 64-bit pc-relative offset from the encoding of the $symbol 2050 // operand to the global variable. 2051 // 2052 // What we want here is an offset from the value returned by s_getpc 2053 // (which is the address of the s_add_u32 instruction) to the global 2054 // variable, but since the encoding of $symbol starts 4 bytes after the start 2055 // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too 2056 // small. 
This requires us to add 4 to the global variable offset in order to 2057 // compute the correct address. 2058 SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, 2059 GAFlags); 2060 SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, 2061 GAFlags == SIInstrInfo::MO_NONE ? 2062 GAFlags : GAFlags + 1); 2063 return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi); 2064 } 2065 2066 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, 2067 SDValue Op, 2068 SelectionDAG &DAG) const { 2069 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op); 2070 2071 if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS && 2072 GSD->getAddressSpace() != AMDGPUAS::GLOBAL_ADDRESS) 2073 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG); 2074 2075 SDLoc DL(GSD); 2076 const GlobalValue *GV = GSD->getGlobal(); 2077 EVT PtrVT = Op.getValueType(); 2078 2079 if (shouldEmitFixup(GV)) 2080 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT); 2081 else if (shouldEmitPCReloc(GV)) 2082 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT, 2083 SIInstrInfo::MO_REL32); 2084 2085 SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT, 2086 SIInstrInfo::MO_GOTPCREL32); 2087 2088 Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext()); 2089 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); 2090 const DataLayout &DataLayout = DAG.getDataLayout(); 2091 unsigned Align = DataLayout.getABITypeAlignment(PtrTy); 2092 // FIXME: Use a PseudoSourceValue once those can be assigned an address space. 2093 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); 2094 2095 return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align, 2096 MachineMemOperand::MODereferenceable | 2097 MachineMemOperand::MOInvariant); 2098 } 2099 2100 SDValue SITargetLowering::lowerTRAP(SDValue Op, 2101 SelectionDAG &DAG) const { 2102 const MachineFunction &MF = DAG.getMachineFunction(); 2103 DiagnosticInfoUnsupported NoTrap(*MF.getFunction(), 2104 "trap handler not supported", 2105 Op.getDebugLoc(), 2106 DS_Warning); 2107 DAG.getContext()->diagnose(NoTrap); 2108 2109 // Emit s_endpgm. 2110 2111 // FIXME: This should really be selected to s_trap, but that requires 2112 // setting up the trap handler for it o do anything. 2113 return DAG.getNode(AMDGPUISD::ENDPGM, SDLoc(Op), MVT::Other, 2114 Op.getOperand(0)); 2115 } 2116 2117 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, 2118 const SDLoc &DL, SDValue V) const { 2119 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as 2120 // the destination register. 2121 // 2122 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions, 2123 // so we will end up with redundant moves to m0. 2124 // 2125 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result. 2126 2127 // A Null SDValue creates a glue result. 2128 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue, 2129 V, Chain); 2130 return SDValue(M0, 0); 2131 } 2132 2133 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, 2134 SDValue Op, 2135 MVT VT, 2136 unsigned Offset) const { 2137 SDLoc SL(Op); 2138 SDValue Param = LowerParameter(DAG, MVT::i32, MVT::i32, SL, 2139 DAG.getEntryNode(), Offset, false); 2140 // The local size values will have the hi 16-bits as zero. 
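// Attach an AssertZext so later combines know the parameter fits in VT (e.g.
// i16) even though it is loaded as a full i32.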
2141 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param, 2142 DAG.getValueType(VT)); 2143 } 2144 2145 static SDValue emitNonHSAIntrinsicError(SelectionDAG& DAG, SDLoc DL, EVT VT) { 2146 DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(), 2147 "non-hsa intrinsic with hsa target", 2148 DL.getDebugLoc()); 2149 DAG.getContext()->diagnose(BadIntrin); 2150 return DAG.getUNDEF(VT); 2151 } 2152 2153 static SDValue emitRemovedIntrinsicError(SelectionDAG& DAG, SDLoc DL, EVT VT) { 2154 DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(), 2155 "intrinsic not supported on subtarget", 2156 DL.getDebugLoc()); 2157 DAG.getContext()->diagnose(BadIntrin); 2158 return DAG.getUNDEF(VT); 2159 } 2160 2161 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 2162 SelectionDAG &DAG) const { 2163 MachineFunction &MF = DAG.getMachineFunction(); 2164 auto MFI = MF.getInfo<SIMachineFunctionInfo>(); 2165 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2166 2167 EVT VT = Op.getValueType(); 2168 SDLoc DL(Op); 2169 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2170 2171 // TODO: Should this propagate fast-math-flags? 2172 2173 switch (IntrinsicID) { 2174 case Intrinsic::amdgcn_dispatch_ptr: 2175 case Intrinsic::amdgcn_queue_ptr: { 2176 if (!Subtarget->isAmdCodeObjectV2()) { 2177 DiagnosticInfoUnsupported BadIntrin( 2178 *MF.getFunction(), "unsupported hsa intrinsic without hsa target", 2179 DL.getDebugLoc()); 2180 DAG.getContext()->diagnose(BadIntrin); 2181 return DAG.getUNDEF(VT); 2182 } 2183 2184 auto Reg = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ? 2185 SIRegisterInfo::DISPATCH_PTR : SIRegisterInfo::QUEUE_PTR; 2186 return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, 2187 TRI->getPreloadedValue(MF, Reg), VT); 2188 } 2189 case Intrinsic::amdgcn_implicitarg_ptr: { 2190 unsigned offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT); 2191 return LowerParameterPtr(DAG, DL, DAG.getEntryNode(), offset); 2192 } 2193 case Intrinsic::amdgcn_kernarg_segment_ptr: { 2194 unsigned Reg 2195 = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR); 2196 return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT); 2197 } 2198 case Intrinsic::amdgcn_dispatch_id: { 2199 unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::DISPATCH_ID); 2200 return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT); 2201 } 2202 case Intrinsic::amdgcn_rcp: 2203 return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1)); 2204 case Intrinsic::amdgcn_rsq: 2205 case AMDGPUIntrinsic::AMDGPU_rsq: // Legacy name 2206 return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 2207 case Intrinsic::amdgcn_rsq_legacy: { 2208 if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) 2209 return emitRemovedIntrinsicError(DAG, DL, VT); 2210 2211 return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1)); 2212 } 2213 case Intrinsic::amdgcn_rcp_legacy: { 2214 if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) 2215 return emitRemovedIntrinsicError(DAG, DL, VT); 2216 return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1)); 2217 } 2218 case Intrinsic::amdgcn_rsq_clamp: { 2219 if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS) 2220 return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1)); 2221 2222 Type *Type = VT.getTypeForEVT(*DAG.getContext()); 2223 APFloat Max = APFloat::getLargest(Type->getFltSemantics()); 2224 APFloat Min = 
APFloat::getLargest(Type->getFltSemantics(), true); 2225 2226 SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 2227 SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq, 2228 DAG.getConstantFP(Max, DL, VT)); 2229 return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp, 2230 DAG.getConstantFP(Min, DL, VT)); 2231 } 2232 case Intrinsic::r600_read_ngroups_x: 2233 if (Subtarget->isAmdHsaOS()) 2234 return emitNonHSAIntrinsicError(DAG, DL, VT); 2235 2236 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2237 SI::KernelInputOffsets::NGROUPS_X, false); 2238 case Intrinsic::r600_read_ngroups_y: 2239 if (Subtarget->isAmdHsaOS()) 2240 return emitNonHSAIntrinsicError(DAG, DL, VT); 2241 2242 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2243 SI::KernelInputOffsets::NGROUPS_Y, false); 2244 case Intrinsic::r600_read_ngroups_z: 2245 if (Subtarget->isAmdHsaOS()) 2246 return emitNonHSAIntrinsicError(DAG, DL, VT); 2247 2248 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2249 SI::KernelInputOffsets::NGROUPS_Z, false); 2250 case Intrinsic::r600_read_global_size_x: 2251 if (Subtarget->isAmdHsaOS()) 2252 return emitNonHSAIntrinsicError(DAG, DL, VT); 2253 2254 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2255 SI::KernelInputOffsets::GLOBAL_SIZE_X, false); 2256 case Intrinsic::r600_read_global_size_y: 2257 if (Subtarget->isAmdHsaOS()) 2258 return emitNonHSAIntrinsicError(DAG, DL, VT); 2259 2260 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2261 SI::KernelInputOffsets::GLOBAL_SIZE_Y, false); 2262 case Intrinsic::r600_read_global_size_z: 2263 if (Subtarget->isAmdHsaOS()) 2264 return emitNonHSAIntrinsicError(DAG, DL, VT); 2265 2266 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2267 SI::KernelInputOffsets::GLOBAL_SIZE_Z, false); 2268 case Intrinsic::r600_read_local_size_x: 2269 if (Subtarget->isAmdHsaOS()) 2270 return emitNonHSAIntrinsicError(DAG, DL, VT); 2271 2272 return lowerImplicitZextParam(DAG, Op, MVT::i16, 2273 SI::KernelInputOffsets::LOCAL_SIZE_X); 2274 case Intrinsic::r600_read_local_size_y: 2275 if (Subtarget->isAmdHsaOS()) 2276 return emitNonHSAIntrinsicError(DAG, DL, VT); 2277 2278 return lowerImplicitZextParam(DAG, Op, MVT::i16, 2279 SI::KernelInputOffsets::LOCAL_SIZE_Y); 2280 case Intrinsic::r600_read_local_size_z: 2281 if (Subtarget->isAmdHsaOS()) 2282 return emitNonHSAIntrinsicError(DAG, DL, VT); 2283 2284 return lowerImplicitZextParam(DAG, Op, MVT::i16, 2285 SI::KernelInputOffsets::LOCAL_SIZE_Z); 2286 case Intrinsic::amdgcn_workgroup_id_x: 2287 case Intrinsic::r600_read_tgid_x: 2288 return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass, 2289 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_X), VT); 2290 case Intrinsic::amdgcn_workgroup_id_y: 2291 case Intrinsic::r600_read_tgid_y: 2292 return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass, 2293 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Y), VT); 2294 case Intrinsic::amdgcn_workgroup_id_z: 2295 case Intrinsic::r600_read_tgid_z: 2296 return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass, 2297 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Z), VT); 2298 case Intrinsic::amdgcn_workitem_id_x: 2299 case Intrinsic::r600_read_tidig_x: 2300 return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, 2301 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X), VT); 2302 case Intrinsic::amdgcn_workitem_id_y: 2303 case Intrinsic::r600_read_tidig_y: 2304 return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, 2305 
TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y), VT); 2306 case Intrinsic::amdgcn_workitem_id_z: 2307 case Intrinsic::r600_read_tidig_z: 2308 return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, 2309 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z), VT); 2310 case AMDGPUIntrinsic::SI_load_const: { 2311 SDValue Ops[] = { 2312 Op.getOperand(1), 2313 Op.getOperand(2) 2314 }; 2315 2316 MachineMemOperand *MMO = MF.getMachineMemOperand( 2317 MachinePointerInfo(), 2318 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | 2319 MachineMemOperand::MOInvariant, 2320 VT.getStoreSize(), 4); 2321 return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL, 2322 Op->getVTList(), Ops, VT, MMO); 2323 } 2324 case AMDGPUIntrinsic::amdgcn_fdiv_fast: { 2325 return lowerFDIV_FAST(Op, DAG); 2326 } 2327 case AMDGPUIntrinsic::SI_vs_load_input: 2328 return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT, 2329 Op.getOperand(1), 2330 Op.getOperand(2), 2331 Op.getOperand(3)); 2332 2333 case AMDGPUIntrinsic::SI_fs_constant: { 2334 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3)); 2335 SDValue Glue = M0.getValue(1); 2336 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, 2337 DAG.getConstant(2, DL, MVT::i32), // P0 2338 Op.getOperand(1), Op.getOperand(2), Glue); 2339 } 2340 case AMDGPUIntrinsic::SI_packf16: 2341 if (Op.getOperand(1).isUndef() && Op.getOperand(2).isUndef()) 2342 return DAG.getUNDEF(MVT::i32); 2343 return Op; 2344 case AMDGPUIntrinsic::SI_fs_interp: { 2345 SDValue IJ = Op.getOperand(4); 2346 SDValue I = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ, 2347 DAG.getConstant(0, DL, MVT::i32)); 2348 SDValue J = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ, 2349 DAG.getConstant(1, DL, MVT::i32)); 2350 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3)); 2351 SDValue Glue = M0.getValue(1); 2352 SDValue P1 = DAG.getNode(AMDGPUISD::INTERP_P1, DL, 2353 DAG.getVTList(MVT::f32, MVT::Glue), 2354 I, Op.getOperand(1), Op.getOperand(2), Glue); 2355 Glue = SDValue(P1.getNode(), 1); 2356 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, P1, J, 2357 Op.getOperand(1), Op.getOperand(2), Glue); 2358 } 2359 case Intrinsic::amdgcn_interp_p1: { 2360 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); 2361 SDValue Glue = M0.getValue(1); 2362 return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1), 2363 Op.getOperand(2), Op.getOperand(3), Glue); 2364 } 2365 case Intrinsic::amdgcn_interp_p2: { 2366 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5)); 2367 SDValue Glue = SDValue(M0.getNode(), 1); 2368 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1), 2369 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4), 2370 Glue); 2371 } 2372 case Intrinsic::amdgcn_sin: 2373 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1)); 2374 2375 case Intrinsic::amdgcn_cos: 2376 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1)); 2377 2378 case Intrinsic::amdgcn_log_clamp: { 2379 if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS) 2380 return SDValue(); 2381 2382 DiagnosticInfoUnsupported BadIntrin( 2383 *MF.getFunction(), "intrinsic not supported on subtarget", 2384 DL.getDebugLoc()); 2385 DAG.getContext()->diagnose(BadIntrin); 2386 return DAG.getUNDEF(VT); 2387 } 2388 case Intrinsic::amdgcn_ldexp: 2389 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, 2390 Op.getOperand(1), Op.getOperand(2)); 2391 2392 case Intrinsic::amdgcn_fract: 2393 return 
DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1)); 2394 2395 case Intrinsic::amdgcn_class: 2396 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT, 2397 Op.getOperand(1), Op.getOperand(2)); 2398 case Intrinsic::amdgcn_div_fmas: 2399 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT, 2400 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), 2401 Op.getOperand(4)); 2402 2403 case Intrinsic::amdgcn_div_fixup: 2404 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT, 2405 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 2406 2407 case Intrinsic::amdgcn_trig_preop: 2408 return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT, 2409 Op.getOperand(1), Op.getOperand(2)); 2410 case Intrinsic::amdgcn_div_scale: { 2411 // 3rd parameter required to be a constant. 2412 const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 2413 if (!Param) 2414 return DAG.getUNDEF(VT); 2415 2416 // Translate to the operands expected by the machine instruction. The 2417 // first parameter must be the same as the first instruction. 2418 SDValue Numerator = Op.getOperand(1); 2419 SDValue Denominator = Op.getOperand(2); 2420 2421 // Note this order is opposite of the machine instruction's operations, 2422 // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The 2423 // intrinsic has the numerator as the first operand to match a normal 2424 // division operation. 2425 2426 SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator; 2427 2428 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0, 2429 Denominator, Numerator); 2430 } 2431 case Intrinsic::amdgcn_icmp: { 2432 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 2433 int CondCode = CD->getSExtValue(); 2434 2435 if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE || 2436 CondCode >= ICmpInst::Predicate::BAD_ICMP_PREDICATE) 2437 return DAG.getUNDEF(VT); 2438 2439 ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode); 2440 ISD::CondCode CCOpcode = getICmpCondCode(IcInput); 2441 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1), 2442 Op.getOperand(2), DAG.getCondCode(CCOpcode)); 2443 } 2444 case Intrinsic::amdgcn_fcmp: { 2445 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 2446 int CondCode = CD->getSExtValue(); 2447 2448 if (CondCode <= FCmpInst::Predicate::FCMP_FALSE || 2449 CondCode >= FCmpInst::Predicate::FCMP_TRUE) 2450 return DAG.getUNDEF(VT); 2451 2452 FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode); 2453 ISD::CondCode CCOpcode = getFCmpCondCode(IcInput); 2454 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1), 2455 Op.getOperand(2), DAG.getCondCode(CCOpcode)); 2456 } 2457 case Intrinsic::amdgcn_fmul_legacy: 2458 return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT, 2459 Op.getOperand(1), Op.getOperand(2)); 2460 case Intrinsic::amdgcn_sffbh: 2461 case AMDGPUIntrinsic::AMDGPU_flbit_i32: // Legacy name. 2462 return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1)); 2463 default: 2464 return AMDGPUTargetLowering::LowerOperation(Op, DAG); 2465 } 2466 } 2467 2468 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, 2469 SelectionDAG &DAG) const { 2470 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2471 switch (IntrID) { 2472 case Intrinsic::amdgcn_atomic_inc: 2473 case Intrinsic::amdgcn_atomic_dec: { 2474 MemSDNode *M = cast<MemSDNode>(Op); 2475 unsigned Opc = (IntrID == Intrinsic::amdgcn_atomic_inc) ? 
2476 AMDGPUISD::ATOMIC_INC : AMDGPUISD::ATOMIC_DEC; 2477 SDValue Ops[] = { 2478 M->getOperand(0), // Chain 2479 M->getOperand(2), // Ptr 2480 M->getOperand(3) // Value 2481 }; 2482 2483 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops, 2484 M->getMemoryVT(), M->getMemOperand()); 2485 } 2486 default: 2487 return SDValue(); 2488 } 2489 } 2490 2491 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, 2492 SelectionDAG &DAG) const { 2493 MachineFunction &MF = DAG.getMachineFunction(); 2494 SDLoc DL(Op); 2495 SDValue Chain = Op.getOperand(0); 2496 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2497 2498 switch (IntrinsicID) { 2499 case AMDGPUIntrinsic::SI_sendmsg: { 2500 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3)); 2501 SDValue Glue = Chain.getValue(1); 2502 return DAG.getNode(AMDGPUISD::SENDMSG, DL, MVT::Other, Chain, 2503 Op.getOperand(2), Glue); 2504 } 2505 case AMDGPUIntrinsic::SI_tbuffer_store: { 2506 SDValue Ops[] = { 2507 Chain, 2508 Op.getOperand(2), 2509 Op.getOperand(3), 2510 Op.getOperand(4), 2511 Op.getOperand(5), 2512 Op.getOperand(6), 2513 Op.getOperand(7), 2514 Op.getOperand(8), 2515 Op.getOperand(9), 2516 Op.getOperand(10), 2517 Op.getOperand(11), 2518 Op.getOperand(12), 2519 Op.getOperand(13), 2520 Op.getOperand(14) 2521 }; 2522 2523 EVT VT = Op.getOperand(3).getValueType(); 2524 2525 MachineMemOperand *MMO = MF.getMachineMemOperand( 2526 MachinePointerInfo(), 2527 MachineMemOperand::MOStore, 2528 VT.getStoreSize(), 4); 2529 return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL, 2530 Op->getVTList(), Ops, VT, MMO); 2531 } 2532 case AMDGPUIntrinsic::AMDGPU_kill: { 2533 SDValue Src = Op.getOperand(2); 2534 if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) { 2535 if (!K->isNegative()) 2536 return Chain; 2537 2538 SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32); 2539 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne); 2540 } 2541 2542 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src); 2543 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast); 2544 } 2545 default: 2546 return SDValue(); 2547 } 2548 } 2549 2550 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 2551 SDLoc DL(Op); 2552 LoadSDNode *Load = cast<LoadSDNode>(Op); 2553 ISD::LoadExtType ExtType = Load->getExtensionType(); 2554 EVT MemVT = Load->getMemoryVT(); 2555 2556 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) { 2557 assert(MemVT == MVT::i1 && "Only i1 non-extloads expected"); 2558 // FIXME: Copied from PPC 2559 // First, load into 32 bits, then truncate to 1 bit. 
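// That is, emit an extending i8 load to i32 and truncate the result back to
// the i1 memory type, returning both the truncated value and the new chain.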
2560 2561 SDValue Chain = Load->getChain(); 2562 SDValue BasePtr = Load->getBasePtr(); 2563 MachineMemOperand *MMO = Load->getMemOperand(); 2564 2565 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, 2566 BasePtr, MVT::i8, MMO); 2567 2568 SDValue Ops[] = { 2569 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD), 2570 NewLD.getValue(1) 2571 }; 2572 2573 return DAG.getMergeValues(Ops, DL); 2574 } 2575 2576 if (!MemVT.isVector()) 2577 return SDValue(); 2578 2579 assert(Op.getValueType().getVectorElementType() == MVT::i32 && 2580 "Custom lowering for non-i32 vectors hasn't been implemented."); 2581 2582 unsigned AS = Load->getAddressSpace(); 2583 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT, 2584 AS, Load->getAlignment())) { 2585 SDValue Ops[2]; 2586 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG); 2587 return DAG.getMergeValues(Ops, DL); 2588 } 2589 2590 unsigned NumElements = MemVT.getVectorNumElements(); 2591 switch (AS) { 2592 case AMDGPUAS::CONSTANT_ADDRESS: 2593 if (isMemOpUniform(Load)) 2594 return SDValue(); 2595 // Non-uniform loads will be selected to MUBUF instructions, so they 2596 // have the same legalization requires ments as global and private 2597 // loads. 2598 // 2599 LLVM_FALLTHROUGH; 2600 case AMDGPUAS::GLOBAL_ADDRESS: 2601 case AMDGPUAS::FLAT_ADDRESS: 2602 if (NumElements > 4) 2603 return SplitVectorLoad(Op, DAG); 2604 // v4 loads are supported for private and global memory. 2605 return SDValue(); 2606 case AMDGPUAS::PRIVATE_ADDRESS: { 2607 // Depending on the setting of the private_element_size field in the 2608 // resource descriptor, we can only make private accesses up to a certain 2609 // size. 2610 switch (Subtarget->getMaxPrivateElementSize()) { 2611 case 4: 2612 return scalarizeVectorLoad(Load, DAG); 2613 case 8: 2614 if (NumElements > 2) 2615 return SplitVectorLoad(Op, DAG); 2616 return SDValue(); 2617 case 16: 2618 // Same as global/flat 2619 if (NumElements > 4) 2620 return SplitVectorLoad(Op, DAG); 2621 return SDValue(); 2622 default: 2623 llvm_unreachable("unsupported private_element_size"); 2624 } 2625 } 2626 case AMDGPUAS::LOCAL_ADDRESS: { 2627 if (NumElements > 2) 2628 return SplitVectorLoad(Op, DAG); 2629 2630 if (NumElements == 2) 2631 return SDValue(); 2632 2633 // If properly aligned, if we split we might be able to use ds_read_b64. 
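// (Anything that falls through to here is conservatively split as well.)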
2634 return SplitVectorLoad(Op, DAG); 2635 } 2636 default: 2637 return SDValue(); 2638 } 2639 } 2640 2641 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2642 if (Op.getValueType() != MVT::i64) 2643 return SDValue(); 2644 2645 SDLoc DL(Op); 2646 SDValue Cond = Op.getOperand(0); 2647 2648 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 2649 SDValue One = DAG.getConstant(1, DL, MVT::i32); 2650 2651 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); 2652 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); 2653 2654 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); 2655 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); 2656 2657 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); 2658 2659 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); 2660 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); 2661 2662 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); 2663 2664 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi}); 2665 return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res); 2666 } 2667 2668 // Catch division cases where we can use shortcuts with rcp and rsq 2669 // instructions. 2670 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op, 2671 SelectionDAG &DAG) const { 2672 SDLoc SL(Op); 2673 SDValue LHS = Op.getOperand(0); 2674 SDValue RHS = Op.getOperand(1); 2675 EVT VT = Op.getValueType(); 2676 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath; 2677 2678 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { 2679 if ((Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals()))) { 2680 2681 if (CLHS->isExactlyValue(1.0)) { 2682 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to 2683 // the CI documentation has a worst case error of 1 ulp. 2684 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to 2685 // use it as long as we aren't trying to use denormals. 2686 2687 // 1.0 / sqrt(x) -> rsq(x) 2688 // 2689 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP 2690 // error seems really high at 2^29 ULP. 2691 if (RHS.getOpcode() == ISD::FSQRT) 2692 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); 2693 2694 // 1.0 / x -> rcp(x) 2695 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 2696 } 2697 2698 // Same as for 1.0, but expand the sign out of the constant. 2699 if (CLHS->isExactlyValue(-1.0)) { 2700 // -1.0 / x -> rcp (fneg x) 2701 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 2702 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS); 2703 } 2704 } 2705 } 2706 2707 const SDNodeFlags *Flags = Op->getFlags(); 2708 2709 if (Unsafe || Flags->hasAllowReciprocal()) { 2710 // Turn into multiply by the reciprocal. 2711 // x / y -> x * (1.0 / y) 2712 SDNodeFlags Flags; 2713 Flags.setUnsafeAlgebra(true); 2714 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 2715 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, &Flags); 2716 } 2717 2718 return SDValue(); 2719 } 2720 2721 // Faster 2.5 ULP division that does not support denormals. 
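// When |RHS| is larger than 2^96 the denominator is pre-scaled by 2^-32 so the
// reciprocal stays out of the denormal range, and the final product is
// multiplied by the same factor to cancel the scaling.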
2722 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const { 2723 SDLoc SL(Op); 2724 SDValue LHS = Op.getOperand(1); 2725 SDValue RHS = Op.getOperand(2); 2726 2727 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); 2728 2729 const APFloat K0Val(BitsToFloat(0x6f800000)); 2730 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32); 2731 2732 const APFloat K1Val(BitsToFloat(0x2f800000)); 2733 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32); 2734 2735 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 2736 2737 EVT SetCCVT = 2738 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); 2739 2740 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); 2741 2742 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); 2743 2744 // TODO: Should this propagate fast-math-flags? 2745 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); 2746 2747 // rcp does not support denormals. 2748 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); 2749 2750 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); 2751 2752 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); 2753 } 2754 2755 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { 2756 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 2757 return FastLowered; 2758 2759 SDLoc SL(Op); 2760 SDValue LHS = Op.getOperand(0); 2761 SDValue RHS = Op.getOperand(1); 2762 2763 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 2764 2765 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1); 2766 2767 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, RHS, RHS, LHS); 2768 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, LHS, RHS, LHS); 2769 2770 // Denominator is scaled to not be denormal, so using rcp is ok. 
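// The approximate reciprocal is refined below with a chain of FMAs
// (Fma0..Fma4), and div_fmas / div_fixup then apply the scale and handle the
// special cases (e.g. division by zero or infinity).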
2771 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, DenominatorScaled); 2772 2773 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, DenominatorScaled); 2774 2775 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f32, NegDivScale0, ApproxRcp, One); 2776 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, ApproxRcp); 2777 2778 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, NumeratorScaled, Fma1); 2779 2780 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, NumeratorScaled); 2781 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul); 2782 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, NumeratorScaled); 2783 2784 SDValue Scale = NumeratorScaled.getValue(1); 2785 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32, Fma4, Fma1, Fma3, Scale); 2786 2787 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS); 2788 } 2789 2790 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { 2791 if (DAG.getTarget().Options.UnsafeFPMath) 2792 return lowerFastUnsafeFDIV(Op, DAG); 2793 2794 SDLoc SL(Op); 2795 SDValue X = Op.getOperand(0); 2796 SDValue Y = Op.getOperand(1); 2797 2798 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); 2799 2800 SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1); 2801 2802 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X); 2803 2804 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0); 2805 2806 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0); 2807 2808 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One); 2809 2810 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp); 2811 2812 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One); 2813 2814 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X); 2815 2816 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1); 2817 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3); 2818 2819 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64, 2820 NegDivScale0, Mul, DivScale1); 2821 2822 SDValue Scale; 2823 2824 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) { 2825 // Workaround a hardware bug on SI where the condition output from div_scale 2826 // is not usable. 2827 2828 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32); 2829 2830 // Figure out if the scale to use for div_fmas. 
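// Compare the high dwords of the div_scale results with those of the original
// operands to reconstruct which input was scaled, instead of relying on the
// unusable VCC output.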
2831 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); 2832 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y); 2833 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0); 2834 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1); 2835 2836 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi); 2837 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi); 2838 2839 SDValue Scale0Hi 2840 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi); 2841 SDValue Scale1Hi 2842 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi); 2843 2844 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ); 2845 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ); 2846 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen); 2847 } else { 2848 Scale = DivScale1.getValue(1); 2849 } 2850 2851 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64, 2852 Fma4, Fma3, Mul, Scale); 2853 2854 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X); 2855 } 2856 2857 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { 2858 EVT VT = Op.getValueType(); 2859 2860 if (VT == MVT::f32) 2861 return LowerFDIV32(Op, DAG); 2862 2863 if (VT == MVT::f64) 2864 return LowerFDIV64(Op, DAG); 2865 2866 llvm_unreachable("Unexpected type for fdiv"); 2867 } 2868 2869 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 2870 SDLoc DL(Op); 2871 StoreSDNode *Store = cast<StoreSDNode>(Op); 2872 EVT VT = Store->getMemoryVT(); 2873 2874 if (VT == MVT::i1) { 2875 return DAG.getTruncStore(Store->getChain(), DL, 2876 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32), 2877 Store->getBasePtr(), MVT::i1, Store->getMemOperand()); 2878 } 2879 2880 assert(VT.isVector() && 2881 Store->getValue().getValueType().getScalarType() == MVT::i32); 2882 2883 unsigned AS = Store->getAddressSpace(); 2884 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT, 2885 AS, Store->getAlignment())) { 2886 return expandUnalignedStore(Store, DAG); 2887 } 2888 2889 unsigned NumElements = VT.getVectorNumElements(); 2890 switch (AS) { 2891 case AMDGPUAS::GLOBAL_ADDRESS: 2892 case AMDGPUAS::FLAT_ADDRESS: 2893 if (NumElements > 4) 2894 return SplitVectorStore(Op, DAG); 2895 return SDValue(); 2896 case AMDGPUAS::PRIVATE_ADDRESS: { 2897 switch (Subtarget->getMaxPrivateElementSize()) { 2898 case 4: 2899 return scalarizeVectorStore(Store, DAG); 2900 case 8: 2901 if (NumElements > 2) 2902 return SplitVectorStore(Op, DAG); 2903 return SDValue(); 2904 case 16: 2905 if (NumElements > 4) 2906 return SplitVectorStore(Op, DAG); 2907 return SDValue(); 2908 default: 2909 llvm_unreachable("unsupported private_element_size"); 2910 } 2911 } 2912 case AMDGPUAS::LOCAL_ADDRESS: { 2913 if (NumElements > 2) 2914 return SplitVectorStore(Op, DAG); 2915 2916 if (NumElements == 2) 2917 return Op; 2918 2919 // If properly aligned, if we split we might be able to use ds_write_b64. 2920 return SplitVectorStore(Op, DAG); 2921 } 2922 default: 2923 llvm_unreachable("unhandled address space"); 2924 } 2925 } 2926 2927 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { 2928 SDLoc DL(Op); 2929 EVT VT = Op.getValueType(); 2930 SDValue Arg = Op.getOperand(0); 2931 // TODO: Should this propagate fast-math-flags? 
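// Scale the argument from radians into units of 2*pi (the period expected by
// the hardware sin/cos) and take the fractional part for range reduction.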
2932 SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT, 2933 DAG.getNode(ISD::FMUL, DL, VT, Arg, 2934 DAG.getConstantFP(0.5/M_PI, DL, 2935 VT))); 2936 2937 switch (Op.getOpcode()) { 2938 case ISD::FCOS: 2939 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart); 2940 case ISD::FSIN: 2941 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart); 2942 default: 2943 llvm_unreachable("Wrong trig opcode"); 2944 } 2945 } 2946 2947 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const { 2948 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op); 2949 assert(AtomicNode->isCompareAndSwap()); 2950 unsigned AS = AtomicNode->getAddressSpace(); 2951 2952 // No custom lowering required for local address space 2953 if (!isFlatGlobalAddrSpace(AS)) 2954 return Op; 2955 2956 // Non-local address space requires custom lowering for atomic compare 2957 // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2 2958 SDLoc DL(Op); 2959 SDValue ChainIn = Op.getOperand(0); 2960 SDValue Addr = Op.getOperand(1); 2961 SDValue Old = Op.getOperand(2); 2962 SDValue New = Op.getOperand(3); 2963 EVT VT = Op.getValueType(); 2964 MVT SimpleVT = VT.getSimpleVT(); 2965 MVT VecType = MVT::getVectorVT(SimpleVT, 2); 2966 2967 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old}); 2968 SDValue Ops[] = { ChainIn, Addr, NewOld }; 2969 2970 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(), 2971 Ops, VT, AtomicNode->getMemOperand()); 2972 } 2973 2974 //===----------------------------------------------------------------------===// 2975 // Custom DAG optimizations 2976 //===----------------------------------------------------------------------===// 2977 2978 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, 2979 DAGCombinerInfo &DCI) const { 2980 EVT VT = N->getValueType(0); 2981 EVT ScalarVT = VT.getScalarType(); 2982 if (ScalarVT != MVT::f32) 2983 return SDValue(); 2984 2985 SelectionDAG &DAG = DCI.DAG; 2986 SDLoc DL(N); 2987 2988 SDValue Src = N->getOperand(0); 2989 EVT SrcVT = Src.getValueType(); 2990 2991 // TODO: We could try to match extracting the higher bytes, which would be 2992 // easier if i8 vectors weren't promoted to i32 vectors, particularly after 2993 // types are legalized. v4i8 -> v4f32 is probably the only case to worry 2994 // about in practice. 2995 if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) { 2996 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { 2997 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src); 2998 DCI.AddToWorklist(Cvt.getNode()); 2999 return Cvt; 3000 } 3001 } 3002 3003 return SDValue(); 3004 } 3005 3006 /// \brief Return true if the given offset Size in bytes can be folded into 3007 /// the immediate offsets of a memory instruction for the given address space. 3008 static bool canFoldOffset(unsigned OffsetSize, unsigned AS, 3009 const SISubtarget &STI) { 3010 switch (AS) { 3011 case AMDGPUAS::GLOBAL_ADDRESS: { 3012 // MUBUF instructions a 12-bit offset in bytes. 3013 return isUInt<12>(OffsetSize); 3014 } 3015 case AMDGPUAS::CONSTANT_ADDRESS: { 3016 // SMRD instructions have an 8-bit offset in dwords on SI and 3017 // a 20-bit offset in bytes on VI. 
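// On SI the byte offset therefore has to be dword aligned and fit in 8 bits
// once divided by 4.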
3018 if (STI.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) 3019 return isUInt<20>(OffsetSize); 3020 else 3021 return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4); 3022 } 3023 case AMDGPUAS::LOCAL_ADDRESS: 3024 case AMDGPUAS::REGION_ADDRESS: { 3025 // The single offset versions have a 16-bit offset in bytes. 3026 return isUInt<16>(OffsetSize); 3027 } 3028 case AMDGPUAS::PRIVATE_ADDRESS: 3029 // Indirect register addressing does not use any offsets. 3030 default: 3031 return 0; 3032 } 3033 } 3034 3035 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2) 3036 3037 // This is a variant of 3038 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2), 3039 // 3040 // The normal DAG combiner will do this, but only if the add has one use since 3041 // that would increase the number of instructions. 3042 // 3043 // This prevents us from seeing a constant offset that can be folded into a 3044 // memory instruction's addressing mode. If we know the resulting add offset of 3045 // a pointer can be folded into an addressing offset, we can replace the pointer 3046 // operand with the add of new constant offset. This eliminates one of the uses, 3047 // and may allow the remaining use to also be simplified. 3048 // 3049 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N, 3050 unsigned AddrSpace, 3051 DAGCombinerInfo &DCI) const { 3052 SDValue N0 = N->getOperand(0); 3053 SDValue N1 = N->getOperand(1); 3054 3055 if (N0.getOpcode() != ISD::ADD) 3056 return SDValue(); 3057 3058 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1); 3059 if (!CN1) 3060 return SDValue(); 3061 3062 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 3063 if (!CAdd) 3064 return SDValue(); 3065 3066 // If the resulting offset is too large, we can't fold it into the addressing 3067 // mode offset. 3068 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); 3069 if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *getSubtarget())) 3070 return SDValue(); 3071 3072 SelectionDAG &DAG = DCI.DAG; 3073 SDLoc SL(N); 3074 EVT VT = N->getValueType(0); 3075 3076 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); 3077 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32); 3078 3079 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset); 3080 } 3081 3082 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) { 3083 return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) || 3084 (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) || 3085 (Opc == ISD::XOR && Val == 0); 3086 } 3087 3088 // Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This 3089 // will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit 3090 // integer combine opportunities since most 64-bit operations are decomposed 3091 // this way. TODO: We won't want this for SALU especially if it is an inline 3092 // immediate. 3093 SDValue SITargetLowering::splitBinaryBitConstantOp( 3094 DAGCombinerInfo &DCI, 3095 const SDLoc &SL, 3096 unsigned Opc, SDValue LHS, 3097 const ConstantSDNode *CRHS) const { 3098 uint64_t Val = CRHS->getZExtValue(); 3099 uint32_t ValLo = Lo_32(Val); 3100 uint32_t ValHi = Hi_32(Val); 3101 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3102 3103 if ((bitOpWithConstantIsReducible(Opc, ValLo) || 3104 bitOpWithConstantIsReducible(Opc, ValHi)) || 3105 (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) { 3106 // If we need to materialize a 64-bit immediate, it will be split up later 3107 // anyway. 
Avoid creating the harder to understand 64-bit immediate 3108 // materialization. 3109 return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi); 3110 } 3111 3112 return SDValue(); 3113 } 3114 3115 SDValue SITargetLowering::performAndCombine(SDNode *N, 3116 DAGCombinerInfo &DCI) const { 3117 if (DCI.isBeforeLegalize()) 3118 return SDValue(); 3119 3120 SelectionDAG &DAG = DCI.DAG; 3121 EVT VT = N->getValueType(0); 3122 SDValue LHS = N->getOperand(0); 3123 SDValue RHS = N->getOperand(1); 3124 3125 3126 if (VT == MVT::i64) { 3127 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); 3128 if (CRHS) { 3129 if (SDValue Split 3130 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS)) 3131 return Split; 3132 } 3133 } 3134 3135 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> 3136 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) 3137 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) { 3138 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 3139 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); 3140 3141 SDValue X = LHS.getOperand(0); 3142 SDValue Y = RHS.getOperand(0); 3143 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) 3144 return SDValue(); 3145 3146 if (LCC == ISD::SETO) { 3147 if (X != LHS.getOperand(1)) 3148 return SDValue(); 3149 3150 if (RCC == ISD::SETUNE) { 3151 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); 3152 if (!C1 || !C1->isInfinity() || C1->isNegative()) 3153 return SDValue(); 3154 3155 const uint32_t Mask = SIInstrFlags::N_NORMAL | 3156 SIInstrFlags::N_SUBNORMAL | 3157 SIInstrFlags::N_ZERO | 3158 SIInstrFlags::P_ZERO | 3159 SIInstrFlags::P_SUBNORMAL | 3160 SIInstrFlags::P_NORMAL; 3161 3162 static_assert(((~(SIInstrFlags::S_NAN | 3163 SIInstrFlags::Q_NAN | 3164 SIInstrFlags::N_INFINITY | 3165 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, 3166 "mask not equal"); 3167 3168 SDLoc DL(N); 3169 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 3170 X, DAG.getConstant(Mask, DL, MVT::i32)); 3171 } 3172 } 3173 } 3174 3175 return SDValue(); 3176 } 3177 3178 SDValue SITargetLowering::performOrCombine(SDNode *N, 3179 DAGCombinerInfo &DCI) const { 3180 SelectionDAG &DAG = DCI.DAG; 3181 SDValue LHS = N->getOperand(0); 3182 SDValue RHS = N->getOperand(1); 3183 3184 EVT VT = N->getValueType(0); 3185 if (VT == MVT::i1) { 3186 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) 3187 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && 3188 RHS.getOpcode() == AMDGPUISD::FP_CLASS) { 3189 SDValue Src = LHS.getOperand(0); 3190 if (Src != RHS.getOperand(0)) 3191 return SDValue(); 3192 3193 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); 3194 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 3195 if (!CLHS || !CRHS) 3196 return SDValue(); 3197 3198 // Only 10 bits are used. 3199 static const uint32_t MaxMask = 0x3ff; 3200 3201 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; 3202 SDLoc DL(N); 3203 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 3204 Src, DAG.getConstant(NewMask, DL, MVT::i32)); 3205 } 3206 3207 return SDValue(); 3208 } 3209 3210 if (VT != MVT::i64) 3211 return SDValue(); 3212 3213 // TODO: This could be a generic combine with a predicate for extracting the 3214 // high half of an integer being free. 
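// (On this target the high 32 bits of a 64-bit value live in their own
// subregister, so extracting them is free.)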
3215 3216 // (or i64:x, (zero_extend i32:y)) -> 3217 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x))) 3218 if (LHS.getOpcode() == ISD::ZERO_EXTEND && 3219 RHS.getOpcode() != ISD::ZERO_EXTEND) 3220 std::swap(LHS, RHS); 3221 3222 if (RHS.getOpcode() == ISD::ZERO_EXTEND) { 3223 SDValue ExtSrc = RHS.getOperand(0); 3224 EVT SrcVT = ExtSrc.getValueType(); 3225 if (SrcVT == MVT::i32) { 3226 SDLoc SL(N); 3227 SDValue LowLHS, HiBits; 3228 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG); 3229 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc); 3230 3231 DCI.AddToWorklist(LowOr.getNode()); 3232 DCI.AddToWorklist(HiBits.getNode()); 3233 3234 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 3235 LowOr, HiBits); 3236 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 3237 } 3238 } 3239 3240 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 3241 if (CRHS) { 3242 if (SDValue Split 3243 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS)) 3244 return Split; 3245 } 3246 3247 return SDValue(); 3248 } 3249 3250 SDValue SITargetLowering::performXorCombine(SDNode *N, 3251 DAGCombinerInfo &DCI) const { 3252 EVT VT = N->getValueType(0); 3253 if (VT != MVT::i64) 3254 return SDValue(); 3255 3256 SDValue LHS = N->getOperand(0); 3257 SDValue RHS = N->getOperand(1); 3258 3259 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); 3260 if (CRHS) { 3261 if (SDValue Split 3262 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS)) 3263 return Split; 3264 } 3265 3266 return SDValue(); 3267 } 3268 3269 SDValue SITargetLowering::performClassCombine(SDNode *N, 3270 DAGCombinerInfo &DCI) const { 3271 SelectionDAG &DAG = DCI.DAG; 3272 SDValue Mask = N->getOperand(1); 3273 3274 // fp_class x, 0 -> false 3275 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { 3276 if (CMask->isNullValue()) 3277 return DAG.getConstant(0, SDLoc(N), MVT::i1); 3278 } 3279 3280 if (N->getOperand(0).isUndef()) 3281 return DAG.getUNDEF(MVT::i1); 3282 3283 return SDValue(); 3284 } 3285 3286 // Constant fold canonicalize. 3287 SDValue SITargetLowering::performFCanonicalizeCombine( 3288 SDNode *N, 3289 DAGCombinerInfo &DCI) const { 3290 ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); 3291 if (!CFP) 3292 return SDValue(); 3293 3294 SelectionDAG &DAG = DCI.DAG; 3295 const APFloat &C = CFP->getValueAPF(); 3296 3297 // Flush denormals to 0 if not enabled. 3298 if (C.isDenormal()) { 3299 EVT VT = N->getValueType(0); 3300 if (VT == MVT::f32 && !Subtarget->hasFP32Denormals()) 3301 return DAG.getConstantFP(0.0, SDLoc(N), VT); 3302 3303 if (VT == MVT::f64 && !Subtarget->hasFP64Denormals()) 3304 return DAG.getConstantFP(0.0, SDLoc(N), VT); 3305 } 3306 3307 if (C.isNaN()) { 3308 EVT VT = N->getValueType(0); 3309 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics()); 3310 if (C.isSignaling()) { 3311 // Quiet a signaling NaN. 3312 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT); 3313 } 3314 3315 // Make sure it is the canonical NaN bitpattern. 3316 // 3317 // TODO: Can we use -1 as the canonical NaN value since it's an inline 3318 // immediate? 
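    // For f32, APFloat::getQNaN gives the positive quiet NaN with an empty
    // payload, 0x7fc00000. Note that -1 (0xffffffff) is also a quiet NaN
    // encoding (all-ones exponent, nonzero mantissa), which is why the TODO
    // above suggests it as a cheaper inline immediate.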
3319     if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
3320       return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
3321   }
3322
3323   return SDValue(CFP, 0);
3324 }
3325
3326 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
3327   switch (Opc) {
3328   case ISD::FMAXNUM:
3329     return AMDGPUISD::FMAX3;
3330   case ISD::SMAX:
3331     return AMDGPUISD::SMAX3;
3332   case ISD::UMAX:
3333     return AMDGPUISD::UMAX3;
3334   case ISD::FMINNUM:
3335     return AMDGPUISD::FMIN3;
3336   case ISD::SMIN:
3337     return AMDGPUISD::SMIN3;
3338   case ISD::UMIN:
3339     return AMDGPUISD::UMIN3;
3340   default:
3341     llvm_unreachable("Not a min/max opcode");
3342   }
3343 }
3344
3345 static SDValue performIntMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
3346                                         SDValue Op0, SDValue Op1, bool Signed) {
3347   ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
3348   if (!K1)
3349     return SDValue();
3350
3351   ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
3352   if (!K0)
3353     return SDValue();
3354
3355   if (Signed) {
3356     if (K0->getAPIntValue().sge(K1->getAPIntValue()))
3357       return SDValue();
3358   } else {
3359     if (K0->getAPIntValue().uge(K1->getAPIntValue()))
3360       return SDValue();
3361   }
3362
3363   EVT VT = K0->getValueType(0);
3364   return DAG.getNode(Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3, SL, VT,
3365                      Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
3366 }
3367
3368 static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
3369   if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions())
3370     return true;
3371
3372   return DAG.isKnownNeverNaN(Op);
3373 }
3374
3375 static SDValue performFPMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
3376                                        SDValue Op0, SDValue Op1) {
3377   ConstantFPSDNode *K1 = dyn_cast<ConstantFPSDNode>(Op1);
3378   if (!K1)
3379     return SDValue();
3380
3381   ConstantFPSDNode *K0 = dyn_cast<ConstantFPSDNode>(Op0.getOperand(1));
3382   if (!K0)
3383     return SDValue();
3384
3385   // Ordered >= (although NaN inputs should have folded away by now).
3386   APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
3387   if (Cmp == APFloat::cmpGreaterThan)
3388     return SDValue();
3389
3390   // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
3391   // signaling NaN gives a quiet NaN. The quiet NaN input to the min would then
3392   // give the other result, which is different from med3 with a NaN input.
3393   SDValue Var = Op0.getOperand(0);
3394   if (!isKnownNeverSNan(DAG, Var))
3395     return SDValue();
3396
3397   return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
3398                      Var, SDValue(K0, 0), SDValue(K1, 0));
3399 }
3400
3401 SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
3402                                                DAGCombinerInfo &DCI) const {
3403   SelectionDAG &DAG = DCI.DAG;
3404
3405   unsigned Opc = N->getOpcode();
3406   SDValue Op0 = N->getOperand(0);
3407   SDValue Op1 = N->getOperand(1);
3408
3409   // Only do this if the inner op has one use, since this would just increase
3410   // register pressure for no benefit.
3411
3412   if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY) {
3413     // max(max(a, b), c) -> max3(a, b, c)
3414     // min(min(a, b), c) -> min3(a, b, c)
3415     if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
3416       SDLoc DL(N);
3417       return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
3418                          DL,
3419                          N->getValueType(0),
3420                          Op0.getOperand(0),
3421                          Op0.getOperand(1),
3422                          Op1);
3423     }
3424
3425     // Try commuted.
3426 // max(a, max(b, c)) -> max3(a, b, c) 3427 // min(a, min(b, c)) -> min3(a, b, c) 3428 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { 3429 SDLoc DL(N); 3430 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 3431 DL, 3432 N->getValueType(0), 3433 Op0, 3434 Op1.getOperand(0), 3435 Op1.getOperand(1)); 3436 } 3437 } 3438 3439 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1) 3440 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) { 3441 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true)) 3442 return Med3; 3443 } 3444 3445 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) { 3446 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false)) 3447 return Med3; 3448 } 3449 3450 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1) 3451 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) || 3452 (Opc == AMDGPUISD::FMIN_LEGACY && 3453 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) && 3454 N->getValueType(0) == MVT::f32 && Op0.hasOneUse()) { 3455 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1)) 3456 return Res; 3457 } 3458 3459 return SDValue(); 3460 } 3461 3462 SDValue SITargetLowering::performSetCCCombine(SDNode *N, 3463 DAGCombinerInfo &DCI) const { 3464 SelectionDAG &DAG = DCI.DAG; 3465 SDLoc SL(N); 3466 3467 SDValue LHS = N->getOperand(0); 3468 SDValue RHS = N->getOperand(1); 3469 EVT VT = LHS.getValueType(); 3470 3471 if (VT != MVT::f32 && VT != MVT::f64) 3472 return SDValue(); 3473 3474 // Match isinf pattern 3475 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) 3476 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 3477 if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) { 3478 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); 3479 if (!CRHS) 3480 return SDValue(); 3481 3482 const APFloat &APF = CRHS->getValueAPF(); 3483 if (APF.isInfinity() && !APF.isNegative()) { 3484 unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY; 3485 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0), 3486 DAG.getConstant(Mask, SL, MVT::i32)); 3487 } 3488 } 3489 3490 return SDValue(); 3491 } 3492 3493 SDValue SITargetLowering::PerformDAGCombine(SDNode *N, 3494 DAGCombinerInfo &DCI) const { 3495 SelectionDAG &DAG = DCI.DAG; 3496 SDLoc DL(N); 3497 3498 switch (N->getOpcode()) { 3499 default: 3500 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 3501 case ISD::SETCC: 3502 return performSetCCCombine(N, DCI); 3503 case ISD::FMAXNUM: 3504 case ISD::FMINNUM: 3505 case ISD::SMAX: 3506 case ISD::SMIN: 3507 case ISD::UMAX: 3508 case ISD::UMIN: 3509 case AMDGPUISD::FMIN_LEGACY: 3510 case AMDGPUISD::FMAX_LEGACY: { 3511 if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG && 3512 N->getValueType(0) != MVT::f64 && 3513 getTargetMachine().getOptLevel() > CodeGenOpt::None) 3514 return performMinMaxCombine(N, DCI); 3515 break; 3516 } 3517 3518 case AMDGPUISD::CVT_F32_UBYTE0: 3519 case AMDGPUISD::CVT_F32_UBYTE1: 3520 case AMDGPUISD::CVT_F32_UBYTE2: 3521 case AMDGPUISD::CVT_F32_UBYTE3: { 3522 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; 3523 SDValue Src = N->getOperand(0); 3524 3525 // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero. 
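    // A shift by a whole number of bytes just renumbers the byte being
    // converted: cvt_f32_ubyteN (srl x, 8 * M) reads byte N + M of x, so it
    // can fold to cvt_f32_ubyte(N + M) x as long as N + M still addresses one
    // of the low four bytes. The cases below are instances of this rule.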
3526     if (Src.getOpcode() == ISD::SRL) {
3527       // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
3528       // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
3529       // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
3530
3531       if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src.getOperand(1))) {
3532         unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
3533         if (SrcOffset < 32 && SrcOffset % 8 == 0) {
3534           return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, DL,
3535                              MVT::f32, Src.getOperand(0));
3536         }
3537       }
3538     }
3539
3540     APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
3541
3542     APInt KnownZero, KnownOne;
3543     TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
3544                                           !DCI.isBeforeLegalizeOps());
3545     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3546     if (TLO.ShrinkDemandedConstant(Src, Demanded) ||
3547         TLI.SimplifyDemandedBits(Src, Demanded, KnownZero, KnownOne, TLO)) {
3548       DCI.CommitTargetLoweringOpt(TLO);
3549     }
3550
3551     break;
3552   }
3553
3554   case ISD::UINT_TO_FP: {
3555     return performUCharToFloatCombine(N, DCI);
3556   }
3557   case ISD::FADD: {
3558     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3559       break;
3560
3561     EVT VT = N->getValueType(0);
3562     if (VT != MVT::f32)
3563       break;
3564
3565     // Only do this if we are not trying to support denormals. v_mad_f32 does
3566     // not support denormals ever.
3567     if (Subtarget->hasFP32Denormals())
3568       break;
3569
3570     SDValue LHS = N->getOperand(0);
3571     SDValue RHS = N->getOperand(1);
3572
3573     // These should really be instruction patterns, but writing patterns with
3574     // source modifiers is a pain.
3575
3576     // fadd (fadd (a, a), b) -> mad 2.0, a, b
3577     if (LHS.getOpcode() == ISD::FADD) {
3578       SDValue A = LHS.getOperand(0);
3579       if (A == LHS.getOperand(1)) {
3580         const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
3581         return DAG.getNode(ISD::FMAD, DL, VT, Two, A, RHS);
3582       }
3583     }
3584
3585     // fadd (b, fadd (a, a)) -> mad 2.0, a, b
3586     if (RHS.getOpcode() == ISD::FADD) {
3587       SDValue A = RHS.getOperand(0);
3588       if (A == RHS.getOperand(1)) {
3589         const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
3590         return DAG.getNode(ISD::FMAD, DL, VT, Two, A, LHS);
3591       }
3592     }
3593
3594     return SDValue();
3595   }
3596   case ISD::FSUB: {
3597     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3598       break;
3599
3600     EVT VT = N->getValueType(0);
3601
3602     // Try to get the fneg to fold into the source modifier. This undoes generic
3603     // DAG combines and folds them into the mad.
3604     //
3605     // Only do this if we are not trying to support denormals. v_mad_f32 does
3606     // not support denormals ever.
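    // The folds below are plain algebra on the repeated-addend form:
    //   (a + a) - c = 2.0 * a + (-c)   -> fmad  2.0, a, (fneg c)
    //   c - (a + a) = (-2.0) * a + c   -> fmad -2.0, a, c
    // where the fneg is expected to become a source modifier on the mad.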
3607 if (VT == MVT::f32 && 3608 !Subtarget->hasFP32Denormals()) { 3609 SDValue LHS = N->getOperand(0); 3610 SDValue RHS = N->getOperand(1); 3611 if (LHS.getOpcode() == ISD::FADD) { 3612 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c) 3613 3614 SDValue A = LHS.getOperand(0); 3615 if (A == LHS.getOperand(1)) { 3616 const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32); 3617 SDValue NegRHS = DAG.getNode(ISD::FNEG, DL, VT, RHS); 3618 3619 return DAG.getNode(ISD::FMAD, DL, VT, Two, A, NegRHS); 3620 } 3621 } 3622 3623 if (RHS.getOpcode() == ISD::FADD) { 3624 // (fsub c, (fadd a, a)) -> mad -2.0, a, c 3625 3626 SDValue A = RHS.getOperand(0); 3627 if (A == RHS.getOperand(1)) { 3628 const SDValue NegTwo = DAG.getConstantFP(-2.0, DL, MVT::f32); 3629 return DAG.getNode(ISD::FMAD, DL, VT, NegTwo, A, LHS); 3630 } 3631 } 3632 3633 return SDValue(); 3634 } 3635 3636 break; 3637 } 3638 case ISD::LOAD: 3639 case ISD::STORE: 3640 case ISD::ATOMIC_LOAD: 3641 case ISD::ATOMIC_STORE: 3642 case ISD::ATOMIC_CMP_SWAP: 3643 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 3644 case ISD::ATOMIC_SWAP: 3645 case ISD::ATOMIC_LOAD_ADD: 3646 case ISD::ATOMIC_LOAD_SUB: 3647 case ISD::ATOMIC_LOAD_AND: 3648 case ISD::ATOMIC_LOAD_OR: 3649 case ISD::ATOMIC_LOAD_XOR: 3650 case ISD::ATOMIC_LOAD_NAND: 3651 case ISD::ATOMIC_LOAD_MIN: 3652 case ISD::ATOMIC_LOAD_MAX: 3653 case ISD::ATOMIC_LOAD_UMIN: 3654 case ISD::ATOMIC_LOAD_UMAX: 3655 case AMDGPUISD::ATOMIC_INC: 3656 case AMDGPUISD::ATOMIC_DEC: { // TODO: Target mem intrinsics. 3657 if (DCI.isBeforeLegalize()) 3658 break; 3659 3660 MemSDNode *MemNode = cast<MemSDNode>(N); 3661 SDValue Ptr = MemNode->getBasePtr(); 3662 3663 // TODO: We could also do this for multiplies. 3664 unsigned AS = MemNode->getAddressSpace(); 3665 if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUAS::PRIVATE_ADDRESS) { 3666 SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI); 3667 if (NewPtr) { 3668 SmallVector<SDValue, 8> NewOps(MemNode->op_begin(), MemNode->op_end()); 3669 3670 NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr; 3671 return SDValue(DAG.UpdateNodeOperands(MemNode, NewOps), 0); 3672 } 3673 } 3674 break; 3675 } 3676 case ISD::AND: 3677 return performAndCombine(N, DCI); 3678 case ISD::OR: 3679 return performOrCombine(N, DCI); 3680 case ISD::XOR: 3681 return performXorCombine(N, DCI); 3682 case AMDGPUISD::FP_CLASS: 3683 return performClassCombine(N, DCI); 3684 case ISD::FCANONICALIZE: 3685 return performFCanonicalizeCombine(N, DCI); 3686 case AMDGPUISD::FRACT: 3687 case AMDGPUISD::RCP: 3688 case AMDGPUISD::RSQ: 3689 case AMDGPUISD::RCP_LEGACY: 3690 case AMDGPUISD::RSQ_LEGACY: 3691 case AMDGPUISD::RSQ_CLAMP: 3692 case AMDGPUISD::LDEXP: { 3693 SDValue Src = N->getOperand(0); 3694 if (Src.isUndef()) 3695 return Src; 3696 break; 3697 } 3698 } 3699 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 3700 } 3701 3702 /// \brief Helper function for adjustWritemask 3703 static unsigned SubIdx2Lane(unsigned Idx) { 3704 switch (Idx) { 3705 default: return 0; 3706 case AMDGPU::sub0: return 0; 3707 case AMDGPU::sub1: return 1; 3708 case AMDGPU::sub2: return 2; 3709 case AMDGPU::sub3: return 3; 3710 } 3711 } 3712 3713 /// \brief Adjust the writemask of MIMG instructions 3714 void SITargetLowering::adjustWritemask(MachineSDNode *&Node, 3715 SelectionDAG &DAG) const { 3716 SDNode *Users[4] = { }; 3717 unsigned Lane = 0; 3718 unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 
    2 : 3;
3719   unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
3720   unsigned NewDmask = 0;
3721
3722   // Try to figure out the used register components
3723   for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
3724        I != E; ++I) {
3725
3726     // Abort if we can't understand the usage
3727     if (!I->isMachineOpcode() ||
3728         I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
3729       return;
3730
3731     // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used.
3732     // Note that subregs are packed, i.e. Lane==0 is the first bit set
3733     // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
3734     // set, etc.
3735     Lane = SubIdx2Lane(I->getConstantOperandVal(1));
3736
3737     // Set which texture component corresponds to the lane.
3738     unsigned Comp;
3739     for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
3740       assert(Dmask);
3741       Comp = countTrailingZeros(Dmask);
3742       Dmask &= ~(1 << Comp);
3743     }
3744
3745     // Abort if we have more than one user per component
3746     if (Users[Lane])
3747       return;
3748
3749     Users[Lane] = *I;
3750     NewDmask |= 1 << Comp;
3751   }
3752
3753   // Abort if there's no change
3754   if (NewDmask == OldDmask)
3755     return;
3756
3757   // Adjust the writemask in the node
3758   std::vector<SDValue> Ops;
3759   Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
3760   Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
3761   Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
3762   Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops);
3763
3764   // If we only got one lane, replace it with a copy
3765   // (if NewDmask has only one bit set...)
3766   if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
3767     SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, SDLoc(),
3768                                        MVT::i32);
3769     SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
3770                                       SDLoc(), Users[Lane]->getValueType(0),
3771                                       SDValue(Node, 0), RC);
3772     DAG.ReplaceAllUsesWith(Users[Lane], Copy);
3773     return;
3774   }
3775
3776   // Update the users of the node with the new indices
3777   for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
3778
3779     SDNode *User = Users[i];
3780     if (!User)
3781       continue;
3782
3783     SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
3784     DAG.UpdateNodeOperands(User, User->getOperand(0), Op);
3785
3786     switch (Idx) {
3787     default: break;
3788     case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
3789     case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
3790     case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
3791     }
3792   }
3793 }
3794
3795 static bool isFrameIndexOp(SDValue Op) {
3796   if (Op.getOpcode() == ISD::AssertZext)
3797     Op = Op.getOperand(0);
3798
3799   return isa<FrameIndexSDNode>(Op);
3800 }
3801
3802 /// \brief Legalize target-independent instructions (e.g. INSERT_SUBREG)
3803 /// with frame index operands.
3804 /// LLVM assumes that inputs to these instructions are registers.
3805 void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
3806                                                      SelectionDAG &DAG) const {
3807
3808   SmallVector<SDValue, 8> Ops;
3809   for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
3810     if (!isFrameIndexOp(Node->getOperand(i))) {
3811       Ops.push_back(Node->getOperand(i));
3812       continue;
3813     }
3814
3815     SDLoc DL(Node);
3816     Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
3817                                              Node->getOperand(i).getValueType(),
3818                                              Node->getOperand(i)), 0));
3819   }
3820
3821   DAG.UpdateNodeOperands(Node, Ops);
3822 }
3823
3824 /// \brief Fold the instructions after selecting them.
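/// Currently this shrinks the dmask of MIMG loads to the components that are
/// actually used (see adjustWritemask above) and legalizes frame index
/// operands of INSERT_SUBREG and REG_SEQUENCE nodes.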
3825 SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, 3826 SelectionDAG &DAG) const { 3827 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3828 unsigned Opcode = Node->getMachineOpcode(); 3829 3830 if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() && 3831 !TII->isGather4(Opcode)) 3832 adjustWritemask(Node, DAG); 3833 3834 if (Opcode == AMDGPU::INSERT_SUBREG || 3835 Opcode == AMDGPU::REG_SEQUENCE) { 3836 legalizeTargetIndependentNode(Node, DAG); 3837 return Node; 3838 } 3839 return Node; 3840 } 3841 3842 /// \brief Assign the register class depending on the number of 3843 /// bits set in the writemask 3844 void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, 3845 SDNode *Node) const { 3846 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3847 3848 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 3849 3850 if (TII->isVOP3(MI.getOpcode())) { 3851 // Make sure constant bus requirements are respected. 3852 TII->legalizeOperandsVOP3(MRI, MI); 3853 return; 3854 } 3855 3856 if (TII->isMIMG(MI)) { 3857 unsigned VReg = MI.getOperand(0).getReg(); 3858 unsigned DmaskIdx = MI.getNumOperands() == 12 ? 3 : 4; 3859 unsigned Writemask = MI.getOperand(DmaskIdx).getImm(); 3860 unsigned BitsSet = 0; 3861 for (unsigned i = 0; i < 4; ++i) 3862 BitsSet += Writemask & (1 << i) ? 1 : 0; 3863 3864 const TargetRegisterClass *RC; 3865 switch (BitsSet) { 3866 default: return; 3867 case 1: RC = &AMDGPU::VGPR_32RegClass; break; 3868 case 2: RC = &AMDGPU::VReg_64RegClass; break; 3869 case 3: RC = &AMDGPU::VReg_96RegClass; break; 3870 } 3871 3872 unsigned NewOpcode = TII->getMaskedMIMGOp(MI.getOpcode(), BitsSet); 3873 MI.setDesc(TII->get(NewOpcode)); 3874 MRI.setRegClass(VReg, RC); 3875 return; 3876 } 3877 3878 // Replace unused atomics with the no return version. 3879 int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode()); 3880 if (NoRetAtomicOp != -1) { 3881 if (!Node->hasAnyUseOfValue(0)) { 3882 MI.setDesc(TII->get(NoRetAtomicOp)); 3883 MI.RemoveOperand(0); 3884 return; 3885 } 3886 3887 // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg 3888 // instruction, because the return type of these instructions is a vec2 of 3889 // the memory type, so it can be tied to the input operand. 3890 // This means these instructions always have a use, so we need to add a 3891 // special case to check if the atomic has only one extract_subreg use, 3892 // which itself has no uses. 3893 if ((Node->hasNUsesOfValue(1, 0) && 3894 Node->use_begin()->isMachineOpcode() && 3895 Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG && 3896 !Node->use_begin()->hasAnyUseOfValue(0))) { 3897 unsigned Def = MI.getOperand(0).getReg(); 3898 3899 // Change this into a noret atomic. 3900 MI.setDesc(TII->get(NoRetAtomicOp)); 3901 MI.RemoveOperand(0); 3902 3903 // If we only remove the def operand from the atomic instruction, the 3904 // extract_subreg will be left with a use of a vreg without a def. 3905 // So we need to insert an implicit_def to avoid machine verifier 3906 // errors. 
3907 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), 3908 TII->get(AMDGPU::IMPLICIT_DEF), Def); 3909 } 3910 return; 3911 } 3912 } 3913 3914 static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL, 3915 uint64_t Val) { 3916 SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32); 3917 return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0); 3918 } 3919 3920 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, 3921 const SDLoc &DL, 3922 SDValue Ptr) const { 3923 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3924 3925 // Build the half of the subregister with the constants before building the 3926 // full 128-bit register. If we are building multiple resource descriptors, 3927 // this will allow CSEing of the 2-component register. 3928 const SDValue Ops0[] = { 3929 DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32), 3930 buildSMovImm32(DAG, DL, 0), 3931 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), 3932 buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32), 3933 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32) 3934 }; 3935 3936 SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, 3937 MVT::v2i32, Ops0), 0); 3938 3939 // Combine the constants and the pointer. 3940 const SDValue Ops1[] = { 3941 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), 3942 Ptr, 3943 DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32), 3944 SubRegHi, 3945 DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32) 3946 }; 3947 3948 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); 3949 } 3950 3951 /// \brief Return a resource descriptor with the 'Add TID' bit enabled 3952 /// The TID (Thread ID) is multiplied by the stride value (bits [61:48] 3953 /// of the resource descriptor) to create an offset, which is added to 3954 /// the resource pointer. 
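/// The descriptor is assembled as a 128-bit REG_SEQUENCE: dwords 0-1 hold the
/// pointer, with \p RsrcDword1 OR'd into the high half (e.g. the stride and
/// 'Add TID' bits), and dwords 2-3 come from \p RsrcDword2And3 (e.g. the
/// number of records and the default data format).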
3955 MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL, 3956 SDValue Ptr, uint32_t RsrcDword1, 3957 uint64_t RsrcDword2And3) const { 3958 SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr); 3959 SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr); 3960 if (RsrcDword1) { 3961 PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi, 3962 DAG.getConstant(RsrcDword1, DL, MVT::i32)), 3963 0); 3964 } 3965 3966 SDValue DataLo = buildSMovImm32(DAG, DL, 3967 RsrcDword2And3 & UINT64_C(0xFFFFFFFF)); 3968 SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32); 3969 3970 const SDValue Ops[] = { 3971 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), 3972 PtrLo, 3973 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), 3974 PtrHi, 3975 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32), 3976 DataLo, 3977 DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32), 3978 DataHi, 3979 DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32) 3980 }; 3981 3982 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops); 3983 } 3984 3985 SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG, 3986 const TargetRegisterClass *RC, 3987 unsigned Reg, EVT VT) const { 3988 SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT); 3989 3990 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()), 3991 cast<RegisterSDNode>(VReg)->getReg(), VT); 3992 } 3993 3994 //===----------------------------------------------------------------------===// 3995 // SI Inline Assembly Support 3996 //===----------------------------------------------------------------------===// 3997 3998 std::pair<unsigned, const TargetRegisterClass *> 3999 SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 4000 StringRef Constraint, 4001 MVT VT) const { 4002 4003 if (Constraint.size() == 1) { 4004 switch (Constraint[0]) { 4005 case 's': 4006 case 'r': 4007 switch (VT.getSizeInBits()) { 4008 default: 4009 return std::make_pair(0U, nullptr); 4010 case 32: 4011 return std::make_pair(0U, &AMDGPU::SReg_32RegClass); 4012 case 64: 4013 return std::make_pair(0U, &AMDGPU::SGPR_64RegClass); 4014 case 128: 4015 return std::make_pair(0U, &AMDGPU::SReg_128RegClass); 4016 case 256: 4017 return std::make_pair(0U, &AMDGPU::SReg_256RegClass); 4018 } 4019 4020 case 'v': 4021 switch (VT.getSizeInBits()) { 4022 default: 4023 return std::make_pair(0U, nullptr); 4024 case 32: 4025 return std::make_pair(0U, &AMDGPU::VGPR_32RegClass); 4026 case 64: 4027 return std::make_pair(0U, &AMDGPU::VReg_64RegClass); 4028 case 96: 4029 return std::make_pair(0U, &AMDGPU::VReg_96RegClass); 4030 case 128: 4031 return std::make_pair(0U, &AMDGPU::VReg_128RegClass); 4032 case 256: 4033 return std::make_pair(0U, &AMDGPU::VReg_256RegClass); 4034 case 512: 4035 return std::make_pair(0U, &AMDGPU::VReg_512RegClass); 4036 } 4037 } 4038 } 4039 4040 if (Constraint.size() > 1) { 4041 const TargetRegisterClass *RC = nullptr; 4042 if (Constraint[1] == 'v') { 4043 RC = &AMDGPU::VGPR_32RegClass; 4044 } else if (Constraint[1] == 's') { 4045 RC = &AMDGPU::SGPR_32RegClass; 4046 } 4047 4048 if (RC) { 4049 uint32_t Idx; 4050 bool Failed = Constraint.substr(2).getAsInteger(10, Idx); 4051 if (!Failed && Idx < RC->getNumRegs()) 4052 return std::make_pair(RC->getRegister(Idx), RC); 4053 } 4054 } 4055 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 4056 } 4057 4058 SITargetLowering::ConstraintType 4059 
SITargetLowering::getConstraintType(StringRef Constraint) const { 4060 if (Constraint.size() == 1) { 4061 switch (Constraint[0]) { 4062 default: break; 4063 case 's': 4064 case 'v': 4065 return C_RegisterClass; 4066 } 4067 } 4068 return TargetLowering::getConstraintType(Constraint); 4069 } 4070
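// Illustrative (hypothetical) use of the constraints handled above, as it
// would appear in device code rather than in this file: 'v' requests a VGPR
// and 's' an SGPR of the width matching the operand, resolved by
// getRegForInlineAsmConstraint() and classified by getConstraintType().
//
//   float in = 1.0f;   // hypothetical values
//   float out;
//   __asm__ volatile("v_mov_b32 %0, %1" : "=v"(out) : "s"(in));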