1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 /// \file 11 /// \brief Custom DAG lowering for SI 12 // 13 //===----------------------------------------------------------------------===// 14 15 #ifdef _MSC_VER 16 // Provide M_PI. 17 #define _USE_MATH_DEFINES 18 #include <cmath> 19 #endif 20 21 #include "AMDGPU.h" 22 #include "AMDGPUIntrinsicInfo.h" 23 #include "AMDGPUSubtarget.h" 24 #include "SIISelLowering.h" 25 #include "SIInstrInfo.h" 26 #include "SIMachineFunctionInfo.h" 27 #include "SIRegisterInfo.h" 28 #include "llvm/ADT/BitVector.h" 29 #include "llvm/ADT/StringSwitch.h" 30 #include "llvm/CodeGen/CallingConvLower.h" 31 #include "llvm/CodeGen/MachineInstrBuilder.h" 32 #include "llvm/CodeGen/MachineRegisterInfo.h" 33 #include "llvm/CodeGen/SelectionDAG.h" 34 #include "llvm/IR/DiagnosticInfo.h" 35 #include "llvm/IR/Function.h" 36 37 using namespace llvm; 38 39 static unsigned findFirstFreeSGPR(CCState &CCInfo) { 40 unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs(); 41 for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) { 42 if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) { 43 return AMDGPU::SGPR0 + Reg; 44 } 45 } 46 llvm_unreachable("Cannot allocate sgpr"); 47 } 48 49 SITargetLowering::SITargetLowering(TargetMachine &TM, 50 const AMDGPUSubtarget &STI) 51 : AMDGPUTargetLowering(TM, STI) { 52 addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass); 53 addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass); 54 55 addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass); 56 addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass); 57 58 addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass); 59 addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass); 60 addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass); 61 62 addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass); 63 addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass); 64 65 addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass); 66 addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass); 67 68 addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass); 69 addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass); 70 71 addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass); 72 addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass); 73 74 computeRegisterProperties(STI.getRegisterInfo()); 75 76 // We need to custom lower vector stores from local memory 77 setOperationAction(ISD::LOAD, MVT::v2i32, Custom); 78 setOperationAction(ISD::LOAD, MVT::v4i32, Custom); 79 setOperationAction(ISD::LOAD, MVT::v8i32, Custom); 80 setOperationAction(ISD::LOAD, MVT::v16i32, Custom); 81 setOperationAction(ISD::LOAD, MVT::i1, Custom); 82 83 setOperationAction(ISD::STORE, MVT::v2i32, Custom); 84 setOperationAction(ISD::STORE, MVT::v4i32, Custom); 85 setOperationAction(ISD::STORE, MVT::v8i32, Custom); 86 setOperationAction(ISD::STORE, MVT::v16i32, Custom); 87 setOperationAction(ISD::STORE, MVT::i1, Custom); 88 89 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 90 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); 91 setOperationAction(ISD::FrameIndex, MVT::i32, Custom); 92 setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand); 93 94 setOperationAction(ISD::SELECT, MVT::i1, Promote); 95 setOperationAction(ISD::SELECT, MVT::i64, Custom); 96 
setOperationAction(ISD::SELECT, MVT::f64, Promote); 97 AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64); 98 99 setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); 100 setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); 101 setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); 102 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); 103 setOperationAction(ISD::SELECT_CC, MVT::i1, Expand); 104 105 setOperationAction(ISD::SETCC, MVT::i1, Promote); 106 setOperationAction(ISD::SETCC, MVT::v2i1, Expand); 107 setOperationAction(ISD::SETCC, MVT::v4i1, Expand); 108 109 setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand); 110 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand); 111 112 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom); 113 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom); 114 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom); 115 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom); 116 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom); 117 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom); 118 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom); 119 120 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom); 121 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom); 122 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); 123 124 setOperationAction(ISD::BRCOND, MVT::Other, Custom); 125 setOperationAction(ISD::BR_CC, MVT::i1, Expand); 126 setOperationAction(ISD::BR_CC, MVT::i32, Expand); 127 setOperationAction(ISD::BR_CC, MVT::i64, Expand); 128 setOperationAction(ISD::BR_CC, MVT::f32, Expand); 129 setOperationAction(ISD::BR_CC, MVT::f64, Expand); 130 131 // We only support LOAD/STORE and vector manipulation ops for vectors 132 // with > 4 elements. 133 for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32, MVT::v2i64, MVT::v2f64}) { 134 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { 135 switch (Op) { 136 case ISD::LOAD: 137 case ISD::STORE: 138 case ISD::BUILD_VECTOR: 139 case ISD::BITCAST: 140 case ISD::EXTRACT_VECTOR_ELT: 141 case ISD::INSERT_VECTOR_ELT: 142 case ISD::INSERT_SUBVECTOR: 143 case ISD::EXTRACT_SUBVECTOR: 144 case ISD::SCALAR_TO_VECTOR: 145 break; 146 case ISD::CONCAT_VECTORS: 147 setOperationAction(Op, VT, Custom); 148 break; 149 default: 150 setOperationAction(Op, VT, Expand); 151 break; 152 } 153 } 154 } 155 156 // Most operations are naturally 32-bit vector operations. We only support 157 // load and store of i64 vectors, so promote v2i64 vector operations to v4i32. 
158 for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) { 159 setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); 160 AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32); 161 162 setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); 163 AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32); 164 165 setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); 166 AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32); 167 168 setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); 169 AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32); 170 } 171 172 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand); 173 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand); 174 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand); 175 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand); 176 177 // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling, 178 // and output demarshalling 179 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); 180 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); 181 182 // We can't return success/failure, only the old value, 183 // let LLVM add the comparison 184 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand); 185 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand); 186 187 if (Subtarget->hasFlatAddressSpace()) { 188 setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom); 189 setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom); 190 } 191 192 setOperationAction(ISD::BSWAP, MVT::i32, Legal); 193 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); 194 195 // On SI this is s_memtime and s_memrealtime on VI. 196 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal); 197 198 setOperationAction(ISD::FMINNUM, MVT::f64, Legal); 199 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); 200 201 if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) { 202 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 203 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 204 setOperationAction(ISD::FRINT, MVT::f64, Legal); 205 } 206 207 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 208 209 setOperationAction(ISD::FSIN, MVT::f32, Custom); 210 setOperationAction(ISD::FCOS, MVT::f32, Custom); 211 setOperationAction(ISD::FDIV, MVT::f32, Custom); 212 setOperationAction(ISD::FDIV, MVT::f64, Custom); 213 214 215 setTargetDAGCombine(ISD::FADD); 216 setTargetDAGCombine(ISD::FSUB); 217 setTargetDAGCombine(ISD::FMINNUM); 218 setTargetDAGCombine(ISD::FMAXNUM); 219 setTargetDAGCombine(ISD::SMIN); 220 setTargetDAGCombine(ISD::SMAX); 221 setTargetDAGCombine(ISD::UMIN); 222 setTargetDAGCombine(ISD::UMAX); 223 setTargetDAGCombine(ISD::SETCC); 224 setTargetDAGCombine(ISD::AND); 225 setTargetDAGCombine(ISD::OR); 226 setTargetDAGCombine(ISD::UINT_TO_FP); 227 setTargetDAGCombine(ISD::FCANONICALIZE); 228 229 // All memory operations. Some folding on the pointer operand is done to help 230 // matching the constant offsets in the addressing modes. 
231 setTargetDAGCombine(ISD::LOAD); 232 setTargetDAGCombine(ISD::STORE); 233 setTargetDAGCombine(ISD::ATOMIC_LOAD); 234 setTargetDAGCombine(ISD::ATOMIC_STORE); 235 setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP); 236 setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 237 setTargetDAGCombine(ISD::ATOMIC_SWAP); 238 setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD); 239 setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB); 240 setTargetDAGCombine(ISD::ATOMIC_LOAD_AND); 241 setTargetDAGCombine(ISD::ATOMIC_LOAD_OR); 242 setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR); 243 setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND); 244 setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN); 245 setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX); 246 setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN); 247 setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX); 248 249 setSchedulingPreference(Sched::RegPressure); 250 } 251 252 //===----------------------------------------------------------------------===// 253 // TargetLowering queries 254 //===----------------------------------------------------------------------===// 255 256 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 257 const CallInst &CI, 258 unsigned IntrID) const { 259 switch (IntrID) { 260 case Intrinsic::amdgcn_atomic_inc: 261 case Intrinsic::amdgcn_atomic_dec: 262 Info.opc = ISD::INTRINSIC_W_CHAIN; 263 Info.memVT = MVT::getVT(CI.getType()); 264 Info.ptrVal = CI.getOperand(0); 265 Info.align = 0; 266 Info.vol = false; 267 Info.readMem = true; 268 Info.writeMem = true; 269 return true; 270 default: 271 return false; 272 } 273 } 274 275 bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &, 276 EVT) const { 277 // SI has some legal vector types, but no legal vector operations. Say no 278 // shuffles are legal in order to prefer scalarizing some vector operations. 279 return false; 280 } 281 282 bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const { 283 // Flat instructions do not have offsets, and only have the register 284 // address. 285 return AM.BaseOffs == 0 && (AM.Scale == 0 || AM.Scale == 1); 286 } 287 288 bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const { 289 // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and 290 // additionally can do r + r + i with addr64. 32-bit has more addressing 291 // mode options. Depending on the resource constant, it can also do 292 // (i64 r0) + (i32 r1) * (i14 i). 293 // 294 // Private arrays end up using a scratch buffer most of the time, so also 295 // assume those use MUBUF instructions. Scratch loads / stores are currently 296 // implemented as mubuf instructions with offen bit set, so slightly 297 // different than the normal addr64. 298 if (!isUInt<12>(AM.BaseOffs)) 299 return false; 300 301 // FIXME: Since we can split immediate into soffset and immediate offset, 302 // would it make sense to allow any immediate? 303 304 switch (AM.Scale) { 305 case 0: // r + i or just i, depending on HasBaseReg. 306 return true; 307 case 1: 308 return true; // We have r + r or r + i. 309 case 2: 310 if (AM.HasBaseReg) { 311 // Reject 2 * r + r. 312 return false; 313 } 314 315 // Allow 2 * r as r + r 316 // Or 2 * r + i is allowed as r + r + i. 317 return true; 318 default: // Don't allow n * r 319 return false; 320 } 321 } 322 323 bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL, 324 const AddrMode &AM, Type *Ty, 325 unsigned AS) const { 326 // No global is ever allowed as a base. 
327 if (AM.BaseGV) 328 return false; 329 330 switch (AS) { 331 case AMDGPUAS::GLOBAL_ADDRESS: { 332 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 333 // Assume the we will use FLAT for all global memory accesses 334 // on VI. 335 // FIXME: This assumption is currently wrong. On VI we still use 336 // MUBUF instructions for the r + i addressing mode. As currently 337 // implemented, the MUBUF instructions only work on buffer < 4GB. 338 // It may be possible to support > 4GB buffers with MUBUF instructions, 339 // by setting the stride value in the resource descriptor which would 340 // increase the size limit to (stride * 4GB). However, this is risky, 341 // because it has never been validated. 342 return isLegalFlatAddressingMode(AM); 343 } 344 345 return isLegalMUBUFAddressingMode(AM); 346 } 347 case AMDGPUAS::CONSTANT_ADDRESS: { 348 // If the offset isn't a multiple of 4, it probably isn't going to be 349 // correctly aligned. 350 if (AM.BaseOffs % 4 != 0) 351 return isLegalMUBUFAddressingMode(AM); 352 353 // There are no SMRD extloads, so if we have to do a small type access we 354 // will use a MUBUF load. 355 // FIXME?: We also need to do this if unaligned, but we don't know the 356 // alignment here. 357 if (DL.getTypeStoreSize(Ty) < 4) 358 return isLegalMUBUFAddressingMode(AM); 359 360 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) { 361 // SMRD instructions have an 8-bit, dword offset on SI. 362 if (!isUInt<8>(AM.BaseOffs / 4)) 363 return false; 364 } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) { 365 // On CI+, this can also be a 32-bit literal constant offset. If it fits 366 // in 8-bits, it can use a smaller encoding. 367 if (!isUInt<32>(AM.BaseOffs / 4)) 368 return false; 369 } else if (Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS) { 370 // On VI, these use the SMEM format and the offset is 20-bit in bytes. 371 if (!isUInt<20>(AM.BaseOffs)) 372 return false; 373 } else 374 llvm_unreachable("unhandled generation"); 375 376 if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg. 377 return true; 378 379 if (AM.Scale == 1 && AM.HasBaseReg) 380 return true; 381 382 return false; 383 } 384 385 case AMDGPUAS::PRIVATE_ADDRESS: 386 return isLegalMUBUFAddressingMode(AM); 387 388 case AMDGPUAS::LOCAL_ADDRESS: 389 case AMDGPUAS::REGION_ADDRESS: { 390 // Basic, single offset DS instructions allow a 16-bit unsigned immediate 391 // field. 392 // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have 393 // an 8-bit dword offset but we don't know the alignment here. 394 if (!isUInt<16>(AM.BaseOffs)) 395 return false; 396 397 if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg. 398 return true; 399 400 if (AM.Scale == 1 && AM.HasBaseReg) 401 return true; 402 403 return false; 404 } 405 case AMDGPUAS::FLAT_ADDRESS: 406 case AMDGPUAS::UNKNOWN_ADDRESS_SPACE: 407 // For an unknown address space, this usually means that this is for some 408 // reason being used for pure arithmetic, and not based on some addressing 409 // computation. We don't have instructions that compute pointers with any 410 // addressing modes, so treat them as having no offset like flat 411 // instructions. 
412 return isLegalFlatAddressingMode(AM); 413 414 default: 415 llvm_unreachable("unhandled address space"); 416 } 417 } 418 419 bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT, 420 unsigned AddrSpace, 421 unsigned Align, 422 bool *IsFast) const { 423 if (IsFast) 424 *IsFast = false; 425 426 // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96, 427 // which isn't a simple VT. 428 if (!VT.isSimple() || VT == MVT::Other) 429 return false; 430 431 // TODO - CI+ supports unaligned memory accesses, but this requires driver 432 // support. 433 434 // XXX - The only mention I see of this in the ISA manual is for LDS direct 435 // reads the "byte address and must be dword aligned". Is it also true for the 436 // normal loads and stores? 437 if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS) { 438 // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte 439 // aligned, 8 byte access in a single operation using ds_read2/write2_b32 440 // with adjacent offsets. 441 bool AlignedBy4 = (Align % 4 == 0); 442 if (IsFast) 443 *IsFast = AlignedBy4; 444 return AlignedBy4; 445 } 446 447 // Smaller than dword value must be aligned. 448 // FIXME: This should be allowed on CI+ 449 if (VT.bitsLT(MVT::i32)) 450 return false; 451 452 // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the 453 // byte-address are ignored, thus forcing Dword alignment. 454 // This applies to private, global, and constant memory. 455 if (IsFast) 456 *IsFast = true; 457 458 return VT.bitsGT(MVT::i32) && Align % 4 == 0; 459 } 460 461 EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign, 462 unsigned SrcAlign, bool IsMemset, 463 bool ZeroMemset, 464 bool MemcpyStrSrc, 465 MachineFunction &MF) const { 466 // FIXME: Should account for address space here. 467 468 // The default fallback uses the private pointer size as a guess for a type to 469 // use. Make sure we switch these to 64-bit accesses. 470 471 if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global 472 return MVT::v4i32; 473 474 if (Size >= 8 && DstAlign >= 4) 475 return MVT::v2i32; 476 477 // Use the default. 478 return MVT::Other; 479 } 480 481 static bool isFlatGlobalAddrSpace(unsigned AS) { 482 return AS == AMDGPUAS::GLOBAL_ADDRESS || 483 AS == AMDGPUAS::FLAT_ADDRESS || 484 AS == AMDGPUAS::CONSTANT_ADDRESS; 485 } 486 487 bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS, 488 unsigned DestAS) const { 489 return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS); 490 } 491 492 493 bool SITargetLowering::isMemOpUniform(const SDNode *N) const { 494 const MemSDNode *MemNode = cast<MemSDNode>(N); 495 const Value *Ptr = MemNode->getMemOperand()->getValue(); 496 497 // UndefValue means this is a load of a kernel input. These are uniform. 
498 // Sometimes LDS instructions have constant pointers 499 if (isa<UndefValue>(Ptr) || isa<Argument>(Ptr) || isa<Constant>(Ptr) || 500 isa<GlobalValue>(Ptr)) 501 return true; 502 503 const Instruction *I = dyn_cast_or_null<Instruction>(Ptr); 504 return I && I->getMetadata("amdgpu.uniform"); 505 } 506 507 TargetLoweringBase::LegalizeTypeAction 508 SITargetLowering::getPreferredVectorAction(EVT VT) const { 509 if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16)) 510 return TypeSplitVector; 511 512 return TargetLoweringBase::getPreferredVectorAction(VT); 513 } 514 515 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 516 Type *Ty) const { 517 const SIInstrInfo *TII = 518 static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); 519 return TII->isInlineConstant(Imm); 520 } 521 522 bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const { 523 524 // SimplifySetCC uses this function to determine whether or not it should 525 // create setcc with i1 operands. We don't have instructions for i1 setcc. 526 if (VT == MVT::i1 && Op == ISD::SETCC) 527 return false; 528 529 return TargetLowering::isTypeDesirableForOp(Op, VT); 530 } 531 532 SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT, 533 SDLoc SL, SDValue Chain, 534 unsigned Offset, bool Signed) const { 535 const DataLayout &DL = DAG.getDataLayout(); 536 MachineFunction &MF = DAG.getMachineFunction(); 537 const SIRegisterInfo *TRI = 538 static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo()); 539 unsigned InputPtrReg = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR); 540 541 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 542 543 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 544 MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS); 545 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); 546 SDValue BasePtr = DAG.getCopyFromReg(Chain, SL, 547 MRI.getLiveInVirtReg(InputPtrReg), PtrVT); 548 SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr, 549 DAG.getConstant(Offset, SL, PtrVT)); 550 SDValue PtrOffset = DAG.getUNDEF(PtrVT); 551 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); 552 553 unsigned Align = DL.getABITypeAlignment(Ty); 554 555 ISD::LoadExtType ExtTy = Signed ? 
ISD::SEXTLOAD : ISD::ZEXTLOAD; 556 if (MemVT.isFloatingPoint()) 557 ExtTy = ISD::EXTLOAD; 558 559 return DAG.getLoad(ISD::UNINDEXED, ExtTy, 560 VT, SL, Chain, Ptr, PtrOffset, PtrInfo, MemVT, 561 false, // isVolatile 562 true, // isNonTemporal 563 true, // isInvariant 564 Align); // Alignment 565 } 566 567 SDValue SITargetLowering::LowerFormalArguments( 568 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 569 const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG, 570 SmallVectorImpl<SDValue> &InVals) const { 571 const SIRegisterInfo *TRI = 572 static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo()); 573 574 MachineFunction &MF = DAG.getMachineFunction(); 575 FunctionType *FType = MF.getFunction()->getFunctionType(); 576 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 577 const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>(); 578 579 if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) { 580 const Function *Fn = MF.getFunction(); 581 DiagnosticInfoUnsupported NoGraphicsHSA( 582 *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc()); 583 DAG.getContext()->diagnose(NoGraphicsHSA); 584 return DAG.getEntryNode(); 585 } 586 587 SmallVector<ISD::InputArg, 16> Splits; 588 BitVector Skipped(Ins.size()); 589 590 for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) { 591 const ISD::InputArg &Arg = Ins[i]; 592 593 // First check if it's a PS input addr 594 if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() && 595 !Arg.Flags.isByVal() && PSInputNum <= 15) { 596 597 if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) { 598 // We can safely skip PS inputs 599 Skipped.set(i); 600 ++PSInputNum; 601 continue; 602 } 603 604 Info->markPSInputAllocated(PSInputNum); 605 if (Arg.Used) 606 Info->PSInputEna |= 1 << PSInputNum; 607 608 ++PSInputNum; 609 } 610 611 if (AMDGPU::isShader(CallConv)) { 612 // Second split vertices into their elements 613 if (Arg.VT.isVector()) { 614 ISD::InputArg NewArg = Arg; 615 NewArg.Flags.setSplit(); 616 NewArg.VT = Arg.VT.getVectorElementType(); 617 618 // We REALLY want the ORIGINAL number of vertex elements here, e.g. a 619 // three or five element vertex only needs three or five registers, 620 // NOT four or eight. 621 Type *ParamType = FType->getParamType(Arg.getOrigArgIndex()); 622 unsigned NumElements = ParamType->getVectorNumElements(); 623 624 for (unsigned j = 0; j != NumElements; ++j) { 625 Splits.push_back(NewArg); 626 NewArg.PartOffset += NewArg.VT.getStoreSize(); 627 } 628 } else { 629 Splits.push_back(Arg); 630 } 631 } 632 } 633 634 SmallVector<CCValAssign, 16> ArgLocs; 635 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 636 *DAG.getContext()); 637 638 // At least one interpolation mode must be enabled or else the GPU will hang. 639 // 640 // Check PSInputAddr instead of PSInputEna. The idea is that if the user set 641 // PSInputAddr, the user wants to enable some bits after the compilation 642 // based on run-time states. Since we can't know what the final PSInputEna 643 // will look like, so we shouldn't do anything here and the user should take 644 // responsibility for the correct programming. 645 // 646 // Otherwise, the following restrictions apply: 647 // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled. 648 // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be 649 // enabled too. 
650 if (CallConv == CallingConv::AMDGPU_PS && 651 ((Info->getPSInputAddr() & 0x7F) == 0 || 652 ((Info->getPSInputAddr() & 0xF) == 0 && 653 Info->isPSInputAllocated(11)))) { 654 CCInfo.AllocateReg(AMDGPU::VGPR0); 655 CCInfo.AllocateReg(AMDGPU::VGPR1); 656 Info->markPSInputAllocated(0); 657 Info->PSInputEna |= 1; 658 } 659 660 if (!AMDGPU::isShader(CallConv)) { 661 getOriginalFunctionArgs(DAG, DAG.getMachineFunction().getFunction(), Ins, 662 Splits); 663 664 assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX()); 665 } else { 666 assert(!Info->hasPrivateSegmentBuffer() && !Info->hasDispatchPtr() && 667 !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() && 668 !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() && 669 !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() && 670 !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() && 671 !Info->hasWorkItemIDZ()); 672 } 673 674 // FIXME: How should these inputs interact with inreg / custom SGPR inputs? 675 if (Info->hasPrivateSegmentBuffer()) { 676 unsigned PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI); 677 MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass); 678 CCInfo.AllocateReg(PrivateSegmentBufferReg); 679 } 680 681 if (Info->hasDispatchPtr()) { 682 unsigned DispatchPtrReg = Info->addDispatchPtr(*TRI); 683 MF.addLiveIn(DispatchPtrReg, &AMDGPU::SReg_64RegClass); 684 CCInfo.AllocateReg(DispatchPtrReg); 685 } 686 687 if (Info->hasQueuePtr()) { 688 unsigned QueuePtrReg = Info->addQueuePtr(*TRI); 689 MF.addLiveIn(QueuePtrReg, &AMDGPU::SReg_64RegClass); 690 CCInfo.AllocateReg(QueuePtrReg); 691 } 692 693 if (Info->hasKernargSegmentPtr()) { 694 unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI); 695 MF.addLiveIn(InputPtrReg, &AMDGPU::SReg_64RegClass); 696 CCInfo.AllocateReg(InputPtrReg); 697 } 698 699 if (Info->hasFlatScratchInit()) { 700 unsigned FlatScratchInitReg = Info->addFlatScratchInit(*TRI); 701 MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SReg_64RegClass); 702 CCInfo.AllocateReg(FlatScratchInitReg); 703 } 704 705 AnalyzeFormalArguments(CCInfo, Splits); 706 707 SmallVector<SDValue, 16> Chains; 708 709 for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) { 710 711 const ISD::InputArg &Arg = Ins[i]; 712 if (Skipped[i]) { 713 InVals.push_back(DAG.getUNDEF(Arg.VT)); 714 continue; 715 } 716 717 CCValAssign &VA = ArgLocs[ArgIdx++]; 718 MVT VT = VA.getLocVT(); 719 720 if (VA.isMemLoc()) { 721 VT = Ins[i].VT; 722 EVT MemVT = Splits[i].VT; 723 const unsigned Offset = Subtarget->getExplicitKernelArgOffset() + 724 VA.getLocMemOffset(); 725 // The first 36 bytes of the input buffer contains information about 726 // thread group and global sizes. 727 SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, Chain, 728 Offset, Ins[i].Flags.isSExt()); 729 Chains.push_back(Arg.getValue(1)); 730 731 auto *ParamTy = 732 dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex())); 733 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && 734 ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { 735 // On SI local pointers are just offsets into LDS, so they are always 736 // less than 16-bits. On CI and newer they could potentially be 737 // real pointers, so we can't guarantee their size. 
738 Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg, 739 DAG.getValueType(MVT::i16)); 740 } 741 742 InVals.push_back(Arg); 743 Info->ABIArgOffset = Offset + MemVT.getStoreSize(); 744 continue; 745 } 746 assert(VA.isRegLoc() && "Parameter must be in a register!"); 747 748 unsigned Reg = VA.getLocReg(); 749 750 if (VT == MVT::i64) { 751 // For now assume it is a pointer 752 Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0, 753 &AMDGPU::SReg_64RegClass); 754 Reg = MF.addLiveIn(Reg, &AMDGPU::SReg_64RegClass); 755 SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT); 756 InVals.push_back(Copy); 757 continue; 758 } 759 760 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); 761 762 Reg = MF.addLiveIn(Reg, RC); 763 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT); 764 765 if (Arg.VT.isVector()) { 766 767 // Build a vector from the registers 768 Type *ParamType = FType->getParamType(Arg.getOrigArgIndex()); 769 unsigned NumElements = ParamType->getVectorNumElements(); 770 771 SmallVector<SDValue, 4> Regs; 772 Regs.push_back(Val); 773 for (unsigned j = 1; j != NumElements; ++j) { 774 Reg = ArgLocs[ArgIdx++].getLocReg(); 775 Reg = MF.addLiveIn(Reg, RC); 776 777 SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT); 778 Regs.push_back(Copy); 779 } 780 781 // Fill up the missing vector elements 782 NumElements = Arg.VT.getVectorNumElements() - NumElements; 783 Regs.append(NumElements, DAG.getUNDEF(VT)); 784 785 InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs)); 786 continue; 787 } 788 789 InVals.push_back(Val); 790 } 791 792 // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read 793 // these from the dispatch pointer. 794 795 // Start adding system SGPRs. 796 if (Info->hasWorkGroupIDX()) { 797 unsigned Reg = Info->addWorkGroupIDX(); 798 MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass); 799 CCInfo.AllocateReg(Reg); 800 } 801 802 if (Info->hasWorkGroupIDY()) { 803 unsigned Reg = Info->addWorkGroupIDY(); 804 MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass); 805 CCInfo.AllocateReg(Reg); 806 } 807 808 if (Info->hasWorkGroupIDZ()) { 809 unsigned Reg = Info->addWorkGroupIDZ(); 810 MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass); 811 CCInfo.AllocateReg(Reg); 812 } 813 814 if (Info->hasWorkGroupInfo()) { 815 unsigned Reg = Info->addWorkGroupInfo(); 816 MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass); 817 CCInfo.AllocateReg(Reg); 818 } 819 820 if (Info->hasPrivateSegmentWaveByteOffset()) { 821 // Scratch wave offset passed in system SGPR. 822 unsigned PrivateSegmentWaveByteOffsetReg; 823 824 if (AMDGPU::isShader(CallConv)) { 825 PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo); 826 Info->setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg); 827 } else 828 PrivateSegmentWaveByteOffsetReg = Info->addPrivateSegmentWaveByteOffset(); 829 830 MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass); 831 CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg); 832 } 833 834 // Now that we've figured out where the scratch register inputs are, see if 835 // should reserve the arguments and use them directly. 836 bool HasStackObjects = MF.getFrameInfo()->hasStackObjects(); 837 // Record that we know we have non-spill stack objects so we don't need to 838 // check all stack objects later. 839 if (HasStackObjects) 840 Info->setHasNonSpillStackObjects(true); 841 842 if (ST.isAmdHsaOS()) { 843 // TODO: Assume we will spill without optimizations. 
844 if (HasStackObjects) { 845 // If we have stack objects, we unquestionably need the private buffer 846 // resource. For the HSA ABI, this will be the first 4 user SGPR 847 // inputs. We can reserve those and use them directly. 848 849 unsigned PrivateSegmentBufferReg = TRI->getPreloadedValue( 850 MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER); 851 Info->setScratchRSrcReg(PrivateSegmentBufferReg); 852 853 unsigned PrivateSegmentWaveByteOffsetReg = TRI->getPreloadedValue( 854 MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); 855 Info->setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg); 856 } else { 857 unsigned ReservedBufferReg 858 = TRI->reservedPrivateSegmentBufferReg(MF); 859 unsigned ReservedOffsetReg 860 = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF); 861 862 // We tentatively reserve the last registers (skipping the last two 863 // which may contain VCC). After register allocation, we'll replace 864 // these with the ones immediately after those which were really 865 // allocated. In the prologue copies will be inserted from the argument 866 // to these reserved registers. 867 Info->setScratchRSrcReg(ReservedBufferReg); 868 Info->setScratchWaveOffsetReg(ReservedOffsetReg); 869 } 870 } else { 871 unsigned ReservedBufferReg = TRI->reservedPrivateSegmentBufferReg(MF); 872 873 // Without HSA, relocations are used for the scratch pointer and the 874 // buffer resource setup is always inserted in the prologue. Scratch wave 875 // offset is still in an input SGPR. 876 Info->setScratchRSrcReg(ReservedBufferReg); 877 878 if (HasStackObjects) { 879 unsigned ScratchWaveOffsetReg = TRI->getPreloadedValue( 880 MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); 881 Info->setScratchWaveOffsetReg(ScratchWaveOffsetReg); 882 } else { 883 unsigned ReservedOffsetReg 884 = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF); 885 Info->setScratchWaveOffsetReg(ReservedOffsetReg); 886 } 887 } 888 889 if (Info->hasWorkItemIDX()) { 890 unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X); 891 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 892 CCInfo.AllocateReg(Reg); 893 } 894 895 if (Info->hasWorkItemIDY()) { 896 unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y); 897 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 898 CCInfo.AllocateReg(Reg); 899 } 900 901 if (Info->hasWorkItemIDZ()) { 902 unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z); 903 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 904 CCInfo.AllocateReg(Reg); 905 } 906 907 if (Chains.empty()) 908 return Chain; 909 910 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 911 } 912 913 SDValue SITargetLowering::LowerReturn(SDValue Chain, 914 CallingConv::ID CallConv, 915 bool isVarArg, 916 const SmallVectorImpl<ISD::OutputArg> &Outs, 917 const SmallVectorImpl<SDValue> &OutVals, 918 SDLoc DL, SelectionDAG &DAG) const { 919 MachineFunction &MF = DAG.getMachineFunction(); 920 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 921 922 if (!AMDGPU::isShader(CallConv)) 923 return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs, 924 OutVals, DL, DAG); 925 926 Info->setIfReturnsVoid(Outs.size() == 0); 927 928 SmallVector<ISD::OutputArg, 48> Splits; 929 SmallVector<SDValue, 48> SplitVals; 930 931 // Split vectors into their elements. 
932 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 933 const ISD::OutputArg &Out = Outs[i]; 934 935 if (Out.VT.isVector()) { 936 MVT VT = Out.VT.getVectorElementType(); 937 ISD::OutputArg NewOut = Out; 938 NewOut.Flags.setSplit(); 939 NewOut.VT = VT; 940 941 // We want the original number of vector elements here, e.g. 942 // three or five, not four or eight. 943 unsigned NumElements = Out.ArgVT.getVectorNumElements(); 944 945 for (unsigned j = 0; j != NumElements; ++j) { 946 SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i], 947 DAG.getConstant(j, DL, MVT::i32)); 948 SplitVals.push_back(Elem); 949 Splits.push_back(NewOut); 950 NewOut.PartOffset += NewOut.VT.getStoreSize(); 951 } 952 } else { 953 SplitVals.push_back(OutVals[i]); 954 Splits.push_back(Out); 955 } 956 } 957 958 // CCValAssign - represent the assignment of the return value to a location. 959 SmallVector<CCValAssign, 48> RVLocs; 960 961 // CCState - Info about the registers and stack slots. 962 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 963 *DAG.getContext()); 964 965 // Analyze outgoing return values. 966 AnalyzeReturn(CCInfo, Splits); 967 968 SDValue Flag; 969 SmallVector<SDValue, 48> RetOps; 970 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 971 972 // Copy the result values into the output registers. 973 for (unsigned i = 0, realRVLocIdx = 0; 974 i != RVLocs.size(); 975 ++i, ++realRVLocIdx) { 976 CCValAssign &VA = RVLocs[i]; 977 assert(VA.isRegLoc() && "Can only return in registers!"); 978 979 SDValue Arg = SplitVals[realRVLocIdx]; 980 981 // Copied from other backends. 982 switch (VA.getLocInfo()) { 983 default: llvm_unreachable("Unknown loc info!"); 984 case CCValAssign::Full: 985 break; 986 case CCValAssign::BCvt: 987 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); 988 break; 989 } 990 991 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag); 992 Flag = Chain.getValue(1); 993 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 994 } 995 996 // Update chain and glue. 
997 RetOps[0] = Chain; 998 if (Flag.getNode()) 999 RetOps.push_back(Flag); 1000 1001 return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, RetOps); 1002 } 1003 1004 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT, 1005 SelectionDAG &DAG) const { 1006 unsigned Reg = StringSwitch<unsigned>(RegName) 1007 .Case("m0", AMDGPU::M0) 1008 .Case("exec", AMDGPU::EXEC) 1009 .Case("exec_lo", AMDGPU::EXEC_LO) 1010 .Case("exec_hi", AMDGPU::EXEC_HI) 1011 .Case("flat_scratch", AMDGPU::FLAT_SCR) 1012 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO) 1013 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI) 1014 .Default(AMDGPU::NoRegister); 1015 1016 if (Reg == AMDGPU::NoRegister) { 1017 report_fatal_error(Twine("invalid register name \"" 1018 + StringRef(RegName) + "\".")); 1019 1020 } 1021 1022 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && 1023 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) { 1024 report_fatal_error(Twine("invalid register \"" 1025 + StringRef(RegName) + "\" for subtarget.")); 1026 } 1027 1028 switch (Reg) { 1029 case AMDGPU::M0: 1030 case AMDGPU::EXEC_LO: 1031 case AMDGPU::EXEC_HI: 1032 case AMDGPU::FLAT_SCR_LO: 1033 case AMDGPU::FLAT_SCR_HI: 1034 if (VT.getSizeInBits() == 32) 1035 return Reg; 1036 break; 1037 case AMDGPU::EXEC: 1038 case AMDGPU::FLAT_SCR: 1039 if (VT.getSizeInBits() == 64) 1040 return Reg; 1041 break; 1042 default: 1043 llvm_unreachable("missing register type checking"); 1044 } 1045 1046 report_fatal_error(Twine("invalid type for register \"" 1047 + StringRef(RegName) + "\".")); 1048 } 1049 1050 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( 1051 MachineInstr *MI, MachineBasicBlock *BB) const { 1052 switch (MI->getOpcode()) { 1053 case AMDGPU::SI_INIT_M0: { 1054 const SIInstrInfo *TII = 1055 static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); 1056 BuildMI(*BB, MI->getIterator(), MI->getDebugLoc(), 1057 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 1058 .addOperand(MI->getOperand(0)); 1059 MI->eraseFromParent(); 1060 break; 1061 } 1062 case AMDGPU::BRANCH: 1063 return BB; 1064 case AMDGPU::GET_GROUPSTATICSIZE: { 1065 const SIInstrInfo *TII = 1066 static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); 1067 MachineFunction *MF = BB->getParent(); 1068 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1069 DebugLoc DL = MI->getDebugLoc(); 1070 BuildMI (*BB, MI, DL, TII->get(AMDGPU::S_MOVK_I32)) 1071 .addOperand(MI->getOperand(0)) 1072 .addImm(MFI->LDSSize); 1073 MI->eraseFromParent(); 1074 return BB; 1075 } 1076 default: 1077 return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); 1078 } 1079 return BB; 1080 } 1081 1082 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const { 1083 // This currently forces unfolding various combinations of fsub into fma with 1084 // free fneg'd operands. As long as we have fast FMA (controlled by 1085 // isFMAFasterThanFMulAndFAdd), we should perform these. 1086 1087 // When fma is quarter rate, for f64 where add / sub are at best half rate, 1088 // most of these combines appear to be cycle neutral but save on instruction 1089 // count / code size. 
1090 return true; 1091 } 1092 1093 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, 1094 EVT VT) const { 1095 if (!VT.isVector()) { 1096 return MVT::i1; 1097 } 1098 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements()); 1099 } 1100 1101 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT) const { 1102 return MVT::i32; 1103 } 1104 1105 // Answering this is somewhat tricky and depends on the specific device which 1106 // have different rates for fma or all f64 operations. 1107 // 1108 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other 1109 // regardless of which device (although the number of cycles differs between 1110 // devices), so it is always profitable for f64. 1111 // 1112 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable 1113 // only on full rate devices. Normally, we should prefer selecting v_mad_f32 1114 // which we can always do even without fused FP ops since it returns the same 1115 // result as the separate operations and since it is always full 1116 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32 1117 // however does not support denormals, so we do report fma as faster if we have 1118 // a fast fma device and require denormals. 1119 // 1120 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 1121 VT = VT.getScalarType(); 1122 1123 if (!VT.isSimple()) 1124 return false; 1125 1126 switch (VT.getSimpleVT().SimpleTy) { 1127 case MVT::f32: 1128 // This is as fast on some subtargets. However, we always have full rate f32 1129 // mad available which returns the same result as the separate operations 1130 // which we should prefer over fma. We can't use this if we want to support 1131 // denormals, so only report this in these cases. 
1132 return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32(); 1133 case MVT::f64: 1134 return true; 1135 default: 1136 break; 1137 } 1138 1139 return false; 1140 } 1141 1142 //===----------------------------------------------------------------------===// 1143 // Custom DAG Lowering Operations 1144 //===----------------------------------------------------------------------===// 1145 1146 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 1147 switch (Op.getOpcode()) { 1148 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); 1149 case ISD::FrameIndex: return LowerFrameIndex(Op, DAG); 1150 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 1151 case ISD::LOAD: { 1152 SDValue Result = LowerLOAD(Op, DAG); 1153 assert((!Result.getNode() || 1154 Result.getNode()->getNumValues() == 2) && 1155 "Load should return a value and a chain"); 1156 return Result; 1157 } 1158 1159 case ISD::FSIN: 1160 case ISD::FCOS: 1161 return LowerTrig(Op, DAG); 1162 case ISD::SELECT: return LowerSELECT(Op, DAG); 1163 case ISD::FDIV: return LowerFDIV(Op, DAG); 1164 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG); 1165 case ISD::STORE: return LowerSTORE(Op, DAG); 1166 case ISD::GlobalAddress: { 1167 MachineFunction &MF = DAG.getMachineFunction(); 1168 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1169 return LowerGlobalAddress(MFI, Op, DAG); 1170 } 1171 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 1172 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 1173 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); 1174 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG); 1175 } 1176 return SDValue(); 1177 } 1178 1179 /// \brief Helper function for LowerBRCOND 1180 static SDNode *findUser(SDValue Value, unsigned Opcode) { 1181 1182 SDNode *Parent = Value.getNode(); 1183 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); 1184 I != E; ++I) { 1185 1186 if (I.getUse().get() != Value) 1187 continue; 1188 1189 if (I->getOpcode() == Opcode) 1190 return *I; 1191 } 1192 return nullptr; 1193 } 1194 1195 SDValue SITargetLowering::LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const { 1196 1197 SDLoc SL(Op); 1198 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Op); 1199 unsigned FrameIndex = FINode->getIndex(); 1200 1201 // A FrameIndex node represents a 32-bit offset into scratch memory. If the 1202 // high bit of a frame index offset were to be set, this would mean that it 1203 // represented an offset of ~2GB * 64 = ~128GB from the start of the scratch 1204 // buffer, with 64 being the number of threads per wave. 1205 // 1206 // The maximum private allocation for the entire GPU is 4G, and we are 1207 // concerned with the largest the index could ever be for an individual 1208 // workitem. This will occur with the minmum dispatch size. If a program 1209 // requires more, the dispatch size will be reduced. 1210 // 1211 // With this limit, we can mark the high bit of the FrameIndex node as known 1212 // zero, which is important, because it means in most situations we can prove 1213 // that values derived from FrameIndex nodes are non-negative. This enables us 1214 // to take advantage of more addressing modes when accessing scratch buffers, 1215 // since for scratch reads/writes, the register offset must always be 1216 // positive. 1217 1218 uint64_t MaxGPUAlloc = UINT64_C(4) * 1024 * 1024 * 1024; 1219 1220 // XXX - It is unclear if partial dispatch works. 
Assume it works at half wave 1221 // granularity. It is probably a full wave. 1222 uint64_t MinGranularity = 32; 1223 1224 unsigned KnownBits = Log2_64(MaxGPUAlloc / MinGranularity); 1225 EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), KnownBits); 1226 1227 SDValue TFI = DAG.getTargetFrameIndex(FrameIndex, MVT::i32); 1228 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, TFI, 1229 DAG.getValueType(ExtVT)); 1230 } 1231 1232 bool SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { 1233 if (Intr->getOpcode() != ISD::INTRINSIC_W_CHAIN) 1234 return false; 1235 1236 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) { 1237 default: return false; 1238 case AMDGPUIntrinsic::amdgcn_if: 1239 case AMDGPUIntrinsic::amdgcn_else: 1240 case AMDGPUIntrinsic::amdgcn_break: 1241 case AMDGPUIntrinsic::amdgcn_if_break: 1242 case AMDGPUIntrinsic::amdgcn_else_break: 1243 case AMDGPUIntrinsic::amdgcn_loop: 1244 case AMDGPUIntrinsic::amdgcn_end_cf: 1245 return true; 1246 } 1247 } 1248 1249 /// This transforms the control flow intrinsics to get the branch destination as 1250 /// last parameter, also switches branch target with BR if the need arise 1251 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, 1252 SelectionDAG &DAG) const { 1253 1254 SDLoc DL(BRCOND); 1255 1256 SDNode *Intr = BRCOND.getOperand(1).getNode(); 1257 SDValue Target = BRCOND.getOperand(2); 1258 SDNode *BR = nullptr; 1259 SDNode *SetCC = nullptr; 1260 1261 if (Intr->getOpcode() == ISD::SETCC) { 1262 // As long as we negate the condition everything is fine 1263 SetCC = Intr; 1264 Intr = SetCC->getOperand(0).getNode(); 1265 1266 } else { 1267 // Get the target from BR if we don't negate the condition 1268 BR = findUser(BRCOND, ISD::BR); 1269 Target = BR->getOperand(1); 1270 } 1271 1272 if (!isCFIntrinsic(Intr)) { 1273 // This is a uniform branch so we don't need to legalize. 1274 return BRCOND; 1275 } 1276 1277 assert(!SetCC || 1278 (SetCC->getConstantOperandVal(1) == 1 && 1279 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() == 1280 ISD::SETNE)); 1281 1282 // Build the result and 1283 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end()); 1284 1285 // operands of the new intrinsic call 1286 SmallVector<SDValue, 4> Ops; 1287 Ops.push_back(BRCOND.getOperand(0)); 1288 Ops.append(Intr->op_begin() + 1, Intr->op_end()); 1289 Ops.push_back(Target); 1290 1291 // build the new intrinsic call 1292 SDNode *Result = DAG.getNode( 1293 Res.size() > 1 ? 
ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL, 1294 DAG.getVTList(Res), Ops).getNode(); 1295 1296 if (BR) { 1297 // Give the branch instruction our target 1298 SDValue Ops[] = { 1299 BR->getOperand(0), 1300 BRCOND.getOperand(2) 1301 }; 1302 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops); 1303 DAG.ReplaceAllUsesWith(BR, NewBR.getNode()); 1304 BR = NewBR.getNode(); 1305 } 1306 1307 SDValue Chain = SDValue(Result, Result->getNumValues() - 1); 1308 1309 // Copy the intrinsic results to registers 1310 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) { 1311 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg); 1312 if (!CopyToReg) 1313 continue; 1314 1315 Chain = DAG.getCopyToReg( 1316 Chain, DL, 1317 CopyToReg->getOperand(1), 1318 SDValue(Result, i - 1), 1319 SDValue()); 1320 1321 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0)); 1322 } 1323 1324 // Remove the old intrinsic from the chain 1325 DAG.ReplaceAllUsesOfValueWith( 1326 SDValue(Intr, Intr->getNumValues() - 1), 1327 Intr->getOperand(0)); 1328 1329 return Chain; 1330 } 1331 1332 SDValue SITargetLowering::getSegmentAperture(unsigned AS, 1333 SelectionDAG &DAG) const { 1334 SDLoc SL; 1335 MachineFunction &MF = DAG.getMachineFunction(); 1336 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 1337 SDValue QueuePtr = CreateLiveInRegister( 1338 DAG, &AMDGPU::SReg_64RegClass, Info->getQueuePtrUserSGPR(), MVT::i64); 1339 1340 // Offset into amd_queue_t for group_segment_aperture_base_hi / 1341 // private_segment_aperture_base_hi. 1342 uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44; 1343 1344 SDValue Ptr = DAG.getNode(ISD::ADD, SL, MVT::i64, QueuePtr, 1345 DAG.getConstant(StructOffset, SL, MVT::i64)); 1346 1347 // TODO: Use custom target PseudoSourceValue. 1348 // TODO: We should use the value from the IR intrinsic call, but it might not 1349 // be available and how do we get it? 1350 Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()), 1351 AMDGPUAS::CONSTANT_ADDRESS)); 1352 1353 MachinePointerInfo PtrInfo(V, StructOffset); 1354 return DAG.getLoad(MVT::i32, SL, QueuePtr.getValue(1), Ptr, 1355 PtrInfo, false, 1356 false, true, 1357 MinAlign(64, StructOffset)); 1358 } 1359 1360 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, 1361 SelectionDAG &DAG) const { 1362 SDLoc SL(Op); 1363 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op); 1364 1365 SDValue Src = ASC->getOperand(0); 1366 1367 // FIXME: Really support non-0 null pointers. 
1368 SDValue SegmentNullPtr = DAG.getConstant(-1, SL, MVT::i32); 1369 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64); 1370 1371 // flat -> local/private 1372 if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { 1373 if (ASC->getDestAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || 1374 ASC->getDestAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { 1375 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE); 1376 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); 1377 1378 return DAG.getNode(ISD::SELECT, SL, MVT::i32, 1379 NonNull, Ptr, SegmentNullPtr); 1380 } 1381 } 1382 1383 // local/private -> flat 1384 if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { 1385 if (ASC->getSrcAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || 1386 ASC->getSrcAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { 1387 SDValue NonNull 1388 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE); 1389 1390 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), DAG); 1391 SDValue CvtPtr 1392 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture); 1393 1394 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, 1395 DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr), 1396 FlatNullPtr); 1397 } 1398 } 1399 1400 // global <-> flat are no-ops and never emitted. 1401 1402 const MachineFunction &MF = DAG.getMachineFunction(); 1403 DiagnosticInfoUnsupported InvalidAddrSpaceCast( 1404 *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc()); 1405 DAG.getContext()->diagnose(InvalidAddrSpaceCast); 1406 1407 return DAG.getUNDEF(ASC->getValueType(0)); 1408 } 1409 1410 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, SDLoc DL, 1411 SDValue V) const { 1412 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as 1413 // the destination register. 1414 // 1415 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions, 1416 // so we will end up with redundant moves to m0. 1417 // 1418 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result. 1419 1420 // A Null SDValue creates a glue result. 1421 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue, 1422 V, Chain); 1423 return SDValue(M0, 0); 1424 } 1425 1426 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, 1427 SDValue Op, 1428 MVT VT, 1429 unsigned Offset) const { 1430 SDLoc SL(Op); 1431 SDValue Param = LowerParameter(DAG, MVT::i32, MVT::i32, SL, 1432 DAG.getEntryNode(), Offset, false); 1433 // The local size values will have the hi 16-bits as zero. 1434 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param, 1435 DAG.getValueType(VT)); 1436 } 1437 1438 static SDValue emitNonHSAIntrinsicError(SelectionDAG& DAG, EVT VT) { 1439 DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(), 1440 "non-hsa intrinsic with hsa target"); 1441 DAG.getContext()->diagnose(BadIntrin); 1442 return DAG.getUNDEF(VT); 1443 } 1444 1445 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 1446 SelectionDAG &DAG) const { 1447 MachineFunction &MF = DAG.getMachineFunction(); 1448 auto MFI = MF.getInfo<SIMachineFunctionInfo>(); 1449 const SIRegisterInfo *TRI = 1450 static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo()); 1451 1452 EVT VT = Op.getValueType(); 1453 SDLoc DL(Op); 1454 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 1455 1456 // TODO: Should this propagate fast-math-flags? 
1457 1458 switch (IntrinsicID) { 1459 case Intrinsic::amdgcn_dispatch_ptr: 1460 case Intrinsic::amdgcn_queue_ptr: { 1461 if (!Subtarget->isAmdHsaOS()) { 1462 DiagnosticInfoUnsupported BadIntrin( 1463 *MF.getFunction(), "unsupported hsa intrinsic without hsa target", 1464 DL.getDebugLoc()); 1465 DAG.getContext()->diagnose(BadIntrin); 1466 return DAG.getUNDEF(VT); 1467 } 1468 1469 auto Reg = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ? 1470 SIRegisterInfo::DISPATCH_PTR : SIRegisterInfo::QUEUE_PTR; 1471 return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, 1472 TRI->getPreloadedValue(MF, Reg), VT); 1473 } 1474 case Intrinsic::amdgcn_kernarg_segment_ptr: { 1475 unsigned Reg 1476 = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR); 1477 return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT); 1478 } 1479 case Intrinsic::amdgcn_rcp: 1480 return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1)); 1481 case Intrinsic::amdgcn_rsq: 1482 case AMDGPUIntrinsic::AMDGPU_rsq: // Legacy name 1483 return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 1484 case Intrinsic::amdgcn_rsq_clamp: 1485 case AMDGPUIntrinsic::AMDGPU_rsq_clamped: { // Legacy name 1486 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) 1487 return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1)); 1488 1489 Type *Type = VT.getTypeForEVT(*DAG.getContext()); 1490 APFloat Max = APFloat::getLargest(Type->getFltSemantics()); 1491 APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true); 1492 1493 SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 1494 SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq, 1495 DAG.getConstantFP(Max, DL, VT)); 1496 return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp, 1497 DAG.getConstantFP(Min, DL, VT)); 1498 } 1499 case Intrinsic::r600_read_ngroups_x: 1500 if (Subtarget->isAmdHsaOS()) 1501 return emitNonHSAIntrinsicError(DAG, VT); 1502 1503 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 1504 SI::KernelInputOffsets::NGROUPS_X, false); 1505 case Intrinsic::r600_read_ngroups_y: 1506 if (Subtarget->isAmdHsaOS()) 1507 return emitNonHSAIntrinsicError(DAG, VT); 1508 1509 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 1510 SI::KernelInputOffsets::NGROUPS_Y, false); 1511 case Intrinsic::r600_read_ngroups_z: 1512 if (Subtarget->isAmdHsaOS()) 1513 return emitNonHSAIntrinsicError(DAG, VT); 1514 1515 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 1516 SI::KernelInputOffsets::NGROUPS_Z, false); 1517 case Intrinsic::r600_read_global_size_x: 1518 if (Subtarget->isAmdHsaOS()) 1519 return emitNonHSAIntrinsicError(DAG, VT); 1520 1521 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 1522 SI::KernelInputOffsets::GLOBAL_SIZE_X, false); 1523 case Intrinsic::r600_read_global_size_y: 1524 if (Subtarget->isAmdHsaOS()) 1525 return emitNonHSAIntrinsicError(DAG, VT); 1526 1527 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 1528 SI::KernelInputOffsets::GLOBAL_SIZE_Y, false); 1529 case Intrinsic::r600_read_global_size_z: 1530 if (Subtarget->isAmdHsaOS()) 1531 return emitNonHSAIntrinsicError(DAG, VT); 1532 1533 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 1534 SI::KernelInputOffsets::GLOBAL_SIZE_Z, false); 1535 case Intrinsic::r600_read_local_size_x: 1536 if (Subtarget->isAmdHsaOS()) 1537 return emitNonHSAIntrinsicError(DAG, VT); 1538 1539 return lowerImplicitZextParam(DAG, Op, MVT::i16, 1540 SI::KernelInputOffsets::LOCAL_SIZE_X); 1541 case Intrinsic::r600_read_local_size_y: 1542 
if (Subtarget->isAmdHsaOS()) 1543 return emitNonHSAIntrinsicError(DAG, VT); 1544 1545 return lowerImplicitZextParam(DAG, Op, MVT::i16, 1546 SI::KernelInputOffsets::LOCAL_SIZE_Y); 1547 case Intrinsic::r600_read_local_size_z: 1548 if (Subtarget->isAmdHsaOS()) 1549 return emitNonHSAIntrinsicError(DAG, VT); 1550 1551 return lowerImplicitZextParam(DAG, Op, MVT::i16, 1552 SI::KernelInputOffsets::LOCAL_SIZE_Z); 1553 case Intrinsic::amdgcn_read_workdim: 1554 case AMDGPUIntrinsic::AMDGPU_read_workdim: // Legacy name. 1555 // Really only 2 bits. 1556 return lowerImplicitZextParam(DAG, Op, MVT::i8, 1557 getImplicitParameterOffset(MFI, GRID_DIM)); 1558 case Intrinsic::amdgcn_workgroup_id_x: 1559 case Intrinsic::r600_read_tgid_x: 1560 return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass, 1561 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_X), VT); 1562 case Intrinsic::amdgcn_workgroup_id_y: 1563 case Intrinsic::r600_read_tgid_y: 1564 return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass, 1565 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Y), VT); 1566 case Intrinsic::amdgcn_workgroup_id_z: 1567 case Intrinsic::r600_read_tgid_z: 1568 return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass, 1569 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Z), VT); 1570 case Intrinsic::amdgcn_workitem_id_x: 1571 case Intrinsic::r600_read_tidig_x: 1572 return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, 1573 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X), VT); 1574 case Intrinsic::amdgcn_workitem_id_y: 1575 case Intrinsic::r600_read_tidig_y: 1576 return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, 1577 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y), VT); 1578 case Intrinsic::amdgcn_workitem_id_z: 1579 case Intrinsic::r600_read_tidig_z: 1580 return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, 1581 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z), VT); 1582 case AMDGPUIntrinsic::SI_load_const: { 1583 SDValue Ops[] = { 1584 Op.getOperand(1), 1585 Op.getOperand(2) 1586 }; 1587 1588 MachineMemOperand *MMO = MF.getMachineMemOperand( 1589 MachinePointerInfo(), 1590 MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, 1591 VT.getStoreSize(), 4); 1592 return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL, 1593 Op->getVTList(), Ops, VT, MMO); 1594 } 1595 case AMDGPUIntrinsic::SI_vs_load_input: 1596 return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT, 1597 Op.getOperand(1), 1598 Op.getOperand(2), 1599 Op.getOperand(3)); 1600 1601 case AMDGPUIntrinsic::SI_fs_constant: { 1602 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3)); 1603 SDValue Glue = M0.getValue(1); 1604 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, 1605 DAG.getConstant(2, DL, MVT::i32), // P0 1606 Op.getOperand(1), Op.getOperand(2), Glue); 1607 } 1608 case AMDGPUIntrinsic::SI_packf16: 1609 if (Op.getOperand(1).isUndef() && Op.getOperand(2).isUndef()) 1610 return DAG.getUNDEF(MVT::i32); 1611 return Op; 1612 case AMDGPUIntrinsic::SI_fs_interp: { 1613 SDValue IJ = Op.getOperand(4); 1614 SDValue I = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ, 1615 DAG.getConstant(0, DL, MVT::i32)); 1616 SDValue J = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ, 1617 DAG.getConstant(1, DL, MVT::i32)); 1618 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3)); 1619 SDValue Glue = M0.getValue(1); 1620 SDValue P1 = DAG.getNode(AMDGPUISD::INTERP_P1, DL, 1621 DAG.getVTList(MVT::f32, MVT::Glue), 1622 I, Op.getOperand(1), 
Op.getOperand(2), Glue); 1623 Glue = SDValue(P1.getNode(), 1); 1624 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, P1, J, 1625 Op.getOperand(1), Op.getOperand(2), Glue); 1626 } 1627 case Intrinsic::amdgcn_interp_p1: { 1628 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); 1629 SDValue Glue = M0.getValue(1); 1630 return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1), 1631 Op.getOperand(2), Op.getOperand(3), Glue); 1632 } 1633 case Intrinsic::amdgcn_interp_p2: { 1634 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5)); 1635 SDValue Glue = SDValue(M0.getNode(), 1); 1636 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1), 1637 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4), 1638 Glue); 1639 } 1640 case Intrinsic::amdgcn_sin: 1641 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1)); 1642 1643 case Intrinsic::amdgcn_cos: 1644 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1)); 1645 1646 case Intrinsic::amdgcn_log_clamp: { 1647 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) 1648 return SDValue(); 1649 1650 DiagnosticInfoUnsupported BadIntrin( 1651 *MF.getFunction(), "intrinsic not supported on subtarget", 1652 DL.getDebugLoc()); 1653 DAG.getContext()->diagnose(BadIntrin); 1654 return DAG.getUNDEF(VT); 1655 } 1656 case Intrinsic::amdgcn_ldexp: 1657 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, 1658 Op.getOperand(1), Op.getOperand(2)); 1659 1660 case Intrinsic::amdgcn_fract: 1661 return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1)); 1662 1663 case Intrinsic::amdgcn_class: 1664 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT, 1665 Op.getOperand(1), Op.getOperand(2)); 1666 case Intrinsic::amdgcn_div_fmas: 1667 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT, 1668 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), 1669 Op.getOperand(4)); 1670 1671 case Intrinsic::amdgcn_div_fixup: 1672 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT, 1673 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 1674 1675 case Intrinsic::amdgcn_trig_preop: 1676 return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT, 1677 Op.getOperand(1), Op.getOperand(2)); 1678 case Intrinsic::amdgcn_div_scale: { 1679 // 3rd parameter required to be a constant. 1680 const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 1681 if (!Param) 1682 return DAG.getUNDEF(VT); 1683 1684 // Translate to the operands expected by the machine instruction. The 1685 // first parameter must be the same as the first instruction. 1686 SDValue Numerator = Op.getOperand(1); 1687 SDValue Denominator = Op.getOperand(2); 1688 1689 // Note this order is opposite of the machine instruction's operations, 1690 // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The 1691 // intrinsic has the numerator as the first operand to match a normal 1692 // division operation. 1693 1694 SDValue Src0 = Param->isAllOnesValue() ? 
Numerator : Denominator; 1695 1696 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0, 1697 Denominator, Numerator); 1698 } 1699 case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte0: 1700 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Op.getOperand(1)); 1701 case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte1: 1702 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE1, DL, VT, Op.getOperand(1)); 1703 case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte2: 1704 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE2, DL, VT, Op.getOperand(1)); 1705 case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte3: 1706 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE3, DL, VT, Op.getOperand(1)); 1707 default: 1708 return AMDGPUTargetLowering::LowerOperation(Op, DAG); 1709 } 1710 } 1711 1712 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, 1713 SelectionDAG &DAG) const { 1714 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 1715 switch (IntrID) { 1716 case Intrinsic::amdgcn_atomic_inc: 1717 case Intrinsic::amdgcn_atomic_dec: { 1718 MemSDNode *M = cast<MemSDNode>(Op); 1719 unsigned Opc = (IntrID == Intrinsic::amdgcn_atomic_inc) ? 1720 AMDGPUISD::ATOMIC_INC : AMDGPUISD::ATOMIC_DEC; 1721 SDValue Ops[] = { 1722 M->getOperand(0), // Chain 1723 M->getOperand(2), // Ptr 1724 M->getOperand(3) // Value 1725 }; 1726 1727 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops, 1728 M->getMemoryVT(), M->getMemOperand()); 1729 } 1730 default: 1731 return SDValue(); 1732 } 1733 } 1734 1735 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, 1736 SelectionDAG &DAG) const { 1737 MachineFunction &MF = DAG.getMachineFunction(); 1738 SDLoc DL(Op); 1739 SDValue Chain = Op.getOperand(0); 1740 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 1741 1742 switch (IntrinsicID) { 1743 case AMDGPUIntrinsic::SI_sendmsg: { 1744 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3)); 1745 SDValue Glue = Chain.getValue(1); 1746 return DAG.getNode(AMDGPUISD::SENDMSG, DL, MVT::Other, Chain, 1747 Op.getOperand(2), Glue); 1748 } 1749 case AMDGPUIntrinsic::SI_tbuffer_store: { 1750 SDValue Ops[] = { 1751 Chain, 1752 Op.getOperand(2), 1753 Op.getOperand(3), 1754 Op.getOperand(4), 1755 Op.getOperand(5), 1756 Op.getOperand(6), 1757 Op.getOperand(7), 1758 Op.getOperand(8), 1759 Op.getOperand(9), 1760 Op.getOperand(10), 1761 Op.getOperand(11), 1762 Op.getOperand(12), 1763 Op.getOperand(13), 1764 Op.getOperand(14) 1765 }; 1766 1767 EVT VT = Op.getOperand(3).getValueType(); 1768 1769 MachineMemOperand *MMO = MF.getMachineMemOperand( 1770 MachinePointerInfo(), 1771 MachineMemOperand::MOStore, 1772 VT.getStoreSize(), 4); 1773 return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL, 1774 Op->getVTList(), Ops, VT, MMO); 1775 } 1776 default: 1777 return SDValue(); 1778 } 1779 } 1780 1781 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 1782 SDLoc DL(Op); 1783 LoadSDNode *Load = cast<LoadSDNode>(Op); 1784 ISD::LoadExtType ExtType = Load->getExtensionType(); 1785 EVT MemVT = Load->getMemoryVT(); 1786 1787 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) { 1788 assert(MemVT == MVT::i1 && "Only i1 non-extloads expected"); 1789 // FIXME: Copied from PPC 1790 // First, load into 32 bits, then truncate to 1 bit. 
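// Roughly, the sequence built below is (i1 (truncate (i32 (extload ptr, i8)))), reusing the chain from the widened load.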
1791
1792 SDValue Chain = Load->getChain();
1793 SDValue BasePtr = Load->getBasePtr();
1794 MachineMemOperand *MMO = Load->getMemOperand();
1795
1796 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
1797 BasePtr, MVT::i8, MMO);
1798
1799 SDValue Ops[] = {
1800 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
1801 NewLD.getValue(1)
1802 };
1803
1804 return DAG.getMergeValues(Ops, DL);
1805 }
1806
1807 if (!MemVT.isVector())
1808 return SDValue();
1809
1810 assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
1811 "Custom lowering for non-i32 vectors hasn't been implemented.");
1812
1813 unsigned AS = Load->getAddressSpace();
1814 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
1815 AS, Load->getAlignment())) {
1816 SDValue Ops[2];
1817 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
1818 return DAG.getMergeValues(Ops, DL);
1819 }
1820
1821 unsigned NumElements = MemVT.getVectorNumElements();
1822 switch (AS) {
1823 case AMDGPUAS::CONSTANT_ADDRESS:
1824 if (isMemOpUniform(Load))
1825 return SDValue();
1826 // Non-uniform loads will be selected to MUBUF instructions, so they
1827 // have the same legalization requirements as global and private
1828 // loads.
1829 //
1830 // Fall-through
1831 case AMDGPUAS::GLOBAL_ADDRESS:
1832 case AMDGPUAS::FLAT_ADDRESS:
1833 if (NumElements > 4)
1834 return SplitVectorLoad(Op, DAG);
1835 // v4 loads are supported for private and global memory.
1836 return SDValue();
1837 case AMDGPUAS::PRIVATE_ADDRESS: {
1838 // Depending on the setting of the private_element_size field in the
1839 // resource descriptor, we can only make private accesses up to a certain
1840 // size.
1841 switch (Subtarget->getMaxPrivateElementSize()) {
1842 case 4:
1843 return scalarizeVectorLoad(Load, DAG);
1844 case 8:
1845 if (NumElements > 2)
1846 return SplitVectorLoad(Op, DAG);
1847 return SDValue();
1848 case 16:
1849 // Same as global/flat
1850 if (NumElements > 4)
1851 return SplitVectorLoad(Op, DAG);
1852 return SDValue();
1853 default:
1854 llvm_unreachable("unsupported private_element_size");
1855 }
1856 }
1857 case AMDGPUAS::LOCAL_ADDRESS: {
1858 if (NumElements > 2)
1859 return SplitVectorLoad(Op, DAG);
1860
1861 if (NumElements == 2)
1862 return SDValue();
1863
1864 // If properly aligned, we might be able to use ds_read_b64 if we split.
1865 return SplitVectorLoad(Op, DAG); 1866 } 1867 default: 1868 return SDValue(); 1869 } 1870 } 1871 1872 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 1873 if (Op.getValueType() != MVT::i64) 1874 return SDValue(); 1875 1876 SDLoc DL(Op); 1877 SDValue Cond = Op.getOperand(0); 1878 1879 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 1880 SDValue One = DAG.getConstant(1, DL, MVT::i32); 1881 1882 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); 1883 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); 1884 1885 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); 1886 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); 1887 1888 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); 1889 1890 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); 1891 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); 1892 1893 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); 1894 1895 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi}); 1896 return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res); 1897 } 1898 1899 // Catch division cases where we can use shortcuts with rcp and rsq 1900 // instructions. 1901 SDValue SITargetLowering::LowerFastFDIV(SDValue Op, SelectionDAG &DAG) const { 1902 SDLoc SL(Op); 1903 SDValue LHS = Op.getOperand(0); 1904 SDValue RHS = Op.getOperand(1); 1905 EVT VT = Op.getValueType(); 1906 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath; 1907 1908 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { 1909 if ((Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals())) && 1910 CLHS->isExactlyValue(1.0)) { 1911 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to 1912 // the CI documentation has a worst case error of 1 ulp. 1913 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to 1914 // use it as long as we aren't trying to use denormals. 1915 1916 // 1.0 / sqrt(x) -> rsq(x) 1917 // 1918 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP 1919 // error seems really high at 2^29 ULP. 1920 if (RHS.getOpcode() == ISD::FSQRT) 1921 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); 1922 1923 // 1.0 / x -> rcp(x) 1924 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 1925 } 1926 } 1927 1928 if (Unsafe) { 1929 // Turn into multiply by the reciprocal. 1930 // x / y -> x * (1.0 / y) 1931 SDNodeFlags Flags; 1932 Flags.setUnsafeAlgebra(true); 1933 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 1934 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, &Flags); 1935 } 1936 1937 return SDValue(); 1938 } 1939 1940 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { 1941 if (SDValue FastLowered = LowerFastFDIV(Op, DAG)) 1942 return FastLowered; 1943 1944 // This uses v_rcp_f32 which does not handle denormals. Let this hit a 1945 // selection error for now rather than do something incorrect. 
1946 if (Subtarget->hasFP32Denormals())
1947 return SDValue();
1948
1949 SDLoc SL(Op);
1950 SDValue LHS = Op.getOperand(0);
1951 SDValue RHS = Op.getOperand(1);
1952
1953 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
1954
// K0 = 0x1p+96f and K1 = 0x1p-32f. If |RHS| is larger than K0, pre-scale the
// denominator by K1 so its reciprocal stays out of the denormal range, and
// re-apply the same scale to the quotient at the end.
1955 const APFloat K0Val(BitsToFloat(0x6f800000));
1956 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
1957
1958 const APFloat K1Val(BitsToFloat(0x2f800000));
1959 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
1960
1961 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
1962
1963 EVT SetCCVT =
1964 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
1965
1966 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
1967
1968 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
1969
1970 // TODO: Should this propagate fast-math-flags?
1971
1972 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
1973
1974 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
1975
1976 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
1977
1978 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
1979 }
1980
1981 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
1982 if (DAG.getTarget().Options.UnsafeFPMath)
1983 return LowerFastFDIV(Op, DAG);
1984
1985 SDLoc SL(Op);
1986 SDValue X = Op.getOperand(0);
1987 SDValue Y = Op.getOperand(1);
1988
1989 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
1990
1991 SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
1992
1993 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
1994
1995 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
1996
1997 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
1998
1999 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
2000
2001 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
2002
2003 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
2004
2005 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
2006
2007 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
2008 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
2009
2010 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
2011 NegDivScale0, Mul, DivScale1);
2012
2013 SDValue Scale;
2014
2015 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
2016 // Workaround a hardware bug on SI where the condition output from div_scale
2017 // is not usable.
2018
2019 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
2020
2021 // Figure out which scale to use for div_fmas.
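// The comparisons below reconstruct an equivalent flag by checking, via the high dwords, which of the two div_scale results was left unscaled relative to its source operand.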
2022 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); 2023 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y); 2024 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0); 2025 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1); 2026 2027 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi); 2028 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi); 2029 2030 SDValue Scale0Hi 2031 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi); 2032 SDValue Scale1Hi 2033 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi); 2034 2035 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ); 2036 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ); 2037 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen); 2038 } else { 2039 Scale = DivScale1.getValue(1); 2040 } 2041 2042 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64, 2043 Fma4, Fma3, Mul, Scale); 2044 2045 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X); 2046 } 2047 2048 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { 2049 EVT VT = Op.getValueType(); 2050 2051 if (VT == MVT::f32) 2052 return LowerFDIV32(Op, DAG); 2053 2054 if (VT == MVT::f64) 2055 return LowerFDIV64(Op, DAG); 2056 2057 llvm_unreachable("Unexpected type for fdiv"); 2058 } 2059 2060 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 2061 SDLoc DL(Op); 2062 StoreSDNode *Store = cast<StoreSDNode>(Op); 2063 EVT VT = Store->getMemoryVT(); 2064 2065 if (VT == MVT::i1) { 2066 return DAG.getTruncStore(Store->getChain(), DL, 2067 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32), 2068 Store->getBasePtr(), MVT::i1, Store->getMemOperand()); 2069 } 2070 2071 assert(VT.isVector() && 2072 Store->getValue().getValueType().getScalarType() == MVT::i32); 2073 2074 unsigned AS = Store->getAddressSpace(); 2075 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT, 2076 AS, Store->getAlignment())) { 2077 return expandUnalignedStore(Store, DAG); 2078 } 2079 2080 unsigned NumElements = VT.getVectorNumElements(); 2081 switch (AS) { 2082 case AMDGPUAS::GLOBAL_ADDRESS: 2083 case AMDGPUAS::FLAT_ADDRESS: 2084 if (NumElements > 4) 2085 return SplitVectorStore(Op, DAG); 2086 return SDValue(); 2087 case AMDGPUAS::PRIVATE_ADDRESS: { 2088 switch (Subtarget->getMaxPrivateElementSize()) { 2089 case 4: 2090 return scalarizeVectorStore(Store, DAG); 2091 case 8: 2092 if (NumElements > 2) 2093 return SplitVectorStore(Op, DAG); 2094 return SDValue(); 2095 case 16: 2096 if (NumElements > 4) 2097 return SplitVectorStore(Op, DAG); 2098 return SDValue(); 2099 default: 2100 llvm_unreachable("unsupported private_element_size"); 2101 } 2102 } 2103 case AMDGPUAS::LOCAL_ADDRESS: { 2104 if (NumElements > 2) 2105 return SplitVectorStore(Op, DAG); 2106 2107 if (NumElements == 2) 2108 return Op; 2109 2110 // If properly aligned, if we split we might be able to use ds_write_b64. 2111 return SplitVectorStore(Op, DAG); 2112 } 2113 default: 2114 llvm_unreachable("unhandled address space"); 2115 } 2116 } 2117 2118 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { 2119 SDLoc DL(Op); 2120 EVT VT = Op.getValueType(); 2121 SDValue Arg = Op.getOperand(0); 2122 // TODO: Should this propagate fast-math-flags? 
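// The hardware SIN/COS units expect the angle in units of full revolutions rather than radians, so scale by 1/(2*pi) and keep only the fractional part for range reduction.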
2123 SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT, 2124 DAG.getNode(ISD::FMUL, DL, VT, Arg, 2125 DAG.getConstantFP(0.5/M_PI, DL, 2126 VT))); 2127 2128 switch (Op.getOpcode()) { 2129 case ISD::FCOS: 2130 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart); 2131 case ISD::FSIN: 2132 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart); 2133 default: 2134 llvm_unreachable("Wrong trig opcode"); 2135 } 2136 } 2137 2138 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const { 2139 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op); 2140 assert(AtomicNode->isCompareAndSwap()); 2141 unsigned AS = AtomicNode->getAddressSpace(); 2142 2143 // No custom lowering required for local address space 2144 if (!isFlatGlobalAddrSpace(AS)) 2145 return Op; 2146 2147 // Non-local address space requires custom lowering for atomic compare 2148 // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2 2149 SDLoc DL(Op); 2150 SDValue ChainIn = Op.getOperand(0); 2151 SDValue Addr = Op.getOperand(1); 2152 SDValue Old = Op.getOperand(2); 2153 SDValue New = Op.getOperand(3); 2154 EVT VT = Op.getValueType(); 2155 MVT SimpleVT = VT.getSimpleVT(); 2156 MVT VecType = MVT::getVectorVT(SimpleVT, 2); 2157 2158 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old}); 2159 SDValue Ops[] = { ChainIn, Addr, NewOld }; 2160 SDVTList VTList = DAG.getVTList(VT, MVT::Other); 2161 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, 2162 VTList, Ops, VT, AtomicNode->getMemOperand()); 2163 } 2164 2165 //===----------------------------------------------------------------------===// 2166 // Custom DAG optimizations 2167 //===----------------------------------------------------------------------===// 2168 2169 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, 2170 DAGCombinerInfo &DCI) const { 2171 EVT VT = N->getValueType(0); 2172 EVT ScalarVT = VT.getScalarType(); 2173 if (ScalarVT != MVT::f32) 2174 return SDValue(); 2175 2176 SelectionDAG &DAG = DCI.DAG; 2177 SDLoc DL(N); 2178 2179 SDValue Src = N->getOperand(0); 2180 EVT SrcVT = Src.getValueType(); 2181 2182 // TODO: We could try to match extracting the higher bytes, which would be 2183 // easier if i8 vectors weren't promoted to i32 vectors, particularly after 2184 // types are legalized. v4i8 -> v4f32 is probably the only case to worry 2185 // about in practice. 2186 if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) { 2187 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { 2188 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src); 2189 DCI.AddToWorklist(Cvt.getNode()); 2190 return Cvt; 2191 } 2192 } 2193 2194 // We are primarily trying to catch operations on illegal vector types 2195 // before they are expanded. 2196 // For scalars, we can use the more flexible method of checking masked bits 2197 // after legalization. 2198 if (!DCI.isBeforeLegalize() || 2199 !SrcVT.isVector() || 2200 SrcVT.getVectorElementType() != MVT::i8) { 2201 return SDValue(); 2202 } 2203 2204 assert(DCI.isBeforeLegalize() && "Unexpected legal type"); 2205 2206 // Weird sized vectors are a pain to handle, but we know 3 is really the same 2207 // size as 4. 2208 unsigned NElts = SrcVT.getVectorNumElements(); 2209 if (!SrcVT.isSimple() && NElts != 3) 2210 return SDValue(); 2211 2212 // Handle v4i8 -> v4f32 extload. Replace the v4i8 with a legal i32 load to 2213 // prevent a mess from expanding to v4i32 and repacking. 
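// e.g. (v4f32 (uint_to_fp (v4i8 (load p)))) becomes a single i32 zextload whose bytes are converted individually with cvt_f32_ubyte0..cvt_f32_ubyte3.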
2214 if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
2215 EVT LoadVT = getEquivalentMemType(*DAG.getContext(), SrcVT);
2216 EVT RegVT = getEquivalentLoadRegType(*DAG.getContext(), SrcVT);
2217 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32, NElts);
2218 LoadSDNode *Load = cast<LoadSDNode>(Src);
2219
2220 unsigned AS = Load->getAddressSpace();
2221 unsigned Align = Load->getAlignment();
2222 Type *Ty = LoadVT.getTypeForEVT(*DAG.getContext());
2223 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
2224
2225 // Don't try to replace the load if we have to expand it due to alignment
2226 // problems. Otherwise we will end up scalarizing the load, and trying to
2227 // repack into the vector for no real reason.
2228 if (Align < ABIAlignment &&
2229 !allowsMisalignedMemoryAccesses(LoadVT, AS, Align, nullptr)) {
2230 return SDValue();
2231 }
2232
2233 SDValue NewLoad = DAG.getExtLoad(ISD::ZEXTLOAD, DL, RegVT,
2234 Load->getChain(),
2235 Load->getBasePtr(),
2236 LoadVT,
2237 Load->getMemOperand());
2238
2239 // Make sure successors of the original load stay after it by updating
2240 // them to use the new Chain.
2241 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), NewLoad.getValue(1));
2242
2243 SmallVector<SDValue, 4> Elts;
2244 if (RegVT.isVector())
2245 DAG.ExtractVectorElements(NewLoad, Elts);
2246 else
2247 Elts.push_back(NewLoad);
2248
2249 SmallVector<SDValue, 4> Ops;
2250
2251 unsigned EltIdx = 0;
2252 for (SDValue Elt : Elts) {
2253 unsigned ComponentsInElt = std::min(4u, NElts - 4 * EltIdx);
2254 for (unsigned I = 0; I < ComponentsInElt; ++I) {
2255 unsigned Opc = AMDGPUISD::CVT_F32_UBYTE0 + I;
2256 SDValue Cvt = DAG.getNode(Opc, DL, MVT::f32, Elt);
2257 DCI.AddToWorklist(Cvt.getNode());
2258 Ops.push_back(Cvt);
2259 }
2260
2261 ++EltIdx;
2262 }
2263
2264 assert(Ops.size() == NElts);
2265
2266 return DAG.getBuildVector(FloatVT, DL, Ops);
2267 }
2268
2269 return SDValue();
2270 }
2271
2272 /// \brief Return true if the given offset Size in bytes can be folded into
2273 /// the immediate offsets of a memory instruction for the given address space.
2274 static bool canFoldOffset(unsigned OffsetSize, unsigned AS,
2275 const AMDGPUSubtarget &STI) {
2276 switch (AS) {
2277 case AMDGPUAS::GLOBAL_ADDRESS: {
2278 // MUBUF instructions have a 12-bit offset in bytes.
2279 return isUInt<12>(OffsetSize);
2280 }
2281 case AMDGPUAS::CONSTANT_ADDRESS: {
2282 // SMRD instructions have an 8-bit offset in dwords on SI and
2283 // a 20-bit offset in bytes on VI.
2284 if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
2285 return isUInt<20>(OffsetSize);
2286 else
2287 return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
2288 }
2289 case AMDGPUAS::LOCAL_ADDRESS:
2290 case AMDGPUAS::REGION_ADDRESS: {
2291 // The single offset versions have a 16-bit offset in bytes.
2292 return isUInt<16>(OffsetSize);
2293 }
2294 case AMDGPUAS::PRIVATE_ADDRESS:
2295 // Indirect register addressing does not use any offsets.
2296 default:
2297 return false;
2298 }
2299 }
2300
2301 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
2302
2303 // This is a variant of
2304 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
2305 //
2306 // The normal DAG combiner will do this, but only if the add has one use since
2307 // that would increase the number of instructions.
2308 //
2309 // This prevents us from seeing a constant offset that can be folded into a
2310 // memory instruction's addressing mode.
If we know the resulting add offset of 2311 // a pointer can be folded into an addressing offset, we can replace the pointer 2312 // operand with the add of new constant offset. This eliminates one of the uses, 2313 // and may allow the remaining use to also be simplified. 2314 // 2315 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N, 2316 unsigned AddrSpace, 2317 DAGCombinerInfo &DCI) const { 2318 SDValue N0 = N->getOperand(0); 2319 SDValue N1 = N->getOperand(1); 2320 2321 if (N0.getOpcode() != ISD::ADD) 2322 return SDValue(); 2323 2324 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1); 2325 if (!CN1) 2326 return SDValue(); 2327 2328 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 2329 if (!CAdd) 2330 return SDValue(); 2331 2332 // If the resulting offset is too large, we can't fold it into the addressing 2333 // mode offset. 2334 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); 2335 if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *Subtarget)) 2336 return SDValue(); 2337 2338 SelectionDAG &DAG = DCI.DAG; 2339 SDLoc SL(N); 2340 EVT VT = N->getValueType(0); 2341 2342 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); 2343 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32); 2344 2345 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset); 2346 } 2347 2348 SDValue SITargetLowering::performAndCombine(SDNode *N, 2349 DAGCombinerInfo &DCI) const { 2350 if (DCI.isBeforeLegalize()) 2351 return SDValue(); 2352 2353 if (SDValue Base = AMDGPUTargetLowering::performAndCombine(N, DCI)) 2354 return Base; 2355 2356 SelectionDAG &DAG = DCI.DAG; 2357 2358 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> 2359 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) 2360 SDValue LHS = N->getOperand(0); 2361 SDValue RHS = N->getOperand(1); 2362 2363 if (LHS.getOpcode() == ISD::SETCC && 2364 RHS.getOpcode() == ISD::SETCC) { 2365 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 2366 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); 2367 2368 SDValue X = LHS.getOperand(0); 2369 SDValue Y = RHS.getOperand(0); 2370 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) 2371 return SDValue(); 2372 2373 if (LCC == ISD::SETO) { 2374 if (X != LHS.getOperand(1)) 2375 return SDValue(); 2376 2377 if (RCC == ISD::SETUNE) { 2378 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); 2379 if (!C1 || !C1->isInfinity() || C1->isNegative()) 2380 return SDValue(); 2381 2382 const uint32_t Mask = SIInstrFlags::N_NORMAL | 2383 SIInstrFlags::N_SUBNORMAL | 2384 SIInstrFlags::N_ZERO | 2385 SIInstrFlags::P_ZERO | 2386 SIInstrFlags::P_SUBNORMAL | 2387 SIInstrFlags::P_NORMAL; 2388 2389 static_assert(((~(SIInstrFlags::S_NAN | 2390 SIInstrFlags::Q_NAN | 2391 SIInstrFlags::N_INFINITY | 2392 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, 2393 "mask not equal"); 2394 2395 SDLoc DL(N); 2396 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 2397 X, DAG.getConstant(Mask, DL, MVT::i32)); 2398 } 2399 } 2400 } 2401 2402 return SDValue(); 2403 } 2404 2405 SDValue SITargetLowering::performOrCombine(SDNode *N, 2406 DAGCombinerInfo &DCI) const { 2407 SelectionDAG &DAG = DCI.DAG; 2408 SDValue LHS = N->getOperand(0); 2409 SDValue RHS = N->getOperand(1); 2410 2411 EVT VT = N->getValueType(0); 2412 if (VT == MVT::i64) { 2413 // TODO: This could be a generic combine with a predicate for extracting the 2414 // high half of an integer being free. 
2415 2416 // (or i64:x, (zero_extend i32:y)) -> 2417 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x))) 2418 if (LHS.getOpcode() == ISD::ZERO_EXTEND && 2419 RHS.getOpcode() != ISD::ZERO_EXTEND) 2420 std::swap(LHS, RHS); 2421 2422 if (RHS.getOpcode() == ISD::ZERO_EXTEND) { 2423 SDValue ExtSrc = RHS.getOperand(0); 2424 EVT SrcVT = ExtSrc.getValueType(); 2425 if (SrcVT == MVT::i32) { 2426 SDLoc SL(N); 2427 SDValue LowLHS, HiBits; 2428 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG); 2429 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc); 2430 2431 DCI.AddToWorklist(LowOr.getNode()); 2432 DCI.AddToWorklist(HiBits.getNode()); 2433 2434 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 2435 LowOr, HiBits); 2436 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 2437 } 2438 } 2439 } 2440 2441 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) 2442 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && 2443 RHS.getOpcode() == AMDGPUISD::FP_CLASS) { 2444 SDValue Src = LHS.getOperand(0); 2445 if (Src != RHS.getOperand(0)) 2446 return SDValue(); 2447 2448 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); 2449 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 2450 if (!CLHS || !CRHS) 2451 return SDValue(); 2452 2453 // Only 10 bits are used. 2454 static const uint32_t MaxMask = 0x3ff; 2455 2456 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; 2457 SDLoc DL(N); 2458 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 2459 Src, DAG.getConstant(NewMask, DL, MVT::i32)); 2460 } 2461 2462 return SDValue(); 2463 } 2464 2465 SDValue SITargetLowering::performClassCombine(SDNode *N, 2466 DAGCombinerInfo &DCI) const { 2467 SelectionDAG &DAG = DCI.DAG; 2468 SDValue Mask = N->getOperand(1); 2469 2470 // fp_class x, 0 -> false 2471 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { 2472 if (CMask->isNullValue()) 2473 return DAG.getConstant(0, SDLoc(N), MVT::i1); 2474 } 2475 2476 return SDValue(); 2477 } 2478 2479 // Constant fold canonicalize. 2480 SDValue SITargetLowering::performFCanonicalizeCombine( 2481 SDNode *N, 2482 DAGCombinerInfo &DCI) const { 2483 ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); 2484 if (!CFP) 2485 return SDValue(); 2486 2487 SelectionDAG &DAG = DCI.DAG; 2488 const APFloat &C = CFP->getValueAPF(); 2489 2490 // Flush denormals to 0 if not enabled. 2491 if (C.isDenormal()) { 2492 EVT VT = N->getValueType(0); 2493 if (VT == MVT::f32 && !Subtarget->hasFP32Denormals()) 2494 return DAG.getConstantFP(0.0, SDLoc(N), VT); 2495 2496 if (VT == MVT::f64 && !Subtarget->hasFP64Denormals()) 2497 return DAG.getConstantFP(0.0, SDLoc(N), VT); 2498 } 2499 2500 if (C.isNaN()) { 2501 EVT VT = N->getValueType(0); 2502 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics()); 2503 if (C.isSignaling()) { 2504 // Quiet a signaling NaN. 2505 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT); 2506 } 2507 2508 // Make sure it is the canonical NaN bitpattern. 2509 // 2510 // TODO: Can we use -1 as the canonical NaN value since it's an inline 2511 // immediate? 
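// (APFloat::getQNaN returns the positive quiet NaN with an all-zero payload, which is the bit pattern the comparison below checks for.)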
2512 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt()) 2513 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT); 2514 } 2515 2516 return SDValue(CFP, 0); 2517 } 2518 2519 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { 2520 switch (Opc) { 2521 case ISD::FMAXNUM: 2522 return AMDGPUISD::FMAX3; 2523 case ISD::SMAX: 2524 return AMDGPUISD::SMAX3; 2525 case ISD::UMAX: 2526 return AMDGPUISD::UMAX3; 2527 case ISD::FMINNUM: 2528 return AMDGPUISD::FMIN3; 2529 case ISD::SMIN: 2530 return AMDGPUISD::SMIN3; 2531 case ISD::UMIN: 2532 return AMDGPUISD::UMIN3; 2533 default: 2534 llvm_unreachable("Not a min/max opcode"); 2535 } 2536 } 2537 2538 static SDValue performIntMed3ImmCombine(SelectionDAG &DAG, 2539 SDLoc SL, 2540 SDValue Op0, 2541 SDValue Op1, 2542 bool Signed) { 2543 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1); 2544 if (!K1) 2545 return SDValue(); 2546 2547 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); 2548 if (!K0) 2549 return SDValue(); 2550 2551 2552 if (Signed) { 2553 if (K0->getAPIntValue().sge(K1->getAPIntValue())) 2554 return SDValue(); 2555 } else { 2556 if (K0->getAPIntValue().uge(K1->getAPIntValue())) 2557 return SDValue(); 2558 } 2559 2560 EVT VT = K0->getValueType(0); 2561 return DAG.getNode(Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3, SL, VT, 2562 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0)); 2563 } 2564 2565 static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) { 2566 if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions()) 2567 return true; 2568 2569 return DAG.isKnownNeverNaN(Op); 2570 } 2571 2572 static SDValue performFPMed3ImmCombine(SelectionDAG &DAG, 2573 SDLoc SL, 2574 SDValue Op0, 2575 SDValue Op1) { 2576 ConstantFPSDNode *K1 = dyn_cast<ConstantFPSDNode>(Op1); 2577 if (!K1) 2578 return SDValue(); 2579 2580 ConstantFPSDNode *K0 = dyn_cast<ConstantFPSDNode>(Op0.getOperand(1)); 2581 if (!K0) 2582 return SDValue(); 2583 2584 // Ordered >= (although NaN inputs should have folded away by now). 2585 APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF()); 2586 if (Cmp == APFloat::cmpGreaterThan) 2587 return SDValue(); 2588 2589 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a 2590 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would then 2591 // give the other result, which is different from med3 with a NaN input. 2592 SDValue Var = Op0.getOperand(0); 2593 if (!isKnownNeverSNan(DAG, Var)) 2594 return SDValue(); 2595 2596 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0), 2597 Var, SDValue(K0, 0), SDValue(K1, 0)); 2598 } 2599 2600 SDValue SITargetLowering::performMinMaxCombine(SDNode *N, 2601 DAGCombinerInfo &DCI) const { 2602 SelectionDAG &DAG = DCI.DAG; 2603 2604 unsigned Opc = N->getOpcode(); 2605 SDValue Op0 = N->getOperand(0); 2606 SDValue Op1 = N->getOperand(1); 2607 2608 // Only do this if the inner op has one use since this will just increases 2609 // register pressure for no benefit. 2610 2611 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY) { 2612 // max(max(a, b), c) -> max3(a, b, c) 2613 // min(min(a, b), c) -> min3(a, b, c) 2614 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { 2615 SDLoc DL(N); 2616 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 2617 DL, 2618 N->getValueType(0), 2619 Op0.getOperand(0), 2620 Op0.getOperand(1), 2621 Op1); 2622 } 2623 2624 // Try commuted. 
2625 // max(a, max(b, c)) -> max3(a, b, c) 2626 // min(a, min(b, c)) -> min3(a, b, c) 2627 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { 2628 SDLoc DL(N); 2629 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 2630 DL, 2631 N->getValueType(0), 2632 Op0, 2633 Op1.getOperand(0), 2634 Op1.getOperand(1)); 2635 } 2636 } 2637 2638 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1) 2639 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) { 2640 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true)) 2641 return Med3; 2642 } 2643 2644 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) { 2645 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false)) 2646 return Med3; 2647 } 2648 2649 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1) 2650 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) || 2651 (Opc == AMDGPUISD::FMIN_LEGACY && 2652 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) && 2653 N->getValueType(0) == MVT::f32 && Op0.hasOneUse()) { 2654 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1)) 2655 return Res; 2656 } 2657 2658 return SDValue(); 2659 } 2660 2661 SDValue SITargetLowering::performSetCCCombine(SDNode *N, 2662 DAGCombinerInfo &DCI) const { 2663 SelectionDAG &DAG = DCI.DAG; 2664 SDLoc SL(N); 2665 2666 SDValue LHS = N->getOperand(0); 2667 SDValue RHS = N->getOperand(1); 2668 EVT VT = LHS.getValueType(); 2669 2670 if (VT != MVT::f32 && VT != MVT::f64) 2671 return SDValue(); 2672 2673 // Match isinf pattern 2674 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) 2675 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 2676 if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) { 2677 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); 2678 if (!CRHS) 2679 return SDValue(); 2680 2681 const APFloat &APF = CRHS->getValueAPF(); 2682 if (APF.isInfinity() && !APF.isNegative()) { 2683 unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY; 2684 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0), 2685 DAG.getConstant(Mask, SL, MVT::i32)); 2686 } 2687 } 2688 2689 return SDValue(); 2690 } 2691 2692 SDValue SITargetLowering::PerformDAGCombine(SDNode *N, 2693 DAGCombinerInfo &DCI) const { 2694 SelectionDAG &DAG = DCI.DAG; 2695 SDLoc DL(N); 2696 2697 switch (N->getOpcode()) { 2698 default: 2699 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 2700 case ISD::SETCC: 2701 return performSetCCCombine(N, DCI); 2702 case ISD::FMAXNUM: 2703 case ISD::FMINNUM: 2704 case ISD::SMAX: 2705 case ISD::SMIN: 2706 case ISD::UMAX: 2707 case ISD::UMIN: 2708 case AMDGPUISD::FMIN_LEGACY: 2709 case AMDGPUISD::FMAX_LEGACY: { 2710 if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG && 2711 N->getValueType(0) != MVT::f64 && 2712 getTargetMachine().getOptLevel() > CodeGenOpt::None) 2713 return performMinMaxCombine(N, DCI); 2714 break; 2715 } 2716 2717 case AMDGPUISD::CVT_F32_UBYTE0: 2718 case AMDGPUISD::CVT_F32_UBYTE1: 2719 case AMDGPUISD::CVT_F32_UBYTE2: 2720 case AMDGPUISD::CVT_F32_UBYTE3: { 2721 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; 2722 SDValue Src = N->getOperand(0); 2723 2724 if (Src.getOpcode() == ISD::SRL) { 2725 // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x 2726 // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x 2727 // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x 2728 2729 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src.getOperand(1))) { 2730 unsigned SrcOffset = 
C->getZExtValue() + 8 * Offset;
2731 if (SrcOffset < 32 && SrcOffset % 8 == 0) {
2732 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, DL,
2733 MVT::f32, Src.getOperand(0));
2734 }
2735 }
2736 }
2737
2738 APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
2739
2740 APInt KnownZero, KnownOne;
2741 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
2742 !DCI.isBeforeLegalizeOps());
2743 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2744 if (TLO.ShrinkDemandedConstant(Src, Demanded) ||
2745 TLI.SimplifyDemandedBits(Src, Demanded, KnownZero, KnownOne, TLO)) {
2746 DCI.CommitTargetLoweringOpt(TLO);
2747 }
2748
2749 break;
2750 }
2751
2752 case ISD::UINT_TO_FP: {
2753 return performUCharToFloatCombine(N, DCI);
2754 }
2755 case ISD::FADD: {
2756 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
2757 break;
2758
2759 EVT VT = N->getValueType(0);
2760 if (VT != MVT::f32)
2761 break;
2762
2763 // Only do this if we are not trying to support denormals. v_mad_f32 does
2764 // not support denormals ever.
2765 if (Subtarget->hasFP32Denormals())
2766 break;
2767
2768 SDValue LHS = N->getOperand(0);
2769 SDValue RHS = N->getOperand(1);
2770
2771 // These should really be instruction patterns, but writing patterns with
2772 // source modifiers is a pain.
2773
2774 // fadd (fadd (a, a), b) -> mad 2.0, a, b
2775 if (LHS.getOpcode() == ISD::FADD) {
2776 SDValue A = LHS.getOperand(0);
2777 if (A == LHS.getOperand(1)) {
2778 const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
2779 return DAG.getNode(ISD::FMAD, DL, VT, Two, A, RHS);
2780 }
2781 }
2782
2783 // fadd (b, fadd (a, a)) -> mad 2.0, a, b
2784 if (RHS.getOpcode() == ISD::FADD) {
2785 SDValue A = RHS.getOperand(0);
2786 if (A == RHS.getOperand(1)) {
2787 const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
2788 return DAG.getNode(ISD::FMAD, DL, VT, Two, A, LHS);
2789 }
2790 }
2791
2792 return SDValue();
2793 }
2794 case ISD::FSUB: {
2795 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
2796 break;
2797
2798 EVT VT = N->getValueType(0);
2799
2800 // Try to get the fneg to fold into the source modifier. This undoes generic
2801 // DAG combines and folds them into the mad.
2802 //
2803 // Only do this if we are not trying to support denormals. v_mad_f32 does
2804 // not support denormals ever.
2805 if (VT == MVT::f32 && 2806 !Subtarget->hasFP32Denormals()) { 2807 SDValue LHS = N->getOperand(0); 2808 SDValue RHS = N->getOperand(1); 2809 if (LHS.getOpcode() == ISD::FADD) { 2810 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c) 2811 2812 SDValue A = LHS.getOperand(0); 2813 if (A == LHS.getOperand(1)) { 2814 const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32); 2815 SDValue NegRHS = DAG.getNode(ISD::FNEG, DL, VT, RHS); 2816 2817 return DAG.getNode(ISD::FMAD, DL, VT, Two, A, NegRHS); 2818 } 2819 } 2820 2821 if (RHS.getOpcode() == ISD::FADD) { 2822 // (fsub c, (fadd a, a)) -> mad -2.0, a, c 2823 2824 SDValue A = RHS.getOperand(0); 2825 if (A == RHS.getOperand(1)) { 2826 const SDValue NegTwo = DAG.getConstantFP(-2.0, DL, MVT::f32); 2827 return DAG.getNode(ISD::FMAD, DL, VT, NegTwo, A, LHS); 2828 } 2829 } 2830 2831 return SDValue(); 2832 } 2833 2834 break; 2835 } 2836 case ISD::LOAD: 2837 case ISD::STORE: 2838 case ISD::ATOMIC_LOAD: 2839 case ISD::ATOMIC_STORE: 2840 case ISD::ATOMIC_CMP_SWAP: 2841 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 2842 case ISD::ATOMIC_SWAP: 2843 case ISD::ATOMIC_LOAD_ADD: 2844 case ISD::ATOMIC_LOAD_SUB: 2845 case ISD::ATOMIC_LOAD_AND: 2846 case ISD::ATOMIC_LOAD_OR: 2847 case ISD::ATOMIC_LOAD_XOR: 2848 case ISD::ATOMIC_LOAD_NAND: 2849 case ISD::ATOMIC_LOAD_MIN: 2850 case ISD::ATOMIC_LOAD_MAX: 2851 case ISD::ATOMIC_LOAD_UMIN: 2852 case ISD::ATOMIC_LOAD_UMAX: 2853 case AMDGPUISD::ATOMIC_INC: 2854 case AMDGPUISD::ATOMIC_DEC: { // TODO: Target mem intrinsics. 2855 if (DCI.isBeforeLegalize()) 2856 break; 2857 2858 MemSDNode *MemNode = cast<MemSDNode>(N); 2859 SDValue Ptr = MemNode->getBasePtr(); 2860 2861 // TODO: We could also do this for multiplies. 2862 unsigned AS = MemNode->getAddressSpace(); 2863 if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUAS::PRIVATE_ADDRESS) { 2864 SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI); 2865 if (NewPtr) { 2866 SmallVector<SDValue, 8> NewOps(MemNode->op_begin(), MemNode->op_end()); 2867 2868 NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr; 2869 return SDValue(DAG.UpdateNodeOperands(MemNode, NewOps), 0); 2870 } 2871 } 2872 break; 2873 } 2874 case ISD::AND: 2875 return performAndCombine(N, DCI); 2876 case ISD::OR: 2877 return performOrCombine(N, DCI); 2878 case AMDGPUISD::FP_CLASS: 2879 return performClassCombine(N, DCI); 2880 case ISD::FCANONICALIZE: 2881 return performFCanonicalizeCombine(N, DCI); 2882 } 2883 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 2884 } 2885 2886 /// \brief Analyze the possible immediate value Op 2887 /// 2888 /// Returns -1 if it isn't an immediate, 0 if it's and inline immediate 2889 /// and the immediate value if it's a literal immediate 2890 int32_t SITargetLowering::analyzeImmediate(const SDNode *N) const { 2891 2892 const SIInstrInfo *TII = 2893 static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); 2894 2895 if (const ConstantSDNode *Node = dyn_cast<ConstantSDNode>(N)) { 2896 if (TII->isInlineConstant(Node->getAPIntValue())) 2897 return 0; 2898 2899 uint64_t Val = Node->getZExtValue(); 2900 return isUInt<32>(Val) ? 
Val : -1; 2901 } 2902 2903 if (const ConstantFPSDNode *Node = dyn_cast<ConstantFPSDNode>(N)) { 2904 if (TII->isInlineConstant(Node->getValueAPF().bitcastToAPInt())) 2905 return 0; 2906 2907 if (Node->getValueType(0) == MVT::f32) 2908 return FloatToBits(Node->getValueAPF().convertToFloat()); 2909 2910 return -1; 2911 } 2912 2913 return -1; 2914 } 2915 2916 /// \brief Helper function for adjustWritemask 2917 static unsigned SubIdx2Lane(unsigned Idx) { 2918 switch (Idx) { 2919 default: return 0; 2920 case AMDGPU::sub0: return 0; 2921 case AMDGPU::sub1: return 1; 2922 case AMDGPU::sub2: return 2; 2923 case AMDGPU::sub3: return 3; 2924 } 2925 } 2926 2927 /// \brief Adjust the writemask of MIMG instructions 2928 void SITargetLowering::adjustWritemask(MachineSDNode *&Node, 2929 SelectionDAG &DAG) const { 2930 SDNode *Users[4] = { }; 2931 unsigned Lane = 0; 2932 unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 2 : 3; 2933 unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx); 2934 unsigned NewDmask = 0; 2935 2936 // Try to figure out the used register components 2937 for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end(); 2938 I != E; ++I) { 2939 2940 // Abort if we can't understand the usage 2941 if (!I->isMachineOpcode() || 2942 I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) 2943 return; 2944 2945 // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used. 2946 // Note that subregs are packed, i.e. Lane==0 is the first bit set 2947 // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit 2948 // set, etc. 2949 Lane = SubIdx2Lane(I->getConstantOperandVal(1)); 2950 2951 // Set which texture component corresponds to the lane. 2952 unsigned Comp; 2953 for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) { 2954 assert(Dmask); 2955 Comp = countTrailingZeros(Dmask); 2956 Dmask &= ~(1 << Comp); 2957 } 2958 2959 // Abort if we have more than one user per component 2960 if (Users[Lane]) 2961 return; 2962 2963 Users[Lane] = *I; 2964 NewDmask |= 1 << Comp; 2965 } 2966 2967 // Abort if there's no change 2968 if (NewDmask == OldDmask) 2969 return; 2970 2971 // Adjust the writemask in the node 2972 std::vector<SDValue> Ops; 2973 Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx); 2974 Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32)); 2975 Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end()); 2976 Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops); 2977 2978 // If we only got one lane, replace it with a copy 2979 // (if NewDmask has only one bit set...) 
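// NewDmask & (NewDmask - 1) clears the lowest set bit, so the test below succeeds exactly when NewDmask is a nonzero power of two, i.e. a single lane is used.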
2980 if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
2981 SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, SDLoc(),
2982 MVT::i32);
2983 SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
2984 SDLoc(), Users[Lane]->getValueType(0),
2985 SDValue(Node, 0), RC);
2986 DAG.ReplaceAllUsesWith(Users[Lane], Copy);
2987 return;
2988 }
2989
2990 // Update the users of the node with the new indices
2991 for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
2992
2993 SDNode *User = Users[i];
2994 if (!User)
2995 continue;
2996
2997 SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
2998 DAG.UpdateNodeOperands(User, User->getOperand(0), Op);
2999
3000 switch (Idx) {
3001 default: break;
3002 case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
3003 case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
3004 case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
3005 }
3006 }
3007 }
3008
3009 static bool isFrameIndexOp(SDValue Op) {
3010 if (Op.getOpcode() == ISD::AssertZext)
3011 Op = Op.getOperand(0);
3012
3013 return isa<FrameIndexSDNode>(Op);
3014 }
3015
3016 /// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
3017 /// with frame index operands.
3018 /// LLVM assumes that inputs to these instructions are registers.
3019 void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
3020 SelectionDAG &DAG) const {
3021
3022 SmallVector<SDValue, 8> Ops;
3023 for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
3024 if (!isFrameIndexOp(Node->getOperand(i))) {
3025 Ops.push_back(Node->getOperand(i));
3026 continue;
3027 }
3028
3029 SDLoc DL(Node);
3030 Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
3031 Node->getOperand(i).getValueType(),
3032 Node->getOperand(i)), 0));
3033 }
3034
3035 DAG.UpdateNodeOperands(Node, Ops);
3036 }
3037
3038 /// \brief Fold the instructions after selecting them.
3039 SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
3040 SelectionDAG &DAG) const {
3041 const SIInstrInfo *TII =
3042 static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
3043 unsigned Opcode = Node->getMachineOpcode();
3044
3045 if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore())
3046 adjustWritemask(Node, DAG);
3047
3048 if (Opcode == AMDGPU::INSERT_SUBREG ||
3049 Opcode == AMDGPU::REG_SEQUENCE) {
3050 legalizeTargetIndependentNode(Node, DAG);
3051 return Node;
3052 }
3053 return Node;
3054 }
3055
3056 /// \brief Assign the register class depending on the number of
3057 /// bits set in the writemask
3058 void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
3059 SDNode *Node) const {
3060 const SIInstrInfo *TII =
3061 static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
3062
3063 MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
3064
3065 if (TII->isVOP3(MI->getOpcode())) {
3066 // Make sure constant bus requirements are respected.
3067 TII->legalizeOperandsVOP3(MRI, MI);
3068 return;
3069 }
3070
3071 if (TII->isMIMG(*MI)) {
3072 unsigned VReg = MI->getOperand(0).getReg();
3073 unsigned DmaskIdx = MI->getNumOperands() == 12 ? 3 : 4;
3074 unsigned Writemask = MI->getOperand(DmaskIdx).getImm();
3075 unsigned BitsSet = 0;
3076 for (unsigned i = 0; i < 4; ++i)
3077 BitsSet += Writemask & (1 << i) ?
1 : 0; 3078 3079 const TargetRegisterClass *RC; 3080 switch (BitsSet) { 3081 default: return; 3082 case 1: RC = &AMDGPU::VGPR_32RegClass; break; 3083 case 2: RC = &AMDGPU::VReg_64RegClass; break; 3084 case 3: RC = &AMDGPU::VReg_96RegClass; break; 3085 } 3086 3087 unsigned NewOpcode = TII->getMaskedMIMGOp(MI->getOpcode(), BitsSet); 3088 MI->setDesc(TII->get(NewOpcode)); 3089 MRI.setRegClass(VReg, RC); 3090 return; 3091 } 3092 3093 // Replace unused atomics with the no return version. 3094 int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI->getOpcode()); 3095 if (NoRetAtomicOp != -1) { 3096 if (!Node->hasAnyUseOfValue(0)) { 3097 MI->setDesc(TII->get(NoRetAtomicOp)); 3098 MI->RemoveOperand(0); 3099 return; 3100 } 3101 3102 // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg 3103 // instruction, because the return type of these instructions is a vec2 of 3104 // the memory type, so it can be tied to the input operand. 3105 // This means these instructions always have a use, so we need to add a 3106 // special case to check if the atomic has only one extract_subreg use, 3107 // which itself has no uses. 3108 if ((Node->hasNUsesOfValue(1, 0) && 3109 Node->use_begin()->isMachineOpcode() && 3110 Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG && 3111 !Node->use_begin()->hasAnyUseOfValue(0))) { 3112 unsigned Def = MI->getOperand(0).getReg(); 3113 3114 // Change this into a noret atomic. 3115 MI->setDesc(TII->get(NoRetAtomicOp)); 3116 MI->RemoveOperand(0); 3117 3118 // If we only remove the def operand from the atomic instruction, the 3119 // extract_subreg will be left with a use of a vreg without a def. 3120 // So we need to insert an implicit_def to avoid machine verifier 3121 // errors. 3122 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), 3123 TII->get(AMDGPU::IMPLICIT_DEF), Def); 3124 } 3125 return; 3126 } 3127 } 3128 3129 static SDValue buildSMovImm32(SelectionDAG &DAG, SDLoc DL, uint64_t Val) { 3130 SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32); 3131 return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0); 3132 } 3133 3134 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, 3135 SDLoc DL, 3136 SDValue Ptr) const { 3137 const SIInstrInfo *TII = 3138 static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); 3139 3140 // Build the half of the subregister with the constants before building the 3141 // full 128-bit register. If we are building multiple resource descriptors, 3142 // this will allow CSEing of the 2-component register. 3143 const SDValue Ops0[] = { 3144 DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32), 3145 buildSMovImm32(DAG, DL, 0), 3146 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), 3147 buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32), 3148 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32) 3149 }; 3150 3151 SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, 3152 MVT::v2i32, Ops0), 0); 3153 3154 // Combine the constants and the pointer. 
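// REG_SEQUENCE operands alternate value and subregister index after the register class ID: the 64-bit pointer fills sub0_sub1 and SubRegHi fills sub2_sub3 of the 128-bit descriptor.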
3155 const SDValue Ops1[] = { 3156 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), 3157 Ptr, 3158 DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32), 3159 SubRegHi, 3160 DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32) 3161 }; 3162 3163 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); 3164 } 3165 3166 /// \brief Return a resource descriptor with the 'Add TID' bit enabled 3167 /// The TID (Thread ID) is multiplied by the stride value (bits [61:48] 3168 /// of the resource descriptor) to create an offset, which is added to 3169 /// the resource pointer. 3170 MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, 3171 SDLoc DL, 3172 SDValue Ptr, 3173 uint32_t RsrcDword1, 3174 uint64_t RsrcDword2And3) const { 3175 SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr); 3176 SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr); 3177 if (RsrcDword1) { 3178 PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi, 3179 DAG.getConstant(RsrcDword1, DL, MVT::i32)), 3180 0); 3181 } 3182 3183 SDValue DataLo = buildSMovImm32(DAG, DL, 3184 RsrcDword2And3 & UINT64_C(0xFFFFFFFF)); 3185 SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32); 3186 3187 const SDValue Ops[] = { 3188 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), 3189 PtrLo, 3190 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), 3191 PtrHi, 3192 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32), 3193 DataLo, 3194 DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32), 3195 DataHi, 3196 DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32) 3197 }; 3198 3199 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops); 3200 } 3201 3202 SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG, 3203 const TargetRegisterClass *RC, 3204 unsigned Reg, EVT VT) const { 3205 SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT); 3206 3207 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()), 3208 cast<RegisterSDNode>(VReg)->getReg(), VT); 3209 } 3210 3211 //===----------------------------------------------------------------------===// 3212 // SI Inline Assembly Support 3213 //===----------------------------------------------------------------------===// 3214 3215 std::pair<unsigned, const TargetRegisterClass *> 3216 SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 3217 StringRef Constraint, 3218 MVT VT) const { 3219 3220 if (Constraint.size() == 1) { 3221 switch (Constraint[0]) { 3222 case 's': 3223 case 'r': 3224 switch (VT.getSizeInBits()) { 3225 default: 3226 return std::make_pair(0U, nullptr); 3227 case 32: 3228 return std::make_pair(0U, &AMDGPU::SGPR_32RegClass); 3229 case 64: 3230 return std::make_pair(0U, &AMDGPU::SGPR_64RegClass); 3231 case 128: 3232 return std::make_pair(0U, &AMDGPU::SReg_128RegClass); 3233 case 256: 3234 return std::make_pair(0U, &AMDGPU::SReg_256RegClass); 3235 } 3236 3237 case 'v': 3238 switch (VT.getSizeInBits()) { 3239 default: 3240 return std::make_pair(0U, nullptr); 3241 case 32: 3242 return std::make_pair(0U, &AMDGPU::VGPR_32RegClass); 3243 case 64: 3244 return std::make_pair(0U, &AMDGPU::VReg_64RegClass); 3245 case 96: 3246 return std::make_pair(0U, &AMDGPU::VReg_96RegClass); 3247 case 128: 3248 return std::make_pair(0U, &AMDGPU::VReg_128RegClass); 3249 case 256: 3250 return std::make_pair(0U, &AMDGPU::VReg_256RegClass); 3251 case 512: 3252 return std::make_pair(0U, &AMDGPU::VReg_512RegClass); 3253 } 
3254 } 3255 } 3256 3257 if (Constraint.size() > 1) { 3258 const TargetRegisterClass *RC = nullptr; 3259 if (Constraint[1] == 'v') { 3260 RC = &AMDGPU::VGPR_32RegClass; 3261 } else if (Constraint[1] == 's') { 3262 RC = &AMDGPU::SGPR_32RegClass; 3263 } 3264 3265 if (RC) { 3266 uint32_t Idx; 3267 bool Failed = Constraint.substr(2).getAsInteger(10, Idx); 3268 if (!Failed && Idx < RC->getNumRegs()) 3269 return std::make_pair(RC->getRegister(Idx), RC); 3270 } 3271 } 3272 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 3273 } 3274 3275 SITargetLowering::ConstraintType 3276 SITargetLowering::getConstraintType(StringRef Constraint) const { 3277 if (Constraint.size() == 1) { 3278 switch (Constraint[0]) { 3279 default: break; 3280 case 's': 3281 case 'v': 3282 return C_RegisterClass; 3283 } 3284 } 3285 return TargetLowering::getConstraintType(Constraint); 3286 } 3287