//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#ifdef _MSC_VER
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

// Find the first SGPR not yet allocated in \p CCInfo.
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const SISubtarget &STI)
    : AMDGPUTargetLowering(TM, STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
  }

  computeRegisterProperties(STI.getRegisterInfo());

  // We need to custom lower vector stores from local memory
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                 MVT::v2i64, MVT::v2f64}) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling
  // and output demarshalling.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value;
  // let LLVM add the comparison.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (getSubtarget()->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // This is s_memtime on SI and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);

  if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);
  }

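  // DAG nodes we want a chance to combine in PerformDAGCombine().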
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);

  setSchedulingPreference(Sched::RegPressure);
}

const SISubtarget *SITargetLowering::getSubtarget() const {
  return static_cast<const SISubtarget *>(Subtarget);
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          unsigned IntrID) const {
  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;
    Info.vol = false;
    Info.readMem = true;
    Info.writeMem = true;
    return true;
  default:
    return false;
  }
}

bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &,
                                          EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  // Flat instructions do not have offsets, and only have the register
  // address.
  return AM.BaseOffs == 0 && (AM.Scale == 0 || AM.Scale == 1);
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so they are slightly
  // different from the normal addr64.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r,
    // and 2 * r + i as r + r + i.
    return true;
  default: // Don't allow n * r
    return false;
  }
}

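// Addressing-mode legality is decided per address space: global and constant
// accesses may use MUBUF, SMRD or FLAT forms depending on the subtarget,
// private (scratch) always goes through MUBUF, and LDS/GDS use the 16-bit
// DS instruction offset.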
bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  switch (AS) {
  case AMDGPUAS::GLOBAL_ADDRESS:
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // Assume we will use FLAT for all global memory accesses
      // on VI.
      // FIXME: This assumption is currently wrong. On VI we still use
      // MUBUF instructions for the r + i addressing mode. As currently
      // implemented, the MUBUF instructions only work on buffers < 4GB.
      // It may be possible to support > 4GB buffers with MUBUF instructions,
      // by setting the stride value in the resource descriptor which would
      // increase the size limit to (stride * 4GB). However, this is risky,
      // because it has never been validated.
      return isLegalFlatAddressingMode(AM);
    }

    return isLegalMUBUFAddressingMode(AM);

  case AMDGPUAS::CONSTANT_ADDRESS:
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (DL.getTypeStoreSize(Ty) < 4)
      return isLegalMUBUFAddressingMode(AM);

    if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8 bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is a 20-bit byte
      // offset.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;

  case AMDGPUAS::PRIVATE_ADDRESS:
    return isLegalMUBUFAddressingMode(AM);

  case AMDGPUAS::LOCAL_ADDRESS:
  case AMDGPUAS::REGION_ADDRESS:
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;

  case AMDGPUAS::FLAT_ADDRESS:
  case AMDGPUAS::UNKNOWN_ADDRESS_SPACE:
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);

  default:
    llvm_unreachable("unhandled address space");
  }
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch. If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
    return false;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have a uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS) ?
        (Align % 4 == 0) : true;
    }

    return true;
  }

  // Accesses smaller than a dword must be aligned.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                          unsigned SrcAlign, bool IsMemset,
                                          bool ZeroMemset,
                                          bool MemcpyStrSrc,
                                          MachineFunction &MF) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type to
  // use. Make sure we switch these to 64-bit accesses.

  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
    return MVT::v4i32;

  if (Size >= 8 && DstAlign >= 4)
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}

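// Pointers in these address spaces share the same underlying 64-bit
// representation, so a cast between any two of them is a no-op.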
static bool isFlatGlobalAddrSpace(unsigned AS) {
  return AS == AMDGPUAS::GLOBAL_ADDRESS ||
         AS == AMDGPUAS::FLAT_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS;
}

bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
}

bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();
  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.noclobber");
}

bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
                                            unsigned DestAS) const {
  // Flat -> private/local is a simple truncate.
  // Flat -> global is no-op
  if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
    return true;

  return isNoopAddrSpaceCast(SrcAS, DestAS);
}

bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();

  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(EVT VT) const {
  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
    return TypeSplitVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                         Type *Ty) const {
  // FIXME: Could be smarter if called for vector constants.
  return true;
}

bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
  if (Subtarget->has16BitInsts() && VT == MVT::i16) {
    switch (Op) {
    case ISD::LOAD:
    case ISD::STORE:

    // These operations are done with 32-bit instructions anyway.
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SELECT:
      // TODO: Extensions?
      return true;
    default:
      return false;
    }
  }

  // SimplifySetCC uses this function to determine whether or not it should
  // create setcc with i1 operands. We don't have instructions for i1 setcc.
  if (VT == MVT::i1 && Op == ISD::SETCC)
    return false;

  return TargetLowering::isTypeDesirableForOp(Op, VT);
}

SDValue SITargetLowering::LowerParameterPtr(SelectionDAG &DAG,
                                            const SDLoc &SL, SDValue Chain,
                                            unsigned Offset) const {
  const DataLayout &DL = DAG.getDataLayout();
  MachineFunction &MF = DAG.getMachineFunction();
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
  unsigned InputPtrReg =
      TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);

  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
                                       MRI.getLiveInVirtReg(InputPtrReg), PtrVT);
  return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                     DAG.getConstant(Offset, SL, PtrVT));
}

SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                         const SDLoc &SL, SDValue Chain,
                                         unsigned Offset, bool Signed,
                                         const ISD::InputArg *Arg) const {
  const DataLayout &DL = DAG.getDataLayout();
  Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  unsigned Align = DL.getABITypeAlignment(Ty);

  SDValue Ptr = LowerParameterPtr(DAG, SL, Chain, Offset);
  SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
                             MachineMemOperand::MONonTemporal |
                             MachineMemOperand::MODereferenceable |
                             MachineMemOperand::MOInvariant);

  SDValue Val = Load;
  if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
      VT.bitsLT(MemVT)) {
    unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
    Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
  }

  if (MemVT.isFloatingPoint())
    Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
  else if (Signed)
    Val = DAG.getSExtOrTrunc(Val, SL, VT);
  else
    Val = DAG.getZExtOrTrunc(Val, SL, VT);

  return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
}

SDValue SITargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  MachineFunction &MF = DAG.getMachineFunction();
  FunctionType *FType = MF.getFunction()->getFunctionType();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
    const Function *Fn = MF.getFunction();
    DiagnosticInfoUnsupported NoGraphicsHSA(
        *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
    DAG.getContext()->diagnose(NoGraphicsHSA);
    return DAG.getEntryNode();
  }

  // Create stack objects that are used for emitting debugger prologue if
  // "amdgpu-debugger-emit-prologue" attribute was specified.
  if (ST.debuggerEmitPrologue())
    createDebuggerPrologueStackObjects(MF);

  SmallVector<ISD::InputArg, 16> Splits;
  BitVector Skipped(Ins.size());

  for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];

    // First check if it's a PS input addr.
    if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
        !Arg.Flags.isByVal() && PSInputNum <= 15) {

      if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
        // We can safely skip PS inputs.
        Skipped.set(i);
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (Arg.Used)
        Info->PSInputEna |= 1 << PSInputNum;

      ++PSInputNum;
    }

    if (AMDGPU::isShader(CallConv)) {
      // Second, split vertices into their elements.
      if (Arg.VT.isVector()) {
        ISD::InputArg NewArg = Arg;
        NewArg.Flags.setSplit();
        NewArg.VT = Arg.VT.getVectorElementType();

        // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
        // three or five element vertex only needs three or five registers,
        // NOT four or eight.
        Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
        unsigned NumElements = ParamType->getVectorNumElements();

        for (unsigned j = 0; j != NumElements; ++j) {
          Splits.push_back(NewArg);
          NewArg.PartOffset += NewArg.VT.getStoreSize();
        }
      } else {
        Splits.push_back(Arg);
      }
    }
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // At least one interpolation mode must be enabled or else the GPU will hang.
  //
  // Check PSInputAddr instead of PSInputEna. The idea is that if the user set
  // PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CallConv == CallingConv::AMDGPU_PS &&
      ((Info->getPSInputAddr() & 0x7F) == 0 ||
       ((Info->getPSInputAddr() & 0xF) == 0 && Info->isPSInputAllocated(11)))) {
    CCInfo.AllocateReg(AMDGPU::VGPR0);
    CCInfo.AllocateReg(AMDGPU::VGPR1);
    Info->markPSInputAllocated(0);
    Info->PSInputEna |= 1;
  }

  if (!AMDGPU::isShader(CallConv)) {
    assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
  } else {
    assert(!Info->hasPrivateSegmentBuffer() && !Info->hasDispatchPtr() &&
           !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
           !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
           !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
           !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
           !Info->hasWorkItemIDZ());
  }

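  // Allocate the preloaded user SGPR inputs this kernel requested. The
  // allocation order below is what assigns their SGPR numbers, so it has to
  // match the order in which the hardware preloads them.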
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info->hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info->hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info->addDispatchPtr(*TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info->hasQueuePtr()) {
    unsigned QueuePtrReg = Info->addQueuePtr(*TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info->hasKernargSegmentPtr()) {
    unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI);
    MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info->hasDispatchID()) {
    unsigned DispatchIDReg = Info->addDispatchID(*TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info->hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  if (!AMDGPU::isShader(CallConv))
    analyzeFormalArgumentsCompute(CCInfo, Ins);
  else
    AnalyzeFormalArguments(CCInfo, Splits);

  SmallVector<SDValue, 16> Chains;

  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];
    if (Skipped[i]) {
      InVals.push_back(DAG.getUNDEF(Arg.VT));
      continue;
    }

    CCValAssign &VA = ArgLocs[ArgIdx++];
    MVT VT = VA.getLocVT();

    if (VA.isMemLoc()) {
      VT = Ins[i].VT;
      EVT MemVT = VA.getLocVT();
      const unsigned Offset = Subtarget->getExplicitKernelArgOffset() +
                              VA.getLocMemOffset();
      // The first 36 bytes of the input buffer contain information about
      // thread group and global sizes.
      SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, Chain,
                                   Offset, Ins[i].Flags.isSExt(),
                                   &Ins[i]);
      Chains.push_back(Arg.getValue(1));

      auto *ParamTy =
        dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
      if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
          ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
        // On SI local pointers are just offsets into LDS, so they are always
        // less than 16 bits. On CI and newer they could potentially be
        // real pointers, so we can't guarantee their size.
        Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
                          DAG.getValueType(MVT::i16));
      }

      InVals.push_back(Arg);
      Info->setABIArgOffset(Offset + MemVT.getStoreSize());
      continue;
    }
    assert(VA.isRegLoc() && "Parameter must be in a register!");

    unsigned Reg = VA.getLocReg();

    if (VT == MVT::i64) {
      // For now assume it is a pointer.
      Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0,
                                     &AMDGPU::SGPR_64RegClass);
      Reg = MF.addLiveIn(Reg, &AMDGPU::SGPR_64RegClass);
      SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
      InVals.push_back(Copy);
      continue;
    }

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);

    Reg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);

    if (Arg.VT.isVector()) {
      // Build a vector from the registers.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      SmallVector<SDValue, 4> Regs;
      Regs.push_back(Val);
      for (unsigned j = 1; j != NumElements; ++j) {
        Reg = ArgLocs[ArgIdx++].getLocReg();
        Reg = MF.addLiveIn(Reg, RC);

        SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
        Regs.push_back(Copy);
      }

      // Fill up the missing vector elements.
      NumElements = Arg.VT.getVectorNumElements() - NumElements;
      Regs.append(NumElements, DAG.getUNDEF(VT));

      InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
      continue;
    }

    InVals.push_back(Val);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.

  // Start adding system SGPRs.
  if (Info->hasWorkGroupIDX()) {
    unsigned Reg = Info->addWorkGroupIDX();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupIDY()) {
    unsigned Reg = Info->addWorkGroupIDY();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupIDZ()) {
    unsigned Reg = Info->addWorkGroupIDZ();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupInfo()) {
    unsigned Reg = Info->addWorkGroupInfo();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg;

    if (AMDGPU::isShader(CallConv)) {
      PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
      Info->setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
    } else
      PrivateSegmentWaveByteOffsetReg = Info->addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }

  // Now that we've figured out where the scratch register inputs are, see if
  // we should reserve the arguments and use them directly.
  bool HasStackObjects = MF.getFrameInfo().hasStackObjects();
  // Record that we know we have non-spill stack objects so we don't need to
  // check all stack objects later.
  if (HasStackObjects)
    Info->setHasNonSpillStackObjects(true);

  // Everything live out of a block is spilled with fast regalloc, so it's
  // almost certain that spilling will be required.
  if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
    HasStackObjects = true;

  if (ST.isAmdCodeObjectV2()) {
    if (HasStackObjects) {
      // If we have stack objects, we unquestionably need the private buffer
      // resource. For the Code Object V2 ABI, this will be the first 4 user
      // SGPR inputs. We can reserve those and use them directly.

      unsigned PrivateSegmentBufferReg = TRI->getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
      Info->setScratchRSrcReg(PrivateSegmentBufferReg);

      unsigned PrivateSegmentWaveByteOffsetReg = TRI->getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info->setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
    } else {
      unsigned ReservedBufferReg
        = TRI->reservedPrivateSegmentBufferReg(MF);
      unsigned ReservedOffsetReg
        = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);

      // We tentatively reserve the last registers (skipping the last two
      // which may contain VCC). After register allocation, we'll replace
      // these with the ones immediately after those which were really
      // allocated. In the prologue, copies will be inserted from the argument
      // to these reserved registers.
      Info->setScratchRSrcReg(ReservedBufferReg);
      Info->setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  } else {
    unsigned ReservedBufferReg = TRI->reservedPrivateSegmentBufferReg(MF);

    // Without HSA, relocations are used for the scratch pointer and the
    // buffer resource setup is always inserted in the prologue. Scratch wave
    // offset is still in an input SGPR.
    Info->setScratchRSrcReg(ReservedBufferReg);

    if (HasStackObjects) {
      unsigned ScratchWaveOffsetReg = TRI->getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info->setScratchWaveOffsetReg(ScratchWaveOffsetReg);
    } else {
      unsigned ReservedOffsetReg
        = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);
      Info->setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  }

  if (Info->hasWorkItemIDX()) {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkItemIDY()) {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkItemIDZ()) {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Chains.empty())
    return Chain;

  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}

SDValue
SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                              bool isVarArg,
                              const SmallVectorImpl<ISD::OutputArg> &Outs,
                              const SmallVectorImpl<SDValue> &OutVals,
                              const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  if (!AMDGPU::isShader(CallConv))
    return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
                                             OutVals, DL, DAG);

  Info->setIfReturnsVoid(Outs.size() == 0);

  SmallVector<ISD::OutputArg, 48> Splits;
  SmallVector<SDValue, 48> SplitVals;

  // Split vectors into their elements.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    const ISD::OutputArg &Out = Outs[i];

    if (Out.VT.isVector()) {
      MVT VT = Out.VT.getVectorElementType();
      ISD::OutputArg NewOut = Out;
      NewOut.Flags.setSplit();
      NewOut.VT = VT;

      // We want the original number of vector elements here, e.g.
      // three or five, not four or eight.
      unsigned NumElements = Out.ArgVT.getVectorNumElements();

      for (unsigned j = 0; j != NumElements; ++j) {
        SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i],
                                   DAG.getConstant(j, DL, MVT::i32));
        SplitVals.push_back(Elem);
        Splits.push_back(NewOut);
        NewOut.PartOffset += NewOut.VT.getStoreSize();
      }
    } else {
      SplitVals.push_back(OutVals[i]);
      Splits.push_back(Out);
    }
  }

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 48> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze outgoing return values.
  AnalyzeReturn(CCInfo, Splits);

  SDValue Flag;
  SmallVector<SDValue, 48> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = SplitVals[realRVLocIdx];

    // Copied from other backends.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Flag.getNode())
    RetOps.push_back(Flag);

  unsigned Opc = Info->returnsVoid() ? AMDGPUISD::ENDPGM : AMDGPUISD::RETURN;
  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}

unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                             SelectionDAG &DAG) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
    .Case("m0", AMDGPU::M0)
    .Case("exec", AMDGPU::EXEC)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Default(AMDGPU::NoRegister);

  if (Reg == AMDGPU::NoRegister) {
    report_fatal_error(Twine("invalid register name \""
                             + StringRef(RegName) + "\"."));

  }

  if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
      Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
    report_fatal_error(Twine("invalid register \""
                             + StringRef(RegName) + "\" for subtarget."));
  }

  switch (Reg) {
  case AMDGPU::M0:
  case AMDGPU::EXEC_LO:
  case AMDGPU::EXEC_HI:
  case AMDGPU::FLAT_SCR_LO:
  case AMDGPU::FLAT_SCR_HI:
    if (VT.getSizeInBits() == 32)
      return Reg;
    break;
  case AMDGPU::EXEC:
  case AMDGPU::FLAT_SCR:
    if (VT.getSizeInBits() == 64)
      return Reg;
    break;
  default:
    llvm_unreachable("missing register type checking");
  }

  report_fatal_error(Twine("invalid type for register \""
                           + StringRef(RegName) + "\"."));
}

// If kill is not the last instruction, split the block so kill is always a
// proper terminator.
MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
                                                    MachineBasicBlock *BB) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  MachineBasicBlock::iterator SplitPoint(&MI);
  ++SplitPoint;

  if (SplitPoint == BB->end()) {
    // Don't bother with a new block.
    MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR));
    return BB;
  }

  MachineFunction *MF = BB->getParent();
  MachineBasicBlock *SplitBB
    = MF->CreateMachineBasicBlock(BB->getBasicBlock());

  MF->insert(++MachineFunction::iterator(BB), SplitBB);
  SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());

  SplitBB->transferSuccessorsAndUpdatePHIs(BB);
  BB->addSuccessor(SplitBB);

  MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR));
  return SplitBB;
}

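// The helpers below implement a "waterfall" loop for indirect indexing with a
// possibly divergent (VGPR) index: v_readfirstlane_b32 picks one index value
// per iteration, the lanes holding that value are selected via EXEC masking
// and handled, and the loop repeats until every lane has been covered.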
// Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
// wavefront. If the value is uniform and just happens to be in a VGPR, this
// will only do one iteration. In the worst case, this will loop 64 times.
//
// TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
  const SIInstrInfo *TII,
  MachineRegisterInfo &MRI,
  MachineBasicBlock &OrigBB,
  MachineBasicBlock &LoopBB,
  const DebugLoc &DL,
  const MachineOperand &IdxReg,
  unsigned InitReg,
  unsigned ResultReg,
  unsigned PhiReg,
  unsigned InitSaveExecReg,
  int Offset,
  bool UseGPRIdxMode) {
  MachineBasicBlock::iterator I = LoopBB.begin();

  unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
    .addReg(InitReg)
    .addMBB(&OrigBB)
    .addReg(ResultReg)
    .addMBB(&LoopBB);

  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
    .addReg(InitSaveExecReg)
    .addMBB(&OrigBB)
    .addReg(NewExec)
    .addMBB(&LoopBB);

  // Read the next variant <- also loop target.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
    .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));

  // Compare the just read M0 value to all possible Idx values.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
    .addReg(CurrentIdxReg)
    .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());

  if (UseGPRIdxMode) {
    unsigned IdxReg;
    if (Offset == 0) {
      IdxReg = CurrentIdxReg;
    } else {
      IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
        .addReg(CurrentIdxReg, RegState::Kill)
        .addImm(Offset);
    }

    MachineInstr *SetIdx =
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_IDX))
      .addReg(IdxReg, RegState::Kill);
    SetIdx->getOperand(2).setIsUndef();
  } else {
    // Move index from VCC into M0
    if (Offset == 0) {
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(CurrentIdxReg, RegState::Kill);
    } else {
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(CurrentIdxReg, RegState::Kill)
        .addImm(Offset);
    }
  }

  // Update EXEC, save the original EXEC value to VCC.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
    .addReg(CondReg, RegState::Kill);

  MRI.setSimpleHint(NewExec, CondReg);

  // Update EXEC, switch all done bits to 0 and all todo bits to 1.
  MachineInstr *InsertPt =
    BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(NewExec);

  // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
  // s_cbranch_scc0?

  // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&LoopBB);

  return InsertPt->getIterator();
}

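// Wraps the loop emitted by emitLoadM0FromVGPRLoop: saves EXEC, splits the
// block at \p MI, inserts the loop block, and restores EXEC in the remainder
// block. Returns an insertion point inside the loop block.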
// This has slightly sub-optimal regalloc when the source vector is killed by
// the read. The register allocator does not understand that the kill is
// per-workitem, so the vector is kept alive for the whole loop and we end up
// not re-using a subregister from it, using 1 more VGPR than necessary. The
// extra VGPR was saved when this was expanded after register allocation.
static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
                                                  MachineBasicBlock &MBB,
                                                  MachineInstr &MI,
                                                  unsigned InitResultReg,
                                                  unsigned PhiReg,
                                                  int Offset,
                                                  bool UseGPRIdxMode) {
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);

  // Save the EXEC mask.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
    .addReg(AMDGPU::EXEC);

  // To insert the loop we need to split the block. Move everything after this
  // point to a new block, and insert a new empty block between the two.
  MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
  MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF->insert(MBBI, LoopBB);
  MF->insert(MBBI, RemainderBB);

  LoopBB->addSuccessor(LoopBB);
  LoopBB->addSuccessor(RemainderBB);

  // Move the rest of the block into a new block.
  RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
  RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());

  MBB.addSuccessor(LoopBB);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);

  auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
                                      InitResultReg, DstReg, PhiReg, TmpExec,
                                      Offset, UseGPRIdxMode);

  MachineBasicBlock::iterator First = RemainderBB->begin();
  BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
    .addReg(SaveExec);

  return InsPt;
}

// Returns the subregister index and a remaining offset: an in-range constant
// offset is folded into the subregister index, otherwise sub0 is returned and
// the offset is left to be added to the index register.
static std::pair<unsigned, int>
computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
                            const TargetRegisterClass *SuperRC,
                            unsigned VecReg,
                            int Offset) {
  int NumElts = SuperRC->getSize() / 4;

  // Skip out of bounds offsets, or else we would end up using an undefined
  // register.
  if (Offset >= NumElts || Offset < 0)
    return std::make_pair(AMDGPU::sub0, Offset);

  return std::make_pair(AMDGPU::sub0 + Offset, 0);
}

// Return true if the index is an SGPR and was set.
static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
                                 MachineRegisterInfo &MRI,
                                 MachineInstr &MI,
                                 int Offset,
                                 bool UseGPRIdxMode,
                                 bool IsIndirectSrc) {
  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());

  assert(Idx->getReg() != AMDGPU::NoRegister);

  if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
    return false;

  if (UseGPRIdxMode) {
    unsigned IdxMode = IsIndirectSrc ?
      VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
    if (Offset == 0) {
      MachineInstr *SetOn =
          BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
          .add(*Idx)
          .addImm(IdxMode);

      SetOn->getOperand(3).setIsUndef();
    } else {
      unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
        .add(*Idx)
        .addImm(Offset);
      MachineInstr *SetOn =
          BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
          .addReg(Tmp, RegState::Kill)
          .addImm(IdxMode);

      SetOn->getOperand(3).setIsUndef();
    }

    return true;
  }

  if (Offset == 0) {
    BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0).add(*Idx);
  } else {
    BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
      .add(*Idx)
      .addImm(Offset);
  }

  return true;
}

// Control flow needs to be inserted if indexing with a VGPR.
static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
                                          MachineBasicBlock &MBB,
                                          const SISubtarget &ST) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();

  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);

  unsigned SubReg;
  std::tie(SubReg, Offset)
    = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);

  bool UseGPRIdxMode = ST.hasVGPRIndexMode() && EnableVGPRIndexMode;

  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    if (UseGPRIdxMode) {
      // TODO: Look at the uses to avoid the copy. This may require rescheduling
      // to avoid interfering with other uses, so probably requires a new
      // optimization pass.
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
        .addReg(SrcReg, RegState::Undef, SubReg)
        .addReg(SrcReg, RegState::Implicit)
        .addReg(AMDGPU::M0, RegState::Implicit);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
    } else {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
        .addReg(SrcReg, RegState::Undef, SubReg)
        .addReg(SrcReg, RegState::Implicit);
    }

    MI.eraseFromParent();

    return &MBB;
  }

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);

  if (UseGPRIdxMode) {
    MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
                          .addImm(0) // Reset inside loop.
                          .addImm(VGPRIndexMode::SRC0_ENABLE);
    SetOn->getOperand(3).setIsUndef();

    // Disable again after the loop.
1606 BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 1607 } 1608 1609 auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, UseGPRIdxMode); 1610 MachineBasicBlock *LoopBB = InsPt->getParent(); 1611 1612 if (UseGPRIdxMode) { 1613 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) 1614 .addReg(SrcReg, RegState::Undef, SubReg) 1615 .addReg(SrcReg, RegState::Implicit) 1616 .addReg(AMDGPU::M0, RegState::Implicit); 1617 } else { 1618 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 1619 .addReg(SrcReg, RegState::Undef, SubReg) 1620 .addReg(SrcReg, RegState::Implicit); 1621 } 1622 1623 MI.eraseFromParent(); 1624 1625 return LoopBB; 1626 } 1627 1628 static unsigned getMOVRELDPseudo(const TargetRegisterClass *VecRC) { 1629 switch (VecRC->getSize()) { 1630 case 4: 1631 return AMDGPU::V_MOVRELD_B32_V1; 1632 case 8: 1633 return AMDGPU::V_MOVRELD_B32_V2; 1634 case 16: 1635 return AMDGPU::V_MOVRELD_B32_V4; 1636 case 32: 1637 return AMDGPU::V_MOVRELD_B32_V8; 1638 case 64: 1639 return AMDGPU::V_MOVRELD_B32_V16; 1640 default: 1641 llvm_unreachable("unsupported size for MOVRELD pseudos"); 1642 } 1643 } 1644 1645 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, 1646 MachineBasicBlock &MBB, 1647 const SISubtarget &ST) { 1648 const SIInstrInfo *TII = ST.getInstrInfo(); 1649 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 1650 MachineFunction *MF = MBB.getParent(); 1651 MachineRegisterInfo &MRI = MF->getRegInfo(); 1652 1653 unsigned Dst = MI.getOperand(0).getReg(); 1654 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); 1655 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 1656 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); 1657 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 1658 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg()); 1659 1660 // This can be an immediate, but will be folded later. 
1661 assert(Val->getReg()); 1662 1663 unsigned SubReg; 1664 std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC, 1665 SrcVec->getReg(), 1666 Offset); 1667 bool UseGPRIdxMode = ST.hasVGPRIndexMode() && EnableVGPRIndexMode; 1668 1669 if (Idx->getReg() == AMDGPU::NoRegister) { 1670 MachineBasicBlock::iterator I(&MI); 1671 const DebugLoc &DL = MI.getDebugLoc(); 1672 1673 assert(Offset == 0); 1674 1675 BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst) 1676 .add(*SrcVec) 1677 .add(*Val) 1678 .addImm(SubReg); 1679 1680 MI.eraseFromParent(); 1681 return &MBB; 1682 } 1683 1684 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) { 1685 MachineBasicBlock::iterator I(&MI); 1686 const DebugLoc &DL = MI.getDebugLoc(); 1687 1688 if (UseGPRIdxMode) { 1689 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) 1690 .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst 1691 .add(*Val) 1692 .addReg(Dst, RegState::ImplicitDefine) 1693 .addReg(SrcVec->getReg(), RegState::Implicit) 1694 .addReg(AMDGPU::M0, RegState::Implicit); 1695 1696 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 1697 } else { 1698 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(VecRC)); 1699 1700 BuildMI(MBB, I, DL, MovRelDesc) 1701 .addReg(Dst, RegState::Define) 1702 .addReg(SrcVec->getReg()) 1703 .add(*Val) 1704 .addImm(SubReg - AMDGPU::sub0); 1705 } 1706 1707 MI.eraseFromParent(); 1708 return &MBB; 1709 } 1710 1711 if (Val->isReg()) 1712 MRI.clearKillFlags(Val->getReg()); 1713 1714 const DebugLoc &DL = MI.getDebugLoc(); 1715 1716 if (UseGPRIdxMode) { 1717 MachineBasicBlock::iterator I(&MI); 1718 1719 MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 1720 .addImm(0) // Reset inside loop. 1721 .addImm(VGPRIndexMode::DST_ENABLE); 1722 SetOn->getOperand(3).setIsUndef(); 1723 1724 // Disable again after the loop. 1725 BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 1726 } 1727 1728 unsigned PhiReg = MRI.createVirtualRegister(VecRC); 1729 1730 auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, 1731 Offset, UseGPRIdxMode); 1732 MachineBasicBlock *LoopBB = InsPt->getParent(); 1733 1734 if (UseGPRIdxMode) { 1735 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) 1736 .addReg(PhiReg, RegState::Undef, SubReg) // vdst 1737 .add(*Val) // src0 1738 .addReg(Dst, RegState::ImplicitDefine) 1739 .addReg(PhiReg, RegState::Implicit) 1740 .addReg(AMDGPU::M0, RegState::Implicit); 1741 } else { 1742 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(VecRC)); 1743 1744 BuildMI(*LoopBB, InsPt, DL, MovRelDesc) 1745 .addReg(Dst, RegState::Define) 1746 .addReg(PhiReg) 1747 .add(*Val) 1748 .addImm(SubReg - AMDGPU::sub0); 1749 } 1750 1751 MI.eraseFromParent(); 1752 1753 return LoopBB; 1754 } 1755 1756 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( 1757 MachineInstr &MI, MachineBasicBlock *BB) const { 1758 1759 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 1760 MachineFunction *MF = BB->getParent(); 1761 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1762 1763 if (TII->isMIMG(MI)) { 1764 if (!MI.memoperands_empty()) 1765 return BB; 1766 // Add a memoperand for mimg instructions so that they aren't assumed to 1767 // be ordered memory instuctions. 
1768 1769 MachinePointerInfo PtrInfo(MFI->getImagePSV()); 1770 MachineMemOperand::Flags Flags = MachineMemOperand::MODereferenceable; 1771 if (MI.mayStore()) 1772 Flags |= MachineMemOperand::MOStore; 1773 1774 if (MI.mayLoad()) 1775 Flags |= MachineMemOperand::MOLoad; 1776 1777 auto MMO = MF->getMachineMemOperand(PtrInfo, Flags, 0, 0); 1778 MI.addMemOperand(*MF, MMO); 1779 return BB; 1780 } 1781 1782 switch (MI.getOpcode()) { 1783 case AMDGPU::S_TRAP_PSEUDO: { 1784 DebugLoc DL = MI.getDebugLoc(); 1785 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), AMDGPU::VGPR0) 1786 .addImm(1); 1787 1788 MachineFunction *MF = BB->getParent(); 1789 SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 1790 unsigned UserSGPR = Info->getQueuePtrUserSGPR(); 1791 assert(UserSGPR != AMDGPU::NoRegister); 1792 1793 if (!BB->isLiveIn(UserSGPR)) 1794 BB->addLiveIn(UserSGPR); 1795 1796 BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::SGPR0_SGPR1) 1797 .addReg(UserSGPR); 1798 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_TRAP)).addImm(0x1) 1799 .addReg(AMDGPU::VGPR0, RegState::Implicit) 1800 .addReg(AMDGPU::SGPR0_SGPR1, RegState::Implicit); 1801 1802 MI.eraseFromParent(); 1803 return BB; 1804 } 1805 1806 case AMDGPU::SI_INIT_M0: 1807 BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(), 1808 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 1809 .add(MI.getOperand(0)); 1810 MI.eraseFromParent(); 1811 return BB; 1812 1813 case AMDGPU::GET_GROUPSTATICSIZE: { 1814 DebugLoc DL = MI.getDebugLoc(); 1815 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32)) 1816 .add(MI.getOperand(0)) 1817 .addImm(MFI->getLDSSize()); 1818 MI.eraseFromParent(); 1819 return BB; 1820 } 1821 case AMDGPU::SI_INDIRECT_SRC_V1: 1822 case AMDGPU::SI_INDIRECT_SRC_V2: 1823 case AMDGPU::SI_INDIRECT_SRC_V4: 1824 case AMDGPU::SI_INDIRECT_SRC_V8: 1825 case AMDGPU::SI_INDIRECT_SRC_V16: 1826 return emitIndirectSrc(MI, *BB, *getSubtarget()); 1827 case AMDGPU::SI_INDIRECT_DST_V1: 1828 case AMDGPU::SI_INDIRECT_DST_V2: 1829 case AMDGPU::SI_INDIRECT_DST_V4: 1830 case AMDGPU::SI_INDIRECT_DST_V8: 1831 case AMDGPU::SI_INDIRECT_DST_V16: 1832 return emitIndirectDst(MI, *BB, *getSubtarget()); 1833 case AMDGPU::SI_KILL: 1834 return splitKillBlock(MI, BB); 1835 case AMDGPU::V_CNDMASK_B64_PSEUDO: { 1836 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 1837 1838 unsigned Dst = MI.getOperand(0).getReg(); 1839 unsigned Src0 = MI.getOperand(1).getReg(); 1840 unsigned Src1 = MI.getOperand(2).getReg(); 1841 const DebugLoc &DL = MI.getDebugLoc(); 1842 unsigned SrcCond = MI.getOperand(3).getReg(); 1843 1844 unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1845 unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1846 1847 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo) 1848 .addReg(Src0, 0, AMDGPU::sub0) 1849 .addReg(Src1, 0, AMDGPU::sub0) 1850 .addReg(SrcCond); 1851 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi) 1852 .addReg(Src0, 0, AMDGPU::sub1) 1853 .addReg(Src1, 0, AMDGPU::sub1) 1854 .addReg(SrcCond); 1855 1856 BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst) 1857 .addReg(DstLo) 1858 .addImm(AMDGPU::sub0) 1859 .addReg(DstHi) 1860 .addImm(AMDGPU::sub1); 1861 MI.eraseFromParent(); 1862 return BB; 1863 } 1864 case AMDGPU::SI_BR_UNDEF: { 1865 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 1866 const DebugLoc &DL = MI.getDebugLoc(); 1867 MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) 1868 .add(MI.getOperand(0)); 1869 Br->getOperand(1).setIsUndef(true); 
    // read undef SCC
    MI.eraseFromParent();
    return BB;
  }
  default:
    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  }
}

bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  // This currently forces unfolding various combinations of fsub into fma with
  // free fneg'd operands. As long as we have fast FMA (controlled by
  // isFMAFasterThanFMulAndFAdd), we should perform these.

  // When fma is quarter rate, for f64 (where add / sub are at best half rate),
  // most of these combines appear to be cycle neutral but save on instruction
  // count / code size.
  return true;
}

EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
                                         EVT VT) const {
  if (!VT.isVector()) {
    return MVT::i1;
  }
  return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
}

MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
  // TODO: Should i16 be used always if legal? For now it would force VALU
  // shifts.
  return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
}

// Answering this is somewhat tricky and depends on the specific device, since
// different devices have different rates for fma and for f64 operations in
// general.
//
// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
// regardless of which device (although the number of cycles differs between
// devices), so it is always profitable for f64.
//
// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
// only on full rate devices. Normally, we should prefer selecting v_mad_f32,
// which we can always do even without fused FP ops since it returns the same
// result as the separate operations and is always full rate. Therefore, we lie
// and report that fma is not faster for f32. v_mad_f32, however, does not
// support denormals, so we do report fma as faster if we have a fast fma
// device and require denormals.
//
bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
    // fma is as fast on some subtargets. However, we always have full rate f32
    // mad available, which returns the same result as the separate operations,
    // so we should prefer that over fma. We can't use mad if we want to support
    // denormals, so we only report fma as faster in that case.
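    // For example, on a subtarget with fast f32 fma and fp32 denormals enabled
    // this returns true and the fsub/fneg combines may form fma; on a
    // quarter-rate fma subtarget, or without denormals, we keep using
    // v_mad_f32.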
1927 return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32(); 1928 case MVT::f64: 1929 return true; 1930 case MVT::f16: 1931 return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals(); 1932 default: 1933 break; 1934 } 1935 1936 return false; 1937 } 1938 1939 //===----------------------------------------------------------------------===// 1940 // Custom DAG Lowering Operations 1941 //===----------------------------------------------------------------------===// 1942 1943 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 1944 switch (Op.getOpcode()) { 1945 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); 1946 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 1947 case ISD::LOAD: { 1948 SDValue Result = LowerLOAD(Op, DAG); 1949 assert((!Result.getNode() || 1950 Result.getNode()->getNumValues() == 2) && 1951 "Load should return a value and a chain"); 1952 return Result; 1953 } 1954 1955 case ISD::FSIN: 1956 case ISD::FCOS: 1957 return LowerTrig(Op, DAG); 1958 case ISD::SELECT: return LowerSELECT(Op, DAG); 1959 case ISD::FDIV: return LowerFDIV(Op, DAG); 1960 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG); 1961 case ISD::STORE: return LowerSTORE(Op, DAG); 1962 case ISD::GlobalAddress: { 1963 MachineFunction &MF = DAG.getMachineFunction(); 1964 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1965 return LowerGlobalAddress(MFI, Op, DAG); 1966 } 1967 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 1968 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 1969 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); 1970 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG); 1971 case ISD::INSERT_VECTOR_ELT: 1972 return lowerINSERT_VECTOR_ELT(Op, DAG); 1973 case ISD::EXTRACT_VECTOR_ELT: 1974 return lowerEXTRACT_VECTOR_ELT(Op, DAG); 1975 case ISD::FP_ROUND: 1976 return lowerFP_ROUND(Op, DAG); 1977 } 1978 return SDValue(); 1979 } 1980 1981 void SITargetLowering::ReplaceNodeResults(SDNode *N, 1982 SmallVectorImpl<SDValue> &Results, 1983 SelectionDAG &DAG) const { 1984 switch (N->getOpcode()) { 1985 case ISD::INSERT_VECTOR_ELT: { 1986 if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG)) 1987 Results.push_back(Res); 1988 return; 1989 } 1990 case ISD::EXTRACT_VECTOR_ELT: { 1991 if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG)) 1992 Results.push_back(Res); 1993 return; 1994 } 1995 default: 1996 break; 1997 } 1998 } 1999 2000 /// \brief Helper function for LowerBRCOND 2001 static SDNode *findUser(SDValue Value, unsigned Opcode) { 2002 2003 SDNode *Parent = Value.getNode(); 2004 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); 2005 I != E; ++I) { 2006 2007 if (I.getUse().get() != Value) 2008 continue; 2009 2010 if (I->getOpcode() == Opcode) 2011 return *I; 2012 } 2013 return nullptr; 2014 } 2015 2016 bool SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { 2017 if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 2018 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) { 2019 case AMDGPUIntrinsic::amdgcn_if: 2020 case AMDGPUIntrinsic::amdgcn_else: 2021 case AMDGPUIntrinsic::amdgcn_end_cf: 2022 case AMDGPUIntrinsic::amdgcn_loop: 2023 return true; 2024 default: 2025 return false; 2026 } 2027 } 2028 2029 if (Intr->getOpcode() == ISD::INTRINSIC_WO_CHAIN) { 2030 switch (cast<ConstantSDNode>(Intr->getOperand(0))->getZExtValue()) { 2031 case AMDGPUIntrinsic::amdgcn_break: 2032 case 
    AMDGPUIntrinsic::amdgcn_if_break:
    case AMDGPUIntrinsic::amdgcn_else_break:
      return true;
    default:
      return false;
    }
  }

  return false;
}

void SITargetLowering::createDebuggerPrologueStackObjects(
    MachineFunction &MF) const {
  // Create stack objects that are used for emitting the debugger prologue.
  //
  // The debugger prologue writes work group IDs and work item IDs to scratch
  // memory at a fixed location in the following format:
  //   offset 0:  work group ID x
  //   offset 4:  work group ID y
  //   offset 8:  work group ID z
  //   offset 16: work item ID x
  //   offset 20: work item ID y
  //   offset 24: work item ID z
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  int ObjectIdx = 0;

  // For each dimension:
  for (unsigned i = 0; i < 3; ++i) {
    // Create a fixed stack object for the work group ID.
    ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true);
    Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx);
    // Create a fixed stack object for the work item ID.
    ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true);
    Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx);
  }
}

bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
  const Triple &TT = getTargetMachine().getTargetTriple();
  return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
         AMDGPU::shouldEmitConstantsToTextSection(TT);
}

bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
  return (GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
          GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) &&
         !shouldEmitFixup(GV) &&
         !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
}

bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
  return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
}

/// This transforms the control flow intrinsics to get the branch destination
/// as the last parameter, and also switches the branch target with BR if the
/// need arises.
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
                                      SelectionDAG &DAG) const {
  SDLoc DL(BRCOND);

  SDNode *Intr = BRCOND.getOperand(1).getNode();
  SDValue Target = BRCOND.getOperand(2);
  SDNode *BR = nullptr;
  SDNode *SetCC = nullptr;

  if (Intr->getOpcode() == ISD::SETCC) {
    // As long as we negate the condition everything is fine.
    SetCC = Intr;
    Intr = SetCC->getOperand(0).getNode();
  } else {
    // Get the target from BR if we don't negate the condition.
    BR = findUser(BRCOND, ISD::BR);
    Target = BR->getOperand(1);
  }

  // FIXME: This changes the types of the intrinsics instead of introducing new
  // nodes with the correct types.
  // e.g. llvm.amdgcn.loop
  //
  // e.g.: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
  //    => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>

  if (!isCFIntrinsic(Intr)) {
    // This is a uniform branch so we don't need to legalize.
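    // (A condition that does not come from one of the amdgcn control-flow
    // intrinsics is an ordinary uniform branch and is left for normal branch
    // selection.)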
    return BRCOND;
  }

  bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
                   Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;

  assert(!SetCC ||
         (SetCC->getConstantOperandVal(1) == 1 &&
          cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
              ISD::SETNE));

  // operands of the new intrinsic call
  SmallVector<SDValue, 4> Ops;
  if (HaveChain)
    Ops.push_back(BRCOND.getOperand(0));

  Ops.append(Intr->op_begin() + (HaveChain ? 1 : 0), Intr->op_end());
  Ops.push_back(Target);

  ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());

  // build the new intrinsic call
  SDNode *Result = DAG.getNode(
      Res.size() > 1 ? ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL,
      DAG.getVTList(Res), Ops).getNode();

  if (!HaveChain) {
    SDValue Ops[] = {
      SDValue(Result, 0),
      BRCOND.getOperand(0)
    };

    Result = DAG.getMergeValues(Ops, DL).getNode();
  }

  if (BR) {
    // Give the branch instruction our target.
    SDValue Ops[] = {
      BR->getOperand(0),
      BRCOND.getOperand(2)
    };
    SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
    DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
    BR = NewBR.getNode();
  }

  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);

  // Copy the intrinsic results to registers.
  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
    SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
    if (!CopyToReg)
      continue;

    Chain = DAG.getCopyToReg(
      Chain, DL,
      CopyToReg->getOperand(1),
      SDValue(Result, i - 1),
      SDValue());

    DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
  }

  // Remove the old intrinsic from the chain.
  DAG.ReplaceAllUsesOfValueWith(
    SDValue(Intr, Intr->getNumValues() - 1),
    Intr->getOperand(0));

  return Chain;
}

SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
                                            SDValue Op,
                                            const SDLoc &DL,
                                            EVT VT) const {
  return Op.getValueType().bitsLE(VT) ?
      DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
      DAG.getNode(ISD::FTRUNC, DL, VT, Op);
}

SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::f16 &&
         "Do not know how to custom lower FP_ROUND for non-f16 type");

  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();
  if (SrcVT != MVT::f64)
    return Op;

  SDLoc DL(Op);

  SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
  return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
}

SDValue SITargetLowering::getSegmentAperture(unsigned AS,
                                             SelectionDAG &DAG) const {
  SDLoc SL;
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
  assert(UserSGPR != AMDGPU::NoRegister);

  SDValue QueuePtr = CreateLiveInRegister(
    DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);

  // Offset into amd_queue_t for group_segment_aperture_base_hi /
  // private_segment_aperture_base_hi.
  uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ?
0x40 : 0x44; 2227 2228 SDValue Ptr = DAG.getNode(ISD::ADD, SL, MVT::i64, QueuePtr, 2229 DAG.getConstant(StructOffset, SL, MVT::i64)); 2230 2231 // TODO: Use custom target PseudoSourceValue. 2232 // TODO: We should use the value from the IR intrinsic call, but it might not 2233 // be available and how do we get it? 2234 Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()), 2235 AMDGPUAS::CONSTANT_ADDRESS)); 2236 2237 MachinePointerInfo PtrInfo(V, StructOffset); 2238 return DAG.getLoad(MVT::i32, SL, QueuePtr.getValue(1), Ptr, PtrInfo, 2239 MinAlign(64, StructOffset), 2240 MachineMemOperand::MODereferenceable | 2241 MachineMemOperand::MOInvariant); 2242 } 2243 2244 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, 2245 SelectionDAG &DAG) const { 2246 SDLoc SL(Op); 2247 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op); 2248 2249 SDValue Src = ASC->getOperand(0); 2250 2251 // FIXME: Really support non-0 null pointers. 2252 SDValue SegmentNullPtr = DAG.getConstant(-1, SL, MVT::i32); 2253 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64); 2254 2255 // flat -> local/private 2256 if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { 2257 if (ASC->getDestAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || 2258 ASC->getDestAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { 2259 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE); 2260 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); 2261 2262 return DAG.getNode(ISD::SELECT, SL, MVT::i32, 2263 NonNull, Ptr, SegmentNullPtr); 2264 } 2265 } 2266 2267 // local/private -> flat 2268 if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { 2269 if (ASC->getSrcAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || 2270 ASC->getSrcAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { 2271 SDValue NonNull 2272 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE); 2273 2274 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), DAG); 2275 SDValue CvtPtr 2276 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture); 2277 2278 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, 2279 DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr), 2280 FlatNullPtr); 2281 } 2282 } 2283 2284 // global <-> flat are no-ops and never emitted. 2285 2286 const MachineFunction &MF = DAG.getMachineFunction(); 2287 DiagnosticInfoUnsupported InvalidAddrSpaceCast( 2288 *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc()); 2289 DAG.getContext()->diagnose(InvalidAddrSpaceCast); 2290 2291 return DAG.getUNDEF(ASC->getValueType(0)); 2292 } 2293 2294 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, 2295 SelectionDAG &DAG) const { 2296 SDValue Idx = Op.getOperand(2); 2297 if (isa<ConstantSDNode>(Idx)) 2298 return SDValue(); 2299 2300 // Avoid stack access for dynamic indexing. 2301 SDLoc SL(Op); 2302 SDValue Vec = Op.getOperand(0); 2303 SDValue Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Op.getOperand(1)); 2304 2305 // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec 2306 SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Val); 2307 2308 // Convert vector index to bit-index. 
2309 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, 2310 DAG.getConstant(16, SL, MVT::i32)); 2311 2312 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec); 2313 2314 SDValue BFM = DAG.getNode(ISD::SHL, SL, MVT::i32, 2315 DAG.getConstant(0xffff, SL, MVT::i32), 2316 ScaledIdx); 2317 2318 SDValue LHS = DAG.getNode(ISD::AND, SL, MVT::i32, BFM, ExtVal); 2319 SDValue RHS = DAG.getNode(ISD::AND, SL, MVT::i32, 2320 DAG.getNOT(SL, BFM, MVT::i32), BCVec); 2321 2322 SDValue BFI = DAG.getNode(ISD::OR, SL, MVT::i32, LHS, RHS); 2323 return DAG.getNode(ISD::BITCAST, SL, Op.getValueType(), BFI); 2324 } 2325 2326 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, 2327 SelectionDAG &DAG) const { 2328 SDLoc SL(Op); 2329 2330 EVT ResultVT = Op.getValueType(); 2331 SDValue Vec = Op.getOperand(0); 2332 SDValue Idx = Op.getOperand(1); 2333 2334 if (const ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) { 2335 SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec); 2336 2337 if (CIdx->getZExtValue() == 1) { 2338 Result = DAG.getNode(ISD::SRL, SL, MVT::i32, Result, 2339 DAG.getConstant(16, SL, MVT::i32)); 2340 } else { 2341 assert(CIdx->getZExtValue() == 0); 2342 } 2343 2344 if (ResultVT.bitsLT(MVT::i32)) 2345 Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result); 2346 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result); 2347 } 2348 2349 SDValue Sixteen = DAG.getConstant(16, SL, MVT::i32); 2350 2351 // Convert vector index to bit-index. 2352 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, Sixteen); 2353 2354 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec); 2355 SDValue Elt = DAG.getNode(ISD::SRL, SL, MVT::i32, BC, ScaledIdx); 2356 2357 SDValue Result = Elt; 2358 if (ResultVT.bitsLT(MVT::i32)) 2359 Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result); 2360 2361 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result); 2362 } 2363 2364 bool 2365 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 2366 // We can fold offsets for anything that doesn't require a GOT relocation. 2367 return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS || 2368 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) && 2369 !shouldEmitGOTReloc(GA->getGlobal()); 2370 } 2371 2372 static SDValue 2373 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV, 2374 const SDLoc &DL, unsigned Offset, EVT PtrVT, 2375 unsigned GAFlags = SIInstrInfo::MO_NONE) { 2376 // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is 2377 // lowered to the following code sequence: 2378 // 2379 // For constant address space: 2380 // s_getpc_b64 s[0:1] 2381 // s_add_u32 s0, s0, $symbol 2382 // s_addc_u32 s1, s1, 0 2383 // 2384 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 2385 // a fixup or relocation is emitted to replace $symbol with a literal 2386 // constant, which is a pc-relative offset from the encoding of the $symbol 2387 // operand to the global variable. 2388 // 2389 // For global address space: 2390 // s_getpc_b64 s[0:1] 2391 // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo 2392 // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi 2393 // 2394 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 2395 // fixups or relocations are emitted to replace $symbol@*@lo and 2396 // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant, 2397 // which is a 64-bit pc-relative offset from the encoding of the $symbol 2398 // operand to the global variable. 
2399 // 2400 // What we want here is an offset from the value returned by s_getpc 2401 // (which is the address of the s_add_u32 instruction) to the global 2402 // variable, but since the encoding of $symbol starts 4 bytes after the start 2403 // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too 2404 // small. This requires us to add 4 to the global variable offset in order to 2405 // compute the correct address. 2406 SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, 2407 GAFlags); 2408 SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, 2409 GAFlags == SIInstrInfo::MO_NONE ? 2410 GAFlags : GAFlags + 1); 2411 return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi); 2412 } 2413 2414 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, 2415 SDValue Op, 2416 SelectionDAG &DAG) const { 2417 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op); 2418 2419 if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS && 2420 GSD->getAddressSpace() != AMDGPUAS::GLOBAL_ADDRESS) 2421 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG); 2422 2423 SDLoc DL(GSD); 2424 const GlobalValue *GV = GSD->getGlobal(); 2425 EVT PtrVT = Op.getValueType(); 2426 2427 if (shouldEmitFixup(GV)) 2428 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT); 2429 else if (shouldEmitPCReloc(GV)) 2430 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT, 2431 SIInstrInfo::MO_REL32); 2432 2433 SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT, 2434 SIInstrInfo::MO_GOTPCREL32); 2435 2436 Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext()); 2437 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); 2438 const DataLayout &DataLayout = DAG.getDataLayout(); 2439 unsigned Align = DataLayout.getABITypeAlignment(PtrTy); 2440 // FIXME: Use a PseudoSourceValue once those can be assigned an address space. 2441 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); 2442 2443 return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align, 2444 MachineMemOperand::MODereferenceable | 2445 MachineMemOperand::MOInvariant); 2446 } 2447 2448 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, 2449 const SDLoc &DL, SDValue V) const { 2450 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as 2451 // the destination register. 2452 // 2453 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions, 2454 // so we will end up with redundant moves to m0. 2455 // 2456 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result. 2457 2458 // A Null SDValue creates a glue result. 2459 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue, 2460 V, Chain); 2461 return SDValue(M0, 0); 2462 } 2463 2464 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, 2465 SDValue Op, 2466 MVT VT, 2467 unsigned Offset) const { 2468 SDLoc SL(Op); 2469 SDValue Param = LowerParameter(DAG, MVT::i32, MVT::i32, SL, 2470 DAG.getEntryNode(), Offset, false); 2471 // The local size values will have the hi 16-bits as zero. 
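  // ISD::AssertZext with the narrower VT (e.g. i16 here) records that the high
  // bits of the loaded i32 are already zero, so later zero-extends or masks of
  // this value can be folded away.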
2472 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param, 2473 DAG.getValueType(VT)); 2474 } 2475 2476 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, 2477 EVT VT) { 2478 DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(), 2479 "non-hsa intrinsic with hsa target", 2480 DL.getDebugLoc()); 2481 DAG.getContext()->diagnose(BadIntrin); 2482 return DAG.getUNDEF(VT); 2483 } 2484 2485 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, 2486 EVT VT) { 2487 DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(), 2488 "intrinsic not supported on subtarget", 2489 DL.getDebugLoc()); 2490 DAG.getContext()->diagnose(BadIntrin); 2491 return DAG.getUNDEF(VT); 2492 } 2493 2494 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 2495 SelectionDAG &DAG) const { 2496 MachineFunction &MF = DAG.getMachineFunction(); 2497 auto MFI = MF.getInfo<SIMachineFunctionInfo>(); 2498 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2499 2500 EVT VT = Op.getValueType(); 2501 SDLoc DL(Op); 2502 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2503 2504 // TODO: Should this propagate fast-math-flags? 2505 2506 switch (IntrinsicID) { 2507 case Intrinsic::amdgcn_dispatch_ptr: 2508 case Intrinsic::amdgcn_queue_ptr: { 2509 if (!Subtarget->isAmdCodeObjectV2()) { 2510 DiagnosticInfoUnsupported BadIntrin( 2511 *MF.getFunction(), "unsupported hsa intrinsic without hsa target", 2512 DL.getDebugLoc()); 2513 DAG.getContext()->diagnose(BadIntrin); 2514 return DAG.getUNDEF(VT); 2515 } 2516 2517 auto Reg = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ? 2518 SIRegisterInfo::DISPATCH_PTR : SIRegisterInfo::QUEUE_PTR; 2519 return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, 2520 TRI->getPreloadedValue(MF, Reg), VT); 2521 } 2522 case Intrinsic::amdgcn_implicitarg_ptr: { 2523 unsigned offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT); 2524 return LowerParameterPtr(DAG, DL, DAG.getEntryNode(), offset); 2525 } 2526 case Intrinsic::amdgcn_kernarg_segment_ptr: { 2527 unsigned Reg 2528 = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR); 2529 return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT); 2530 } 2531 case Intrinsic::amdgcn_dispatch_id: { 2532 unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::DISPATCH_ID); 2533 return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT); 2534 } 2535 case Intrinsic::amdgcn_rcp: 2536 return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1)); 2537 case Intrinsic::amdgcn_rsq: 2538 case AMDGPUIntrinsic::AMDGPU_rsq: // Legacy name 2539 return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 2540 case Intrinsic::amdgcn_rsq_legacy: 2541 if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) 2542 return emitRemovedIntrinsicError(DAG, DL, VT); 2543 2544 return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1)); 2545 case Intrinsic::amdgcn_rcp_legacy: 2546 if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) 2547 return emitRemovedIntrinsicError(DAG, DL, VT); 2548 return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1)); 2549 case Intrinsic::amdgcn_rsq_clamp: { 2550 if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS) 2551 return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1)); 2552 2553 Type *Type = VT.getTypeForEVT(*DAG.getContext()); 2554 APFloat Max = APFloat::getLargest(Type->getFltSemantics()); 2555 APFloat Min = 
APFloat::getLargest(Type->getFltSemantics(), true); 2556 2557 SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 2558 SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq, 2559 DAG.getConstantFP(Max, DL, VT)); 2560 return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp, 2561 DAG.getConstantFP(Min, DL, VT)); 2562 } 2563 case Intrinsic::r600_read_ngroups_x: 2564 if (Subtarget->isAmdHsaOS()) 2565 return emitNonHSAIntrinsicError(DAG, DL, VT); 2566 2567 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2568 SI::KernelInputOffsets::NGROUPS_X, false); 2569 case Intrinsic::r600_read_ngroups_y: 2570 if (Subtarget->isAmdHsaOS()) 2571 return emitNonHSAIntrinsicError(DAG, DL, VT); 2572 2573 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2574 SI::KernelInputOffsets::NGROUPS_Y, false); 2575 case Intrinsic::r600_read_ngroups_z: 2576 if (Subtarget->isAmdHsaOS()) 2577 return emitNonHSAIntrinsicError(DAG, DL, VT); 2578 2579 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2580 SI::KernelInputOffsets::NGROUPS_Z, false); 2581 case Intrinsic::r600_read_global_size_x: 2582 if (Subtarget->isAmdHsaOS()) 2583 return emitNonHSAIntrinsicError(DAG, DL, VT); 2584 2585 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2586 SI::KernelInputOffsets::GLOBAL_SIZE_X, false); 2587 case Intrinsic::r600_read_global_size_y: 2588 if (Subtarget->isAmdHsaOS()) 2589 return emitNonHSAIntrinsicError(DAG, DL, VT); 2590 2591 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2592 SI::KernelInputOffsets::GLOBAL_SIZE_Y, false); 2593 case Intrinsic::r600_read_global_size_z: 2594 if (Subtarget->isAmdHsaOS()) 2595 return emitNonHSAIntrinsicError(DAG, DL, VT); 2596 2597 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2598 SI::KernelInputOffsets::GLOBAL_SIZE_Z, false); 2599 case Intrinsic::r600_read_local_size_x: 2600 if (Subtarget->isAmdHsaOS()) 2601 return emitNonHSAIntrinsicError(DAG, DL, VT); 2602 2603 return lowerImplicitZextParam(DAG, Op, MVT::i16, 2604 SI::KernelInputOffsets::LOCAL_SIZE_X); 2605 case Intrinsic::r600_read_local_size_y: 2606 if (Subtarget->isAmdHsaOS()) 2607 return emitNonHSAIntrinsicError(DAG, DL, VT); 2608 2609 return lowerImplicitZextParam(DAG, Op, MVT::i16, 2610 SI::KernelInputOffsets::LOCAL_SIZE_Y); 2611 case Intrinsic::r600_read_local_size_z: 2612 if (Subtarget->isAmdHsaOS()) 2613 return emitNonHSAIntrinsicError(DAG, DL, VT); 2614 2615 return lowerImplicitZextParam(DAG, Op, MVT::i16, 2616 SI::KernelInputOffsets::LOCAL_SIZE_Z); 2617 case Intrinsic::amdgcn_workgroup_id_x: 2618 case Intrinsic::r600_read_tgid_x: 2619 return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass, 2620 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_X), VT); 2621 case Intrinsic::amdgcn_workgroup_id_y: 2622 case Intrinsic::r600_read_tgid_y: 2623 return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass, 2624 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Y), VT); 2625 case Intrinsic::amdgcn_workgroup_id_z: 2626 case Intrinsic::r600_read_tgid_z: 2627 return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass, 2628 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Z), VT); 2629 case Intrinsic::amdgcn_workitem_id_x: 2630 case Intrinsic::r600_read_tidig_x: 2631 return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, 2632 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X), VT); 2633 case Intrinsic::amdgcn_workitem_id_y: 2634 case Intrinsic::r600_read_tidig_y: 2635 return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, 2636 
TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y), VT); 2637 case Intrinsic::amdgcn_workitem_id_z: 2638 case Intrinsic::r600_read_tidig_z: 2639 return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, 2640 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z), VT); 2641 case AMDGPUIntrinsic::SI_load_const: { 2642 SDValue Ops[] = { 2643 Op.getOperand(1), 2644 Op.getOperand(2) 2645 }; 2646 2647 MachineMemOperand *MMO = MF.getMachineMemOperand( 2648 MachinePointerInfo(), 2649 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | 2650 MachineMemOperand::MOInvariant, 2651 VT.getStoreSize(), 4); 2652 return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL, 2653 Op->getVTList(), Ops, VT, MMO); 2654 } 2655 case AMDGPUIntrinsic::amdgcn_fdiv_fast: 2656 return lowerFDIV_FAST(Op, DAG); 2657 case AMDGPUIntrinsic::SI_vs_load_input: 2658 return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT, 2659 Op.getOperand(1), 2660 Op.getOperand(2), 2661 Op.getOperand(3)); 2662 2663 case AMDGPUIntrinsic::SI_fs_constant: { 2664 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3)); 2665 SDValue Glue = M0.getValue(1); 2666 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, 2667 DAG.getConstant(2, DL, MVT::i32), // P0 2668 Op.getOperand(1), Op.getOperand(2), Glue); 2669 } 2670 case AMDGPUIntrinsic::SI_packf16: 2671 if (Op.getOperand(1).isUndef() && Op.getOperand(2).isUndef()) 2672 return DAG.getUNDEF(MVT::i32); 2673 return Op; 2674 case AMDGPUIntrinsic::SI_fs_interp: { 2675 SDValue IJ = Op.getOperand(4); 2676 SDValue I = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ, 2677 DAG.getConstant(0, DL, MVT::i32)); 2678 SDValue J = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ, 2679 DAG.getConstant(1, DL, MVT::i32)); 2680 I = DAG.getNode(ISD::BITCAST, DL, MVT::f32, I); 2681 J = DAG.getNode(ISD::BITCAST, DL, MVT::f32, J); 2682 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3)); 2683 SDValue Glue = M0.getValue(1); 2684 SDValue P1 = DAG.getNode(AMDGPUISD::INTERP_P1, DL, 2685 DAG.getVTList(MVT::f32, MVT::Glue), 2686 I, Op.getOperand(1), Op.getOperand(2), Glue); 2687 Glue = SDValue(P1.getNode(), 1); 2688 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, P1, J, 2689 Op.getOperand(1), Op.getOperand(2), Glue); 2690 } 2691 case Intrinsic::amdgcn_interp_mov: { 2692 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); 2693 SDValue Glue = M0.getValue(1); 2694 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1), 2695 Op.getOperand(2), Op.getOperand(3), Glue); 2696 } 2697 case Intrinsic::amdgcn_interp_p1: { 2698 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); 2699 SDValue Glue = M0.getValue(1); 2700 return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1), 2701 Op.getOperand(2), Op.getOperand(3), Glue); 2702 } 2703 case Intrinsic::amdgcn_interp_p2: { 2704 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5)); 2705 SDValue Glue = SDValue(M0.getNode(), 1); 2706 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1), 2707 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4), 2708 Glue); 2709 } 2710 case Intrinsic::amdgcn_sin: 2711 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1)); 2712 2713 case Intrinsic::amdgcn_cos: 2714 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1)); 2715 2716 case Intrinsic::amdgcn_log_clamp: { 2717 if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS) 2718 return SDValue(); 2719 2720 
DiagnosticInfoUnsupported BadIntrin( 2721 *MF.getFunction(), "intrinsic not supported on subtarget", 2722 DL.getDebugLoc()); 2723 DAG.getContext()->diagnose(BadIntrin); 2724 return DAG.getUNDEF(VT); 2725 } 2726 case Intrinsic::amdgcn_ldexp: 2727 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, 2728 Op.getOperand(1), Op.getOperand(2)); 2729 2730 case Intrinsic::amdgcn_fract: 2731 return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1)); 2732 2733 case Intrinsic::amdgcn_class: 2734 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT, 2735 Op.getOperand(1), Op.getOperand(2)); 2736 case Intrinsic::amdgcn_div_fmas: 2737 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT, 2738 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), 2739 Op.getOperand(4)); 2740 2741 case Intrinsic::amdgcn_div_fixup: 2742 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT, 2743 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 2744 2745 case Intrinsic::amdgcn_trig_preop: 2746 return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT, 2747 Op.getOperand(1), Op.getOperand(2)); 2748 case Intrinsic::amdgcn_div_scale: { 2749 // 3rd parameter required to be a constant. 2750 const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 2751 if (!Param) 2752 return DAG.getUNDEF(VT); 2753 2754 // Translate to the operands expected by the machine instruction. The 2755 // first parameter must be the same as the first instruction. 2756 SDValue Numerator = Op.getOperand(1); 2757 SDValue Denominator = Op.getOperand(2); 2758 2759 // Note this order is opposite of the machine instruction's operations, 2760 // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The 2761 // intrinsic has the numerator as the first operand to match a normal 2762 // division operation. 2763 2764 SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator; 2765 2766 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0, 2767 Denominator, Numerator); 2768 } 2769 case Intrinsic::amdgcn_icmp: { 2770 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 2771 int CondCode = CD->getSExtValue(); 2772 2773 if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE || 2774 CondCode >= ICmpInst::Predicate::BAD_ICMP_PREDICATE) 2775 return DAG.getUNDEF(VT); 2776 2777 ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode); 2778 ISD::CondCode CCOpcode = getICmpCondCode(IcInput); 2779 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1), 2780 Op.getOperand(2), DAG.getCondCode(CCOpcode)); 2781 } 2782 case Intrinsic::amdgcn_fcmp: { 2783 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 2784 int CondCode = CD->getSExtValue(); 2785 2786 if (CondCode <= FCmpInst::Predicate::FCMP_FALSE || 2787 CondCode >= FCmpInst::Predicate::FCMP_TRUE) 2788 return DAG.getUNDEF(VT); 2789 2790 FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode); 2791 ISD::CondCode CCOpcode = getFCmpCondCode(IcInput); 2792 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1), 2793 Op.getOperand(2), DAG.getCondCode(CCOpcode)); 2794 } 2795 case Intrinsic::amdgcn_fmul_legacy: 2796 return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT, 2797 Op.getOperand(1), Op.getOperand(2)); 2798 case Intrinsic::amdgcn_sffbh: 2799 case AMDGPUIntrinsic::AMDGPU_flbit_i32: // Legacy name. 
2800 return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1)); 2801 default: 2802 return AMDGPUTargetLowering::LowerOperation(Op, DAG); 2803 } 2804 } 2805 2806 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, 2807 SelectionDAG &DAG) const { 2808 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2809 SDLoc DL(Op); 2810 switch (IntrID) { 2811 case Intrinsic::amdgcn_atomic_inc: 2812 case Intrinsic::amdgcn_atomic_dec: { 2813 MemSDNode *M = cast<MemSDNode>(Op); 2814 unsigned Opc = (IntrID == Intrinsic::amdgcn_atomic_inc) ? 2815 AMDGPUISD::ATOMIC_INC : AMDGPUISD::ATOMIC_DEC; 2816 SDValue Ops[] = { 2817 M->getOperand(0), // Chain 2818 M->getOperand(2), // Ptr 2819 M->getOperand(3) // Value 2820 }; 2821 2822 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops, 2823 M->getMemoryVT(), M->getMemOperand()); 2824 } 2825 case Intrinsic::amdgcn_buffer_load: 2826 case Intrinsic::amdgcn_buffer_load_format: { 2827 SDValue Ops[] = { 2828 Op.getOperand(0), // Chain 2829 Op.getOperand(2), // rsrc 2830 Op.getOperand(3), // vindex 2831 Op.getOperand(4), // offset 2832 Op.getOperand(5), // glc 2833 Op.getOperand(6) // slc 2834 }; 2835 MachineFunction &MF = DAG.getMachineFunction(); 2836 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 2837 2838 unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ? 2839 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; 2840 EVT VT = Op.getValueType(); 2841 EVT IntVT = VT.changeTypeToInteger(); 2842 2843 MachineMemOperand *MMO = MF.getMachineMemOperand( 2844 MachinePointerInfo(MFI->getBufferPSV()), 2845 MachineMemOperand::MOLoad, 2846 VT.getStoreSize(), VT.getStoreSize()); 2847 2848 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, MMO); 2849 } 2850 default: 2851 return SDValue(); 2852 } 2853 } 2854 2855 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, 2856 SelectionDAG &DAG) const { 2857 MachineFunction &MF = DAG.getMachineFunction(); 2858 SDLoc DL(Op); 2859 SDValue Chain = Op.getOperand(0); 2860 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2861 2862 switch (IntrinsicID) { 2863 case Intrinsic::amdgcn_exp: { 2864 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2)); 2865 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3)); 2866 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8)); 2867 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9)); 2868 2869 const SDValue Ops[] = { 2870 Chain, 2871 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt 2872 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en 2873 Op.getOperand(4), // src0 2874 Op.getOperand(5), // src1 2875 Op.getOperand(6), // src2 2876 Op.getOperand(7), // src3 2877 DAG.getTargetConstant(0, DL, MVT::i1), // compr 2878 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1) 2879 }; 2880 2881 unsigned Opc = Done->isNullValue() ? 
2882 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE; 2883 return DAG.getNode(Opc, DL, Op->getVTList(), Ops); 2884 } 2885 case Intrinsic::amdgcn_exp_compr: { 2886 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2)); 2887 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3)); 2888 SDValue Src0 = Op.getOperand(4); 2889 SDValue Src1 = Op.getOperand(5); 2890 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6)); 2891 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7)); 2892 2893 SDValue Undef = DAG.getUNDEF(MVT::f32); 2894 const SDValue Ops[] = { 2895 Chain, 2896 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt 2897 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en 2898 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), 2899 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), 2900 Undef, // src2 2901 Undef, // src3 2902 DAG.getTargetConstant(1, DL, MVT::i1), // compr 2903 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1) 2904 }; 2905 2906 unsigned Opc = Done->isNullValue() ? 2907 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE; 2908 return DAG.getNode(Opc, DL, Op->getVTList(), Ops); 2909 } 2910 case Intrinsic::amdgcn_s_sendmsg: 2911 case AMDGPUIntrinsic::SI_sendmsg: { 2912 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3)); 2913 SDValue Glue = Chain.getValue(1); 2914 return DAG.getNode(AMDGPUISD::SENDMSG, DL, MVT::Other, Chain, 2915 Op.getOperand(2), Glue); 2916 } 2917 case Intrinsic::amdgcn_s_sendmsghalt: { 2918 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3)); 2919 SDValue Glue = Chain.getValue(1); 2920 return DAG.getNode(AMDGPUISD::SENDMSGHALT, DL, MVT::Other, Chain, 2921 Op.getOperand(2), Glue); 2922 } 2923 case AMDGPUIntrinsic::SI_tbuffer_store: { 2924 SDValue Ops[] = { 2925 Chain, 2926 Op.getOperand(2), 2927 Op.getOperand(3), 2928 Op.getOperand(4), 2929 Op.getOperand(5), 2930 Op.getOperand(6), 2931 Op.getOperand(7), 2932 Op.getOperand(8), 2933 Op.getOperand(9), 2934 Op.getOperand(10), 2935 Op.getOperand(11), 2936 Op.getOperand(12), 2937 Op.getOperand(13), 2938 Op.getOperand(14) 2939 }; 2940 2941 EVT VT = Op.getOperand(3).getValueType(); 2942 2943 MachineMemOperand *MMO = MF.getMachineMemOperand( 2944 MachinePointerInfo(), 2945 MachineMemOperand::MOStore, 2946 VT.getStoreSize(), 4); 2947 return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL, 2948 Op->getVTList(), Ops, VT, MMO); 2949 } 2950 case AMDGPUIntrinsic::AMDGPU_kill: { 2951 SDValue Src = Op.getOperand(2); 2952 if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) { 2953 if (!K->isNegative()) 2954 return Chain; 2955 2956 SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32); 2957 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne); 2958 } 2959 2960 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src); 2961 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast); 2962 } 2963 case AMDGPUIntrinsic::SI_export: { // Legacy intrinsic. 
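    // The legacy operand order is (en, vm, done, tgt, compr, src0..src3); it
    // is rearranged below into the same node operand layout used for
    // llvm.amdgcn.exp above: chain, tgt, en, src0..src3, compr, vm.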
    const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(2));
    const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(3));
    const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(4));
    const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(5));
    const ConstantSDNode *Compr = cast<ConstantSDNode>(Op.getOperand(6));

    const SDValue Ops[] = {
      Chain,
      DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8),
      DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),
      Op.getOperand(7),  // src0
      Op.getOperand(8),  // src1
      Op.getOperand(9),  // src2
      Op.getOperand(10), // src3
      DAG.getTargetConstant(Compr->getZExtValue(), DL, MVT::i1),
      DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
    };

    unsigned Opc = Done->isNullValue() ?
      AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
    return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
  }
  default:
    return SDValue();
  }
}

SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to the original sub-32-bit type.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, RealMemVT, MMO);

    SDValue Ops[] = {
      DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
      NewLD.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }

  if (!MemVT.isVector())
    return SDValue();

  assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
         "Custom lowering for non-i32 vectors hasn't been implemented.");

  unsigned AS = Load->getAddressSpace();
  if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
                          AS, Load->getAlignment())) {
    SDValue Ops[2];
    std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
    return DAG.getMergeValues(Ops, DL);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // If there is a possibility that flat instructions access scratch memory,
  // then we need to use the same legalization rules we use for private.
  if (AS == AMDGPUAS::FLAT_ADDRESS)
    AS = MFI->hasFlatScratchInit() ?
      AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;

  unsigned NumElements = MemVT.getVectorNumElements();
  switch (AS) {
  case AMDGPUAS::CONSTANT_ADDRESS:
    if (isMemOpUniform(Load))
      return SDValue();
    // Non-uniform loads will be selected to MUBUF instructions, so they
    // have the same legalization requirements as global and private
    // loads.
    //
    LLVM_FALLTHROUGH;
  case AMDGPUAS::GLOBAL_ADDRESS:
    if (Subtarget->getScalarizeGlobalBehavior() && isMemOpUniform(Load) &&
        isMemOpHasNoClobberedMemOperand(Load))
      return SDValue();
    // Non-uniform loads will be selected to MUBUF instructions, so they
    // have the same legalization requirements as global and private
    // loads.
3057 // 3058 LLVM_FALLTHROUGH; 3059 case AMDGPUAS::FLAT_ADDRESS: 3060 if (NumElements > 4) 3061 return SplitVectorLoad(Op, DAG); 3062 // v4 loads are supported for private and global memory. 3063 return SDValue(); 3064 case AMDGPUAS::PRIVATE_ADDRESS: 3065 // Depending on the setting of the private_element_size field in the 3066 // resource descriptor, we can only make private accesses up to a certain 3067 // size. 3068 switch (Subtarget->getMaxPrivateElementSize()) { 3069 case 4: 3070 return scalarizeVectorLoad(Load, DAG); 3071 case 8: 3072 if (NumElements > 2) 3073 return SplitVectorLoad(Op, DAG); 3074 return SDValue(); 3075 case 16: 3076 // Same as global/flat 3077 if (NumElements > 4) 3078 return SplitVectorLoad(Op, DAG); 3079 return SDValue(); 3080 default: 3081 llvm_unreachable("unsupported private_element_size"); 3082 } 3083 case AMDGPUAS::LOCAL_ADDRESS: 3084 if (NumElements > 2) 3085 return SplitVectorLoad(Op, DAG); 3086 3087 if (NumElements == 2) 3088 return SDValue(); 3089 3090 // If properly aligned, if we split we might be able to use ds_read_b64. 3091 return SplitVectorLoad(Op, DAG); 3092 default: 3093 return SDValue(); 3094 } 3095 } 3096 3097 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 3098 if (Op.getValueType() != MVT::i64) 3099 return SDValue(); 3100 3101 SDLoc DL(Op); 3102 SDValue Cond = Op.getOperand(0); 3103 3104 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 3105 SDValue One = DAG.getConstant(1, DL, MVT::i32); 3106 3107 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); 3108 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); 3109 3110 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); 3111 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); 3112 3113 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); 3114 3115 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); 3116 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); 3117 3118 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); 3119 3120 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi}); 3121 return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res); 3122 } 3123 3124 // Catch division cases where we can use shortcuts with rcp and rsq 3125 // instructions. 3126 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op, 3127 SelectionDAG &DAG) const { 3128 SDLoc SL(Op); 3129 SDValue LHS = Op.getOperand(0); 3130 SDValue RHS = Op.getOperand(1); 3131 EVT VT = Op.getValueType(); 3132 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath; 3133 3134 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { 3135 if (Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals()) || 3136 VT == MVT::f16) { 3137 if (CLHS->isExactlyValue(1.0)) { 3138 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to 3139 // the CI documentation has a worst case error of 1 ulp. 3140 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to 3141 // use it as long as we aren't trying to use denormals. 3142 // 3143 // v_rcp_f16 and v_rsq_f16 DO support denormals. 3144 3145 // 1.0 / sqrt(x) -> rsq(x) 3146 3147 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP 3148 // error seems really high at 2^29 ULP. 
3149 if (RHS.getOpcode() == ISD::FSQRT) 3150 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); 3151 3152 // 1.0 / x -> rcp(x) 3153 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 3154 } 3155 3156 // Same as for 1.0, but expand the sign out of the constant. 3157 if (CLHS->isExactlyValue(-1.0)) { 3158 // -1.0 / x -> rcp (fneg x) 3159 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3160 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS); 3161 } 3162 } 3163 } 3164 3165 const SDNodeFlags *Flags = Op->getFlags(); 3166 3167 if (Unsafe || Flags->hasAllowReciprocal()) { 3168 // Turn into multiply by the reciprocal. 3169 // x / y -> x * (1.0 / y) 3170 SDNodeFlags Flags; 3171 Flags.setUnsafeAlgebra(true); 3172 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 3173 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, &Flags); 3174 } 3175 3176 return SDValue(); 3177 } 3178 3179 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, 3180 EVT VT, SDValue A, SDValue B, SDValue GlueChain) { 3181 if (GlueChain->getNumValues() <= 1) { 3182 return DAG.getNode(Opcode, SL, VT, A, B); 3183 } 3184 3185 assert(GlueChain->getNumValues() == 3); 3186 3187 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); 3188 switch (Opcode) { 3189 default: llvm_unreachable("no chain equivalent for opcode"); 3190 case ISD::FMUL: 3191 Opcode = AMDGPUISD::FMUL_W_CHAIN; 3192 break; 3193 } 3194 3195 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, 3196 GlueChain.getValue(2)); 3197 } 3198 3199 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, 3200 EVT VT, SDValue A, SDValue B, SDValue C, 3201 SDValue GlueChain) { 3202 if (GlueChain->getNumValues() <= 1) { 3203 return DAG.getNode(Opcode, SL, VT, A, B, C); 3204 } 3205 3206 assert(GlueChain->getNumValues() == 3); 3207 3208 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); 3209 switch (Opcode) { 3210 default: llvm_unreachable("no chain equivalent for opcode"); 3211 case ISD::FMA: 3212 Opcode = AMDGPUISD::FMA_W_CHAIN; 3213 break; 3214 } 3215 3216 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C, 3217 GlueChain.getValue(2)); 3218 } 3219 3220 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const { 3221 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 3222 return FastLowered; 3223 3224 SDLoc SL(Op); 3225 SDValue Src0 = Op.getOperand(0); 3226 SDValue Src1 = Op.getOperand(1); 3227 3228 SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); 3229 SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); 3230 3231 SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1); 3232 SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1); 3233 3234 SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32); 3235 SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag); 3236 3237 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0); 3238 } 3239 3240 // Faster 2.5 ULP division that does not support denormals. 
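// The denominator is conditionally pre-scaled so that its reciprocal stays
// well inside the normal range; the constants below appear to be 2^96
// (0x6f800000) and 2^-32 (0x2f800000) interpreted as f32 bit patterns. In
// rough scalar terms (rcp() standing in for AMDGPUISD::RCP, purely
// illustrative, not part of the lowering):
//
//   float s = (fabsf(den) > 0x1.0p+96f) ? 0x1.0p-32f : 1.0f;
//   return s * (num * rcp(den * s));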
3241 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const { 3242 SDLoc SL(Op); 3243 SDValue LHS = Op.getOperand(1); 3244 SDValue RHS = Op.getOperand(2); 3245 3246 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); 3247 3248 const APFloat K0Val(BitsToFloat(0x6f800000)); 3249 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32); 3250 3251 const APFloat K1Val(BitsToFloat(0x2f800000)); 3252 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32); 3253 3254 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 3255 3256 EVT SetCCVT = 3257 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); 3258 3259 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); 3260 3261 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); 3262 3263 // TODO: Should this propagate fast-math-flags? 3264 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); 3265 3266 // rcp does not support denormals. 3267 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); 3268 3269 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); 3270 3271 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); 3272 } 3273 3274 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { 3275 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 3276 return FastLowered; 3277 3278 SDLoc SL(Op); 3279 SDValue LHS = Op.getOperand(0); 3280 SDValue RHS = Op.getOperand(1); 3281 3282 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 3283 3284 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1); 3285 3286 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, 3287 RHS, RHS, LHS); 3288 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, 3289 LHS, RHS, LHS); 3290 3291 // Denominator is scaled to not be denormal, so using rcp is ok. 
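  // Labeling the values built below (illustrative only), with r = rcp(d),
  // n = NumeratorScaled and d = DenominatorScaled:
  //   Fma0 = 1 - d*r              (reciprocal error)
  //   Fma1 = r + r*Fma0           (refined reciprocal)
  //   Mul  = n * Fma1             (initial quotient)
  //   Fma2 = n - d*Mul            (quotient error)
  //   Fma3 = Mul + Fma1*Fma2      (refined quotient)
  //   Fma4 = n - d*Fma3           (residual consumed by DIV_FMAS)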
3292 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, 3293 DenominatorScaled); 3294 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, 3295 DenominatorScaled); 3296 3297 const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE | 3298 (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) | 3299 (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_); 3300 3301 const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16); 3302 3303 if (!Subtarget->hasFP32Denormals()) { 3304 SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); 3305 const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE, 3306 SL, MVT::i32); 3307 SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs, 3308 DAG.getEntryNode(), 3309 EnableDenormValue, BitField); 3310 SDValue Ops[3] = { 3311 NegDivScale0, 3312 EnableDenorm.getValue(0), 3313 EnableDenorm.getValue(1) 3314 }; 3315 3316 NegDivScale0 = DAG.getMergeValues(Ops, SL); 3317 } 3318 3319 SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, 3320 ApproxRcp, One, NegDivScale0); 3321 3322 SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, 3323 ApproxRcp, Fma0); 3324 3325 SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled, 3326 Fma1, Fma1); 3327 3328 SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, 3329 NumeratorScaled, Mul); 3330 3331 SDValue Fma3 = getFPTernOp(DAG, ISD::FMA,SL, MVT::f32, Fma2, Fma1, Mul, Fma2); 3332 3333 SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, 3334 NumeratorScaled, Fma3); 3335 3336 if (!Subtarget->hasFP32Denormals()) { 3337 const SDValue DisableDenormValue = 3338 DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32); 3339 SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other, 3340 Fma4.getValue(1), 3341 DisableDenormValue, 3342 BitField, 3343 Fma4.getValue(2)); 3344 3345 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, 3346 DisableDenorm, DAG.getRoot()); 3347 DAG.setRoot(OutputChain); 3348 } 3349 3350 SDValue Scale = NumeratorScaled.getValue(1); 3351 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32, 3352 Fma4, Fma1, Fma3, Scale); 3353 3354 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS); 3355 } 3356 3357 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { 3358 if (DAG.getTarget().Options.UnsafeFPMath) 3359 return lowerFastUnsafeFDIV(Op, DAG); 3360 3361 SDLoc SL(Op); 3362 SDValue X = Op.getOperand(0); 3363 SDValue Y = Op.getOperand(1); 3364 3365 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); 3366 3367 SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1); 3368 3369 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X); 3370 3371 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0); 3372 3373 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0); 3374 3375 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One); 3376 3377 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp); 3378 3379 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One); 3380 3381 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X); 3382 3383 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1); 3384 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3); 3385 3386 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64, 3387 NegDivScale0, Mul, DivScale1); 3388 3389 SDValue Scale; 
3390
3391   if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
3392     // Work around a hardware bug on SI where the condition output from div_scale
3393     // is not usable.
3394
3395     const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
3396
3397     // Figure out which scale to use for div_fmas.
3398     SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
3399     SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
3400     SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
3401     SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
3402
3403     SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
3404     SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
3405
3406     SDValue Scale0Hi
3407       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
3408     SDValue Scale1Hi
3409       = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
3410
3411     SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
3412     SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
3413     Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
3414   } else {
3415     Scale = DivScale1.getValue(1);
3416   }
3417
3418   SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
3419                              Fma4, Fma3, Mul, Scale);
3420
3421   return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
3422 }
3423
3424 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
3425   EVT VT = Op.getValueType();
3426
3427   if (VT == MVT::f32)
3428     return LowerFDIV32(Op, DAG);
3429
3430   if (VT == MVT::f64)
3431     return LowerFDIV64(Op, DAG);
3432
3433   if (VT == MVT::f16)
3434     return LowerFDIV16(Op, DAG);
3435
3436   llvm_unreachable("Unexpected type for fdiv");
3437 }
3438
3439 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
3440   SDLoc DL(Op);
3441   StoreSDNode *Store = cast<StoreSDNode>(Op);
3442   EVT VT = Store->getMemoryVT();
3443
3444   if (VT == MVT::i1) {
3445     return DAG.getTruncStore(Store->getChain(), DL,
3446                              DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
3447                              Store->getBasePtr(), MVT::i1, Store->getMemOperand());
3448   }
3449
3450   assert(VT.isVector() &&
3451          Store->getValue().getValueType().getScalarType() == MVT::i32);
3452
3453   unsigned AS = Store->getAddressSpace();
3454   if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
3455                           AS, Store->getAlignment())) {
3456     return expandUnalignedStore(Store, DAG);
3457   }
3458
3459   MachineFunction &MF = DAG.getMachineFunction();
3460   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3461   // If there is a possibility that flat instructions access scratch memory
3462   // then we need to use the same legalization rules we use for private.
3463   if (AS == AMDGPUAS::FLAT_ADDRESS)
3464     AS = MFI->hasFlatScratchInit() ?
3465 AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; 3466 3467 unsigned NumElements = VT.getVectorNumElements(); 3468 switch (AS) { 3469 case AMDGPUAS::GLOBAL_ADDRESS: 3470 case AMDGPUAS::FLAT_ADDRESS: 3471 if (NumElements > 4) 3472 return SplitVectorStore(Op, DAG); 3473 return SDValue(); 3474 case AMDGPUAS::PRIVATE_ADDRESS: { 3475 switch (Subtarget->getMaxPrivateElementSize()) { 3476 case 4: 3477 return scalarizeVectorStore(Store, DAG); 3478 case 8: 3479 if (NumElements > 2) 3480 return SplitVectorStore(Op, DAG); 3481 return SDValue(); 3482 case 16: 3483 if (NumElements > 4) 3484 return SplitVectorStore(Op, DAG); 3485 return SDValue(); 3486 default: 3487 llvm_unreachable("unsupported private_element_size"); 3488 } 3489 } 3490 case AMDGPUAS::LOCAL_ADDRESS: { 3491 if (NumElements > 2) 3492 return SplitVectorStore(Op, DAG); 3493 3494 if (NumElements == 2) 3495 return Op; 3496 3497 // If properly aligned, if we split we might be able to use ds_write_b64. 3498 return SplitVectorStore(Op, DAG); 3499 } 3500 default: 3501 llvm_unreachable("unhandled address space"); 3502 } 3503 } 3504 3505 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { 3506 SDLoc DL(Op); 3507 EVT VT = Op.getValueType(); 3508 SDValue Arg = Op.getOperand(0); 3509 // TODO: Should this propagate fast-math-flags? 3510 SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT, 3511 DAG.getNode(ISD::FMUL, DL, VT, Arg, 3512 DAG.getConstantFP(0.5/M_PI, DL, 3513 VT))); 3514 3515 switch (Op.getOpcode()) { 3516 case ISD::FCOS: 3517 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart); 3518 case ISD::FSIN: 3519 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart); 3520 default: 3521 llvm_unreachable("Wrong trig opcode"); 3522 } 3523 } 3524 3525 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const { 3526 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op); 3527 assert(AtomicNode->isCompareAndSwap()); 3528 unsigned AS = AtomicNode->getAddressSpace(); 3529 3530 // No custom lowering required for local address space 3531 if (!isFlatGlobalAddrSpace(AS)) 3532 return Op; 3533 3534 // Non-local address space requires custom lowering for atomic compare 3535 // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2 3536 SDLoc DL(Op); 3537 SDValue ChainIn = Op.getOperand(0); 3538 SDValue Addr = Op.getOperand(1); 3539 SDValue Old = Op.getOperand(2); 3540 SDValue New = Op.getOperand(3); 3541 EVT VT = Op.getValueType(); 3542 MVT SimpleVT = VT.getSimpleVT(); 3543 MVT VecType = MVT::getVectorVT(SimpleVT, 2); 3544 3545 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old}); 3546 SDValue Ops[] = { ChainIn, Addr, NewOld }; 3547 3548 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(), 3549 Ops, VT, AtomicNode->getMemOperand()); 3550 } 3551 3552 //===----------------------------------------------------------------------===// 3553 // Custom DAG optimizations 3554 //===----------------------------------------------------------------------===// 3555 3556 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, 3557 DAGCombinerInfo &DCI) const { 3558 EVT VT = N->getValueType(0); 3559 EVT ScalarVT = VT.getScalarType(); 3560 if (ScalarVT != MVT::f32) 3561 return SDValue(); 3562 3563 SelectionDAG &DAG = DCI.DAG; 3564 SDLoc DL(N); 3565 3566 SDValue Src = N->getOperand(0); 3567 EVT SrcVT = Src.getValueType(); 3568 3569 // TODO: We could try to match extracting the higher bytes, which would be 3570 // easier if i8 vectors weren't 
promoted to i32 vectors, particularly after
3571   // types are legalized. v4i8 -> v4f32 is probably the only case to worry
3572   // about in practice.
3573   if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) {
3574     if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
3575       SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
3576       DCI.AddToWorklist(Cvt.getNode());
3577       return Cvt;
3578     }
3579   }
3580
3581   return SDValue();
3582 }
3583
3584 /// \brief Return true if the given offset Size in bytes can be folded into
3585 /// the immediate offsets of a memory instruction for the given address space.
3586 static bool canFoldOffset(unsigned OffsetSize, unsigned AS,
3587                           const SISubtarget &STI) {
3588   switch (AS) {
3589   case AMDGPUAS::GLOBAL_ADDRESS:
3590     // MUBUF instructions have a 12-bit offset in bytes.
3591     return isUInt<12>(OffsetSize);
3592   case AMDGPUAS::CONSTANT_ADDRESS:
3593     // SMRD instructions have an 8-bit offset in dwords on SI and
3594     // a 20-bit offset in bytes on VI.
3595     if (STI.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
3596       return isUInt<20>(OffsetSize);
3597     else
3598       return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
3599   case AMDGPUAS::LOCAL_ADDRESS:
3600   case AMDGPUAS::REGION_ADDRESS:
3601     // The single offset versions have a 16-bit offset in bytes.
3602     return isUInt<16>(OffsetSize);
3603   case AMDGPUAS::PRIVATE_ADDRESS:
3604     // Indirect register addressing does not use any offsets.
3605   default:
3606     return false;
3607   }
3608 }
3609
3610 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
3611
3612 // This is a variant of
3613 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
3614 //
3615 // The normal DAG combiner will do this, but only if the add has one use since
3616 // that would increase the number of instructions.
3617 //
3618 // This prevents us from seeing a constant offset that can be folded into a
3619 // memory instruction's addressing mode. If we know the resulting add offset of
3620 // a pointer can be folded into an addressing offset, we can replace the pointer
3621 // operand with the add of the new constant offset. This eliminates one of the
3622 // uses, and may allow the remaining use to also be simplified.
3623 //
3624 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
3625                                                unsigned AddrSpace,
3626                                                DAGCombinerInfo &DCI) const {
3627   SDValue N0 = N->getOperand(0);
3628   SDValue N1 = N->getOperand(1);
3629
3630   if (N0.getOpcode() != ISD::ADD)
3631     return SDValue();
3632
3633   const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
3634   if (!CN1)
3635     return SDValue();
3636
3637   const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
3638   if (!CAdd)
3639     return SDValue();
3640
3641   // If the resulting offset is too large, we can't fold it into the addressing
3642   // mode offset.
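  // For example (illustrative values), with c1 == 16 and c2 == 2 the combined
  // byte offset is 16 << 2 == 64, which fits the 12-bit MUBUF offset used for
  // global accesses; a combined offset of 8192 bytes would not.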
3643 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); 3644 if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *getSubtarget())) 3645 return SDValue(); 3646 3647 SelectionDAG &DAG = DCI.DAG; 3648 SDLoc SL(N); 3649 EVT VT = N->getValueType(0); 3650 3651 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); 3652 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32); 3653 3654 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset); 3655 } 3656 3657 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N, 3658 DAGCombinerInfo &DCI) const { 3659 SDValue Ptr = N->getBasePtr(); 3660 SelectionDAG &DAG = DCI.DAG; 3661 SDLoc SL(N); 3662 3663 // TODO: We could also do this for multiplies. 3664 unsigned AS = N->getAddressSpace(); 3665 if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUAS::PRIVATE_ADDRESS) { 3666 SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI); 3667 if (NewPtr) { 3668 SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end()); 3669 3670 NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr; 3671 return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0); 3672 } 3673 } 3674 3675 return SDValue(); 3676 } 3677 3678 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) { 3679 return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) || 3680 (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) || 3681 (Opc == ISD::XOR && Val == 0); 3682 } 3683 3684 // Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This 3685 // will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit 3686 // integer combine opportunities since most 64-bit operations are decomposed 3687 // this way. TODO: We won't want this for SALU especially if it is an inline 3688 // immediate. 3689 SDValue SITargetLowering::splitBinaryBitConstantOp( 3690 DAGCombinerInfo &DCI, 3691 const SDLoc &SL, 3692 unsigned Opc, SDValue LHS, 3693 const ConstantSDNode *CRHS) const { 3694 uint64_t Val = CRHS->getZExtValue(); 3695 uint32_t ValLo = Lo_32(Val); 3696 uint32_t ValHi = Hi_32(Val); 3697 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3698 3699 if ((bitOpWithConstantIsReducible(Opc, ValLo) || 3700 bitOpWithConstantIsReducible(Opc, ValHi)) || 3701 (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) { 3702 // If we need to materialize a 64-bit immediate, it will be split up later 3703 // anyway. Avoid creating the harder to understand 64-bit immediate 3704 // materialization. 
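    // For example, (and i64:x, 0x00000000ffffffff) splits into a low half
    // that is left unchanged and a high half that folds to zero, so no
    // 64-bit mask ever needs to be materialized.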
3705 return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi); 3706 } 3707 3708 return SDValue(); 3709 } 3710 3711 SDValue SITargetLowering::performAndCombine(SDNode *N, 3712 DAGCombinerInfo &DCI) const { 3713 if (DCI.isBeforeLegalize()) 3714 return SDValue(); 3715 3716 SelectionDAG &DAG = DCI.DAG; 3717 EVT VT = N->getValueType(0); 3718 SDValue LHS = N->getOperand(0); 3719 SDValue RHS = N->getOperand(1); 3720 3721 3722 if (VT == MVT::i64) { 3723 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); 3724 if (CRHS) { 3725 if (SDValue Split 3726 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS)) 3727 return Split; 3728 } 3729 } 3730 3731 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> 3732 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) 3733 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) { 3734 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 3735 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); 3736 3737 SDValue X = LHS.getOperand(0); 3738 SDValue Y = RHS.getOperand(0); 3739 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) 3740 return SDValue(); 3741 3742 if (LCC == ISD::SETO) { 3743 if (X != LHS.getOperand(1)) 3744 return SDValue(); 3745 3746 if (RCC == ISD::SETUNE) { 3747 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); 3748 if (!C1 || !C1->isInfinity() || C1->isNegative()) 3749 return SDValue(); 3750 3751 const uint32_t Mask = SIInstrFlags::N_NORMAL | 3752 SIInstrFlags::N_SUBNORMAL | 3753 SIInstrFlags::N_ZERO | 3754 SIInstrFlags::P_ZERO | 3755 SIInstrFlags::P_SUBNORMAL | 3756 SIInstrFlags::P_NORMAL; 3757 3758 static_assert(((~(SIInstrFlags::S_NAN | 3759 SIInstrFlags::Q_NAN | 3760 SIInstrFlags::N_INFINITY | 3761 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, 3762 "mask not equal"); 3763 3764 SDLoc DL(N); 3765 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 3766 X, DAG.getConstant(Mask, DL, MVT::i32)); 3767 } 3768 } 3769 } 3770 3771 return SDValue(); 3772 } 3773 3774 SDValue SITargetLowering::performOrCombine(SDNode *N, 3775 DAGCombinerInfo &DCI) const { 3776 SelectionDAG &DAG = DCI.DAG; 3777 SDValue LHS = N->getOperand(0); 3778 SDValue RHS = N->getOperand(1); 3779 3780 EVT VT = N->getValueType(0); 3781 if (VT == MVT::i1) { 3782 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) 3783 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && 3784 RHS.getOpcode() == AMDGPUISD::FP_CLASS) { 3785 SDValue Src = LHS.getOperand(0); 3786 if (Src != RHS.getOperand(0)) 3787 return SDValue(); 3788 3789 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); 3790 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 3791 if (!CLHS || !CRHS) 3792 return SDValue(); 3793 3794 // Only 10 bits are used. 3795 static const uint32_t MaxMask = 0x3ff; 3796 3797 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; 3798 SDLoc DL(N); 3799 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 3800 Src, DAG.getConstant(NewMask, DL, MVT::i32)); 3801 } 3802 3803 return SDValue(); 3804 } 3805 3806 if (VT != MVT::i64) 3807 return SDValue(); 3808 3809 // TODO: This could be a generic combine with a predicate for extracting the 3810 // high half of an integer being free. 
3811 3812 // (or i64:x, (zero_extend i32:y)) -> 3813 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x))) 3814 if (LHS.getOpcode() == ISD::ZERO_EXTEND && 3815 RHS.getOpcode() != ISD::ZERO_EXTEND) 3816 std::swap(LHS, RHS); 3817 3818 if (RHS.getOpcode() == ISD::ZERO_EXTEND) { 3819 SDValue ExtSrc = RHS.getOperand(0); 3820 EVT SrcVT = ExtSrc.getValueType(); 3821 if (SrcVT == MVT::i32) { 3822 SDLoc SL(N); 3823 SDValue LowLHS, HiBits; 3824 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG); 3825 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc); 3826 3827 DCI.AddToWorklist(LowOr.getNode()); 3828 DCI.AddToWorklist(HiBits.getNode()); 3829 3830 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 3831 LowOr, HiBits); 3832 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 3833 } 3834 } 3835 3836 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 3837 if (CRHS) { 3838 if (SDValue Split 3839 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS)) 3840 return Split; 3841 } 3842 3843 return SDValue(); 3844 } 3845 3846 SDValue SITargetLowering::performXorCombine(SDNode *N, 3847 DAGCombinerInfo &DCI) const { 3848 EVT VT = N->getValueType(0); 3849 if (VT != MVT::i64) 3850 return SDValue(); 3851 3852 SDValue LHS = N->getOperand(0); 3853 SDValue RHS = N->getOperand(1); 3854 3855 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); 3856 if (CRHS) { 3857 if (SDValue Split 3858 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS)) 3859 return Split; 3860 } 3861 3862 return SDValue(); 3863 } 3864 3865 SDValue SITargetLowering::performClassCombine(SDNode *N, 3866 DAGCombinerInfo &DCI) const { 3867 SelectionDAG &DAG = DCI.DAG; 3868 SDValue Mask = N->getOperand(1); 3869 3870 // fp_class x, 0 -> false 3871 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { 3872 if (CMask->isNullValue()) 3873 return DAG.getConstant(0, SDLoc(N), MVT::i1); 3874 } 3875 3876 if (N->getOperand(0).isUndef()) 3877 return DAG.getUNDEF(MVT::i1); 3878 3879 return SDValue(); 3880 } 3881 3882 // Constant fold canonicalize. 3883 SDValue SITargetLowering::performFCanonicalizeCombine( 3884 SDNode *N, 3885 DAGCombinerInfo &DCI) const { 3886 ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); 3887 if (!CFP) 3888 return SDValue(); 3889 3890 SelectionDAG &DAG = DCI.DAG; 3891 const APFloat &C = CFP->getValueAPF(); 3892 3893 // Flush denormals to 0 if not enabled. 3894 if (C.isDenormal()) { 3895 EVT VT = N->getValueType(0); 3896 if (VT == MVT::f32 && !Subtarget->hasFP32Denormals()) 3897 return DAG.getConstantFP(0.0, SDLoc(N), VT); 3898 3899 if (VT == MVT::f64 && !Subtarget->hasFP64Denormals()) 3900 return DAG.getConstantFP(0.0, SDLoc(N), VT); 3901 3902 if (VT == MVT::f16 && !Subtarget->hasFP16Denormals()) 3903 return DAG.getConstantFP(0.0, SDLoc(N), VT); 3904 } 3905 3906 if (C.isNaN()) { 3907 EVT VT = N->getValueType(0); 3908 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics()); 3909 if (C.isSignaling()) { 3910 // Quiet a signaling NaN. 3911 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT); 3912 } 3913 3914 // Make sure it is the canonical NaN bitpattern. 3915 // 3916 // TODO: Can we use -1 as the canonical NaN value since it's an inline 3917 // immediate? 
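    // (For f32 the value produced by APFloat::getQNaN should be the positive
    // quiet NaN with only the top mantissa bit set, i.e. 0x7fc00000.)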
3918 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt()) 3919 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT); 3920 } 3921 3922 return SDValue(CFP, 0); 3923 } 3924 3925 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { 3926 switch (Opc) { 3927 case ISD::FMAXNUM: 3928 return AMDGPUISD::FMAX3; 3929 case ISD::SMAX: 3930 return AMDGPUISD::SMAX3; 3931 case ISD::UMAX: 3932 return AMDGPUISD::UMAX3; 3933 case ISD::FMINNUM: 3934 return AMDGPUISD::FMIN3; 3935 case ISD::SMIN: 3936 return AMDGPUISD::SMIN3; 3937 case ISD::UMIN: 3938 return AMDGPUISD::UMIN3; 3939 default: 3940 llvm_unreachable("Not a min/max opcode"); 3941 } 3942 } 3943 3944 static SDValue performIntMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL, 3945 SDValue Op0, SDValue Op1, bool Signed) { 3946 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1); 3947 if (!K1) 3948 return SDValue(); 3949 3950 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); 3951 if (!K0) 3952 return SDValue(); 3953 3954 if (Signed) { 3955 if (K0->getAPIntValue().sge(K1->getAPIntValue())) 3956 return SDValue(); 3957 } else { 3958 if (K0->getAPIntValue().uge(K1->getAPIntValue())) 3959 return SDValue(); 3960 } 3961 3962 EVT VT = K0->getValueType(0); 3963 3964 MVT NVT = MVT::i32; 3965 unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 3966 3967 SDValue Tmp1, Tmp2, Tmp3; 3968 Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0)); 3969 Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1)); 3970 Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1); 3971 3972 if (VT == MVT::i16) { 3973 Tmp1 = DAG.getNode(Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3, SL, NVT, 3974 Tmp1, Tmp2, Tmp3); 3975 3976 return DAG.getNode(ISD::TRUNCATE, SL, VT, Tmp1); 3977 } else 3978 return DAG.getNode(Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3, SL, VT, 3979 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0)); 3980 } 3981 3982 static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) { 3983 if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions()) 3984 return true; 3985 3986 return DAG.isKnownNeverNaN(Op); 3987 } 3988 3989 static SDValue performFPMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL, 3990 SDValue Op0, SDValue Op1) { 3991 ConstantFPSDNode *K1 = dyn_cast<ConstantFPSDNode>(Op1); 3992 if (!K1) 3993 return SDValue(); 3994 3995 ConstantFPSDNode *K0 = dyn_cast<ConstantFPSDNode>(Op0.getOperand(1)); 3996 if (!K0) 3997 return SDValue(); 3998 3999 // Ordered >= (although NaN inputs should have folded away by now). 4000 APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF()); 4001 if (Cmp == APFloat::cmpGreaterThan) 4002 return SDValue(); 4003 4004 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a 4005 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would then 4006 // give the other result, which is different from med3 with a NaN input. 4007 SDValue Var = Op0.getOperand(0); 4008 if (!isKnownNeverSNan(DAG, Var)) 4009 return SDValue(); 4010 4011 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0), 4012 Var, SDValue(K0, 0), SDValue(K1, 0)); 4013 } 4014 4015 SDValue SITargetLowering::performMinMaxCombine(SDNode *N, 4016 DAGCombinerInfo &DCI) const { 4017 SelectionDAG &DAG = DCI.DAG; 4018 4019 unsigned Opc = N->getOpcode(); 4020 SDValue Op0 = N->getOperand(0); 4021 SDValue Op1 = N->getOperand(1); 4022 4023 // Only do this if the inner op has one use since this will just increases 4024 // register pressure for no benefit. 
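  // The typical payoff: chained min/max collapse into a single min3/max3, and
  // clamp patterns such as min(max(x, 0), 255) become a single med3.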
4025 4026 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY) { 4027 // max(max(a, b), c) -> max3(a, b, c) 4028 // min(min(a, b), c) -> min3(a, b, c) 4029 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { 4030 SDLoc DL(N); 4031 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 4032 DL, 4033 N->getValueType(0), 4034 Op0.getOperand(0), 4035 Op0.getOperand(1), 4036 Op1); 4037 } 4038 4039 // Try commuted. 4040 // max(a, max(b, c)) -> max3(a, b, c) 4041 // min(a, min(b, c)) -> min3(a, b, c) 4042 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { 4043 SDLoc DL(N); 4044 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 4045 DL, 4046 N->getValueType(0), 4047 Op0, 4048 Op1.getOperand(0), 4049 Op1.getOperand(1)); 4050 } 4051 } 4052 4053 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1) 4054 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) { 4055 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true)) 4056 return Med3; 4057 } 4058 4059 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) { 4060 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false)) 4061 return Med3; 4062 } 4063 4064 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1) 4065 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) || 4066 (Opc == AMDGPUISD::FMIN_LEGACY && 4067 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) && 4068 N->getValueType(0) == MVT::f32 && Op0.hasOneUse()) { 4069 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1)) 4070 return Res; 4071 } 4072 4073 return SDValue(); 4074 } 4075 4076 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG, 4077 const SDNode *N0, 4078 const SDNode *N1) const { 4079 EVT VT = N0->getValueType(0); 4080 4081 // Only do this if we are not trying to support denormals. v_mad_f32 does not 4082 // support denormals ever. 4083 if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) || 4084 (VT == MVT::f16 && !Subtarget->hasFP16Denormals())) 4085 return ISD::FMAD; 4086 4087 const TargetOptions &Options = DAG.getTarget().Options; 4088 if ((Options.AllowFPOpFusion == FPOpFusion::Fast || 4089 Options.UnsafeFPMath || 4090 (cast<BinaryWithFlagsSDNode>(N0)->Flags.hasUnsafeAlgebra() && 4091 cast<BinaryWithFlagsSDNode>(N1)->Flags.hasUnsafeAlgebra())) && 4092 isFMAFasterThanFMulAndFAdd(VT)) { 4093 return ISD::FMA; 4094 } 4095 4096 return 0; 4097 } 4098 4099 SDValue SITargetLowering::performFAddCombine(SDNode *N, 4100 DAGCombinerInfo &DCI) const { 4101 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 4102 return SDValue(); 4103 4104 SelectionDAG &DAG = DCI.DAG; 4105 EVT VT = N->getValueType(0); 4106 assert(!VT.isVector()); 4107 4108 SDLoc SL(N); 4109 SDValue LHS = N->getOperand(0); 4110 SDValue RHS = N->getOperand(1); 4111 4112 // These should really be instruction patterns, but writing patterns with 4113 // source modiifiers is a pain. 
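  // fadd (a, a) is a * 2.0, so these shapes collapse into a single fused
  // multiply-add whose 2.0 operand can be encoded as an inline immediate.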
4114 4115 // fadd (fadd (a, a), b) -> mad 2.0, a, b 4116 if (LHS.getOpcode() == ISD::FADD) { 4117 SDValue A = LHS.getOperand(0); 4118 if (A == LHS.getOperand(1)) { 4119 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); 4120 if (FusedOp != 0) { 4121 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 4122 return DAG.getNode(FusedOp, SL, VT, A, Two, RHS); 4123 } 4124 } 4125 } 4126 4127 // fadd (b, fadd (a, a)) -> mad 2.0, a, b 4128 if (RHS.getOpcode() == ISD::FADD) { 4129 SDValue A = RHS.getOperand(0); 4130 if (A == RHS.getOperand(1)) { 4131 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); 4132 if (FusedOp != 0) { 4133 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 4134 return DAG.getNode(FusedOp, SL, VT, A, Two, LHS); 4135 } 4136 } 4137 } 4138 4139 return SDValue(); 4140 } 4141 4142 SDValue SITargetLowering::performFSubCombine(SDNode *N, 4143 DAGCombinerInfo &DCI) const { 4144 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 4145 return SDValue(); 4146 4147 SelectionDAG &DAG = DCI.DAG; 4148 SDLoc SL(N); 4149 EVT VT = N->getValueType(0); 4150 assert(!VT.isVector()); 4151 4152 // Try to get the fneg to fold into the source modifier. This undoes generic 4153 // DAG combines and folds them into the mad. 4154 // 4155 // Only do this if we are not trying to support denormals. v_mad_f32 does 4156 // not support denormals ever. 4157 SDValue LHS = N->getOperand(0); 4158 SDValue RHS = N->getOperand(1); 4159 if (LHS.getOpcode() == ISD::FADD) { 4160 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c) 4161 SDValue A = LHS.getOperand(0); 4162 if (A == LHS.getOperand(1)) { 4163 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); 4164 if (FusedOp != 0){ 4165 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 4166 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 4167 4168 return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS); 4169 } 4170 } 4171 } 4172 4173 if (RHS.getOpcode() == ISD::FADD) { 4174 // (fsub c, (fadd a, a)) -> mad -2.0, a, c 4175 4176 SDValue A = RHS.getOperand(0); 4177 if (A == RHS.getOperand(1)) { 4178 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); 4179 if (FusedOp != 0){ 4180 const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT); 4181 return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS); 4182 } 4183 } 4184 } 4185 4186 return SDValue(); 4187 } 4188 4189 SDValue SITargetLowering::performSetCCCombine(SDNode *N, 4190 DAGCombinerInfo &DCI) const { 4191 SelectionDAG &DAG = DCI.DAG; 4192 SDLoc SL(N); 4193 4194 SDValue LHS = N->getOperand(0); 4195 SDValue RHS = N->getOperand(1); 4196 EVT VT = LHS.getValueType(); 4197 4198 if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() && 4199 VT != MVT::f16)) 4200 return SDValue(); 4201 4202 // Match isinf pattern 4203 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) 4204 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 4205 if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) { 4206 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); 4207 if (!CRHS) 4208 return SDValue(); 4209 4210 const APFloat &APF = CRHS->getValueAPF(); 4211 if (APF.isInfinity() && !APF.isNegative()) { 4212 unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY; 4213 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0), 4214 DAG.getConstant(Mask, SL, MVT::i32)); 4215 } 4216 } 4217 4218 return SDValue(); 4219 } 4220 4221 SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N, 4222 DAGCombinerInfo &DCI) const { 4223 SelectionDAG &DAG = DCI.DAG; 
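  // The opcode encodes which byte of the 32-bit source is converted (Offset
  // below); a right shift by a whole number of bytes can therefore be folded
  // into the byte index, and only the 8 addressed bits of the source are
  // actually demanded.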
4224 SDLoc SL(N); 4225 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; 4226 4227 SDValue Src = N->getOperand(0); 4228 SDValue Srl = N->getOperand(0); 4229 if (Srl.getOpcode() == ISD::ZERO_EXTEND) 4230 Srl = Srl.getOperand(0); 4231 4232 // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero. 4233 if (Srl.getOpcode() == ISD::SRL) { 4234 // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x 4235 // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x 4236 // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x 4237 4238 if (const ConstantSDNode *C = 4239 dyn_cast<ConstantSDNode>(Srl.getOperand(1))) { 4240 Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)), 4241 EVT(MVT::i32)); 4242 4243 unsigned SrcOffset = C->getZExtValue() + 8 * Offset; 4244 if (SrcOffset < 32 && SrcOffset % 8 == 0) { 4245 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL, 4246 MVT::f32, Srl); 4247 } 4248 } 4249 } 4250 4251 APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8); 4252 4253 APInt KnownZero, KnownOne; 4254 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 4255 !DCI.isBeforeLegalizeOps()); 4256 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4257 if (TLO.ShrinkDemandedConstant(Src, Demanded) || 4258 TLI.SimplifyDemandedBits(Src, Demanded, KnownZero, KnownOne, TLO)) { 4259 DCI.CommitTargetLoweringOpt(TLO); 4260 } 4261 4262 return SDValue(); 4263 } 4264 4265 SDValue SITargetLowering::PerformDAGCombine(SDNode *N, 4266 DAGCombinerInfo &DCI) const { 4267 switch (N->getOpcode()) { 4268 default: 4269 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 4270 case ISD::FADD: 4271 return performFAddCombine(N, DCI); 4272 case ISD::FSUB: 4273 return performFSubCombine(N, DCI); 4274 case ISD::SETCC: 4275 return performSetCCCombine(N, DCI); 4276 case ISD::FMAXNUM: 4277 case ISD::FMINNUM: 4278 case ISD::SMAX: 4279 case ISD::SMIN: 4280 case ISD::UMAX: 4281 case ISD::UMIN: 4282 case AMDGPUISD::FMIN_LEGACY: 4283 case AMDGPUISD::FMAX_LEGACY: { 4284 if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG && 4285 N->getValueType(0) != MVT::f64 && 4286 getTargetMachine().getOptLevel() > CodeGenOpt::None) 4287 return performMinMaxCombine(N, DCI); 4288 break; 4289 } 4290 case ISD::LOAD: 4291 case ISD::STORE: 4292 case ISD::ATOMIC_LOAD: 4293 case ISD::ATOMIC_STORE: 4294 case ISD::ATOMIC_CMP_SWAP: 4295 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 4296 case ISD::ATOMIC_SWAP: 4297 case ISD::ATOMIC_LOAD_ADD: 4298 case ISD::ATOMIC_LOAD_SUB: 4299 case ISD::ATOMIC_LOAD_AND: 4300 case ISD::ATOMIC_LOAD_OR: 4301 case ISD::ATOMIC_LOAD_XOR: 4302 case ISD::ATOMIC_LOAD_NAND: 4303 case ISD::ATOMIC_LOAD_MIN: 4304 case ISD::ATOMIC_LOAD_MAX: 4305 case ISD::ATOMIC_LOAD_UMIN: 4306 case ISD::ATOMIC_LOAD_UMAX: 4307 case AMDGPUISD::ATOMIC_INC: 4308 case AMDGPUISD::ATOMIC_DEC: // TODO: Target mem intrinsics. 
4309 if (DCI.isBeforeLegalize()) 4310 break; 4311 return performMemSDNodeCombine(cast<MemSDNode>(N), DCI); 4312 case ISD::AND: 4313 return performAndCombine(N, DCI); 4314 case ISD::OR: 4315 return performOrCombine(N, DCI); 4316 case ISD::XOR: 4317 return performXorCombine(N, DCI); 4318 case AMDGPUISD::FP_CLASS: 4319 return performClassCombine(N, DCI); 4320 case ISD::FCANONICALIZE: 4321 return performFCanonicalizeCombine(N, DCI); 4322 case AMDGPUISD::FRACT: 4323 case AMDGPUISD::RCP: 4324 case AMDGPUISD::RSQ: 4325 case AMDGPUISD::RCP_LEGACY: 4326 case AMDGPUISD::RSQ_LEGACY: 4327 case AMDGPUISD::RSQ_CLAMP: 4328 case AMDGPUISD::LDEXP: { 4329 SDValue Src = N->getOperand(0); 4330 if (Src.isUndef()) 4331 return Src; 4332 break; 4333 } 4334 case ISD::SINT_TO_FP: 4335 case ISD::UINT_TO_FP: 4336 return performUCharToFloatCombine(N, DCI); 4337 case AMDGPUISD::CVT_F32_UBYTE0: 4338 case AMDGPUISD::CVT_F32_UBYTE1: 4339 case AMDGPUISD::CVT_F32_UBYTE2: 4340 case AMDGPUISD::CVT_F32_UBYTE3: 4341 return performCvtF32UByteNCombine(N, DCI); 4342 } 4343 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 4344 } 4345 4346 /// \brief Helper function for adjustWritemask 4347 static unsigned SubIdx2Lane(unsigned Idx) { 4348 switch (Idx) { 4349 default: return 0; 4350 case AMDGPU::sub0: return 0; 4351 case AMDGPU::sub1: return 1; 4352 case AMDGPU::sub2: return 2; 4353 case AMDGPU::sub3: return 3; 4354 } 4355 } 4356 4357 /// \brief Adjust the writemask of MIMG instructions 4358 void SITargetLowering::adjustWritemask(MachineSDNode *&Node, 4359 SelectionDAG &DAG) const { 4360 SDNode *Users[4] = { }; 4361 unsigned Lane = 0; 4362 unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 2 : 3; 4363 unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx); 4364 unsigned NewDmask = 0; 4365 4366 // Try to figure out the used register components 4367 for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end(); 4368 I != E; ++I) { 4369 4370 // Abort if we can't understand the usage 4371 if (!I->isMachineOpcode() || 4372 I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) 4373 return; 4374 4375 // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used. 4376 // Note that subregs are packed, i.e. Lane==0 is the first bit set 4377 // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit 4378 // set, etc. 4379 Lane = SubIdx2Lane(I->getConstantOperandVal(1)); 4380 4381 // Set which texture component corresponds to the lane. 4382 unsigned Comp; 4383 for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) { 4384 assert(Dmask); 4385 Comp = countTrailingZeros(Dmask); 4386 Dmask &= ~(1 << Comp); 4387 } 4388 4389 // Abort if we have more than one user per component 4390 if (Users[Lane]) 4391 return; 4392 4393 Users[Lane] = *I; 4394 NewDmask |= 1 << Comp; 4395 } 4396 4397 // Abort if there's no change 4398 if (NewDmask == OldDmask) 4399 return; 4400 4401 // Adjust the writemask in the node 4402 std::vector<SDValue> Ops; 4403 Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx); 4404 Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32)); 4405 Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end()); 4406 Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops); 4407 4408 // If we only got one lane, replace it with a copy 4409 // (if NewDmask has only one bit set...) 
4410 if (NewDmask && (NewDmask & (NewDmask-1)) == 0) { 4411 SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, SDLoc(), 4412 MVT::i32); 4413 SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, 4414 SDLoc(), Users[Lane]->getValueType(0), 4415 SDValue(Node, 0), RC); 4416 DAG.ReplaceAllUsesWith(Users[Lane], Copy); 4417 return; 4418 } 4419 4420 // Update the users of the node with the new indices 4421 for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) { 4422 SDNode *User = Users[i]; 4423 if (!User) 4424 continue; 4425 4426 SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32); 4427 DAG.UpdateNodeOperands(User, User->getOperand(0), Op); 4428 4429 switch (Idx) { 4430 default: break; 4431 case AMDGPU::sub0: Idx = AMDGPU::sub1; break; 4432 case AMDGPU::sub1: Idx = AMDGPU::sub2; break; 4433 case AMDGPU::sub2: Idx = AMDGPU::sub3; break; 4434 } 4435 } 4436 } 4437 4438 static bool isFrameIndexOp(SDValue Op) { 4439 if (Op.getOpcode() == ISD::AssertZext) 4440 Op = Op.getOperand(0); 4441 4442 return isa<FrameIndexSDNode>(Op); 4443 } 4444 4445 /// \brief Legalize target independent instructions (e.g. INSERT_SUBREG) 4446 /// with frame index operands. 4447 /// LLVM assumes that inputs are to these instructions are registers. 4448 void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node, 4449 SelectionDAG &DAG) const { 4450 4451 SmallVector<SDValue, 8> Ops; 4452 for (unsigned i = 0; i < Node->getNumOperands(); ++i) { 4453 if (!isFrameIndexOp(Node->getOperand(i))) { 4454 Ops.push_back(Node->getOperand(i)); 4455 continue; 4456 } 4457 4458 SDLoc DL(Node); 4459 Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, 4460 Node->getOperand(i).getValueType(), 4461 Node->getOperand(i)), 0)); 4462 } 4463 4464 DAG.UpdateNodeOperands(Node, Ops); 4465 } 4466 4467 /// \brief Fold the instructions after selecting them. 4468 SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, 4469 SelectionDAG &DAG) const { 4470 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 4471 unsigned Opcode = Node->getMachineOpcode(); 4472 4473 if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() && 4474 !TII->isGather4(Opcode)) 4475 adjustWritemask(Node, DAG); 4476 4477 if (Opcode == AMDGPU::INSERT_SUBREG || 4478 Opcode == AMDGPU::REG_SEQUENCE) { 4479 legalizeTargetIndependentNode(Node, DAG); 4480 return Node; 4481 } 4482 return Node; 4483 } 4484 4485 /// \brief Assign the register class depending on the number of 4486 /// bits set in the writemask 4487 void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, 4488 SDNode *Node) const { 4489 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 4490 4491 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4492 4493 if (TII->isVOP3(MI.getOpcode())) { 4494 // Make sure constant bus requirements are respected. 4495 TII->legalizeOperandsVOP3(MRI, MI); 4496 return; 4497 } 4498 4499 if (TII->isMIMG(MI)) { 4500 unsigned VReg = MI.getOperand(0).getReg(); 4501 const TargetRegisterClass *RC = MRI.getRegClass(VReg); 4502 // TODO: Need mapping tables to handle other cases (register classes). 4503 if (RC != &AMDGPU::VReg_128RegClass) 4504 return; 4505 4506 unsigned DmaskIdx = MI.getNumOperands() == 12 ? 3 : 4; 4507 unsigned Writemask = MI.getOperand(DmaskIdx).getImm(); 4508 unsigned BitsSet = 0; 4509 for (unsigned i = 0; i < 4; ++i) 4510 BitsSet += Writemask & (1 << i) ? 
1 : 0; 4511 switch (BitsSet) { 4512 default: return; 4513 case 1: RC = &AMDGPU::VGPR_32RegClass; break; 4514 case 2: RC = &AMDGPU::VReg_64RegClass; break; 4515 case 3: RC = &AMDGPU::VReg_96RegClass; break; 4516 } 4517 4518 unsigned NewOpcode = TII->getMaskedMIMGOp(MI.getOpcode(), BitsSet); 4519 MI.setDesc(TII->get(NewOpcode)); 4520 MRI.setRegClass(VReg, RC); 4521 return; 4522 } 4523 4524 // Replace unused atomics with the no return version. 4525 int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode()); 4526 if (NoRetAtomicOp != -1) { 4527 if (!Node->hasAnyUseOfValue(0)) { 4528 MI.setDesc(TII->get(NoRetAtomicOp)); 4529 MI.RemoveOperand(0); 4530 return; 4531 } 4532 4533 // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg 4534 // instruction, because the return type of these instructions is a vec2 of 4535 // the memory type, so it can be tied to the input operand. 4536 // This means these instructions always have a use, so we need to add a 4537 // special case to check if the atomic has only one extract_subreg use, 4538 // which itself has no uses. 4539 if ((Node->hasNUsesOfValue(1, 0) && 4540 Node->use_begin()->isMachineOpcode() && 4541 Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG && 4542 !Node->use_begin()->hasAnyUseOfValue(0))) { 4543 unsigned Def = MI.getOperand(0).getReg(); 4544 4545 // Change this into a noret atomic. 4546 MI.setDesc(TII->get(NoRetAtomicOp)); 4547 MI.RemoveOperand(0); 4548 4549 // If we only remove the def operand from the atomic instruction, the 4550 // extract_subreg will be left with a use of a vreg without a def. 4551 // So we need to insert an implicit_def to avoid machine verifier 4552 // errors. 4553 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), 4554 TII->get(AMDGPU::IMPLICIT_DEF), Def); 4555 } 4556 return; 4557 } 4558 } 4559 4560 static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL, 4561 uint64_t Val) { 4562 SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32); 4563 return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0); 4564 } 4565 4566 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, 4567 const SDLoc &DL, 4568 SDValue Ptr) const { 4569 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 4570 4571 // Build the half of the subregister with the constants before building the 4572 // full 128-bit register. If we are building multiple resource descriptors, 4573 // this will allow CSEing of the 2-component register. 4574 const SDValue Ops0[] = { 4575 DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32), 4576 buildSMovImm32(DAG, DL, 0), 4577 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), 4578 buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32), 4579 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32) 4580 }; 4581 4582 SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, 4583 MVT::v2i32, Ops0), 0); 4584 4585 // Combine the constants and the pointer. 
4586 const SDValue Ops1[] = { 4587 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), 4588 Ptr, 4589 DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32), 4590 SubRegHi, 4591 DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32) 4592 }; 4593 4594 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); 4595 } 4596 4597 /// \brief Return a resource descriptor with the 'Add TID' bit enabled 4598 /// The TID (Thread ID) is multiplied by the stride value (bits [61:48] 4599 /// of the resource descriptor) to create an offset, which is added to 4600 /// the resource pointer. 4601 MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL, 4602 SDValue Ptr, uint32_t RsrcDword1, 4603 uint64_t RsrcDword2And3) const { 4604 SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr); 4605 SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr); 4606 if (RsrcDword1) { 4607 PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi, 4608 DAG.getConstant(RsrcDword1, DL, MVT::i32)), 4609 0); 4610 } 4611 4612 SDValue DataLo = buildSMovImm32(DAG, DL, 4613 RsrcDword2And3 & UINT64_C(0xFFFFFFFF)); 4614 SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32); 4615 4616 const SDValue Ops[] = { 4617 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), 4618 PtrLo, 4619 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), 4620 PtrHi, 4621 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32), 4622 DataLo, 4623 DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32), 4624 DataHi, 4625 DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32) 4626 }; 4627 4628 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops); 4629 } 4630 4631 SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG, 4632 const TargetRegisterClass *RC, 4633 unsigned Reg, EVT VT) const { 4634 SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT); 4635 4636 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()), 4637 cast<RegisterSDNode>(VReg)->getReg(), VT); 4638 } 4639 4640 //===----------------------------------------------------------------------===// 4641 // SI Inline Assembly Support 4642 //===----------------------------------------------------------------------===// 4643 4644 std::pair<unsigned, const TargetRegisterClass *> 4645 SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 4646 StringRef Constraint, 4647 MVT VT) const { 4648 if (!isTypeLegal(VT)) 4649 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 4650 4651 if (Constraint.size() == 1) { 4652 switch (Constraint[0]) { 4653 case 's': 4654 case 'r': 4655 switch (VT.getSizeInBits()) { 4656 default: 4657 return std::make_pair(0U, nullptr); 4658 case 32: 4659 case 16: 4660 return std::make_pair(0U, &AMDGPU::SReg_32_XM0RegClass); 4661 case 64: 4662 return std::make_pair(0U, &AMDGPU::SGPR_64RegClass); 4663 case 128: 4664 return std::make_pair(0U, &AMDGPU::SReg_128RegClass); 4665 case 256: 4666 return std::make_pair(0U, &AMDGPU::SReg_256RegClass); 4667 } 4668 4669 case 'v': 4670 switch (VT.getSizeInBits()) { 4671 default: 4672 return std::make_pair(0U, nullptr); 4673 case 32: 4674 case 16: 4675 return std::make_pair(0U, &AMDGPU::VGPR_32RegClass); 4676 case 64: 4677 return std::make_pair(0U, &AMDGPU::VReg_64RegClass); 4678 case 96: 4679 return std::make_pair(0U, &AMDGPU::VReg_96RegClass); 4680 case 128: 4681 return std::make_pair(0U, &AMDGPU::VReg_128RegClass); 4682 case 256: 4683 
return std::make_pair(0U, &AMDGPU::VReg_256RegClass); 4684 case 512: 4685 return std::make_pair(0U, &AMDGPU::VReg_512RegClass); 4686 } 4687 } 4688 } 4689 4690 if (Constraint.size() > 1) { 4691 const TargetRegisterClass *RC = nullptr; 4692 if (Constraint[1] == 'v') { 4693 RC = &AMDGPU::VGPR_32RegClass; 4694 } else if (Constraint[1] == 's') { 4695 RC = &AMDGPU::SGPR_32RegClass; 4696 } 4697 4698 if (RC) { 4699 uint32_t Idx; 4700 bool Failed = Constraint.substr(2).getAsInteger(10, Idx); 4701 if (!Failed && Idx < RC->getNumRegs()) 4702 return std::make_pair(RC->getRegister(Idx), RC); 4703 } 4704 } 4705 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 4706 } 4707 4708 SITargetLowering::ConstraintType 4709 SITargetLowering::getConstraintType(StringRef Constraint) const { 4710 if (Constraint.size() == 1) { 4711 switch (Constraint[0]) { 4712 default: break; 4713 case 's': 4714 case 'v': 4715 return C_RegisterClass; 4716 } 4717 } 4718 return TargetLowering::getConstraintType(Constraint); 4719 } 4720