1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 /// \file 11 /// \brief Custom DAG lowering for SI 12 // 13 //===----------------------------------------------------------------------===// 14 15 #ifdef _MSC_VER 16 // Provide M_PI. 17 #define _USE_MATH_DEFINES 18 #endif 19 20 #include "AMDGPU.h" 21 #include "AMDGPUIntrinsicInfo.h" 22 #include "AMDGPUSubtarget.h" 23 #include "SIDefines.h" 24 #include "SIISelLowering.h" 25 #include "SIInstrInfo.h" 26 #include "SIMachineFunctionInfo.h" 27 #include "SIRegisterInfo.h" 28 #include "Utils/AMDGPUBaseInfo.h" 29 #include "llvm/ADT/APFloat.h" 30 #include "llvm/ADT/APInt.h" 31 #include "llvm/ADT/ArrayRef.h" 32 #include "llvm/ADT/BitVector.h" 33 #include "llvm/ADT/SmallVector.h" 34 #include "llvm/ADT/StringRef.h" 35 #include "llvm/ADT/StringSwitch.h" 36 #include "llvm/ADT/Twine.h" 37 #include "llvm/CodeGen/Analysis.h" 38 #include "llvm/CodeGen/CallingConvLower.h" 39 #include "llvm/CodeGen/DAGCombine.h" 40 #include "llvm/CodeGen/ISDOpcodes.h" 41 #include "llvm/CodeGen/MachineBasicBlock.h" 42 #include "llvm/CodeGen/MachineFrameInfo.h" 43 #include "llvm/CodeGen/MachineFunction.h" 44 #include "llvm/CodeGen/MachineInstr.h" 45 #include "llvm/CodeGen/MachineInstrBuilder.h" 46 #include "llvm/CodeGen/MachineMemOperand.h" 47 #include "llvm/CodeGen/MachineOperand.h" 48 #include "llvm/CodeGen/MachineRegisterInfo.h" 49 #include "llvm/CodeGen/MachineValueType.h" 50 #include "llvm/CodeGen/SelectionDAG.h" 51 #include "llvm/CodeGen/SelectionDAGNodes.h" 52 #include "llvm/CodeGen/ValueTypes.h" 53 #include "llvm/IR/Constants.h" 54 #include "llvm/IR/DataLayout.h" 55 #include "llvm/IR/DebugLoc.h" 56 #include "llvm/IR/DerivedTypes.h" 57 #include "llvm/IR/DiagnosticInfo.h" 58 #include "llvm/IR/Function.h" 59 #include "llvm/IR/GlobalValue.h" 60 #include "llvm/IR/InstrTypes.h" 61 #include "llvm/IR/Instruction.h" 62 #include "llvm/IR/Instructions.h" 63 #include "llvm/IR/Type.h" 64 #include "llvm/Support/Casting.h" 65 #include "llvm/Support/CodeGen.h" 66 #include "llvm/Support/CommandLine.h" 67 #include "llvm/Support/Compiler.h" 68 #include "llvm/Support/ErrorHandling.h" 69 #include "llvm/Support/MathExtras.h" 70 #include "llvm/Target/TargetCallingConv.h" 71 #include "llvm/Target/TargetMachine.h" 72 #include "llvm/Target/TargetOptions.h" 73 #include "llvm/Target/TargetRegisterInfo.h" 74 #include <cassert> 75 #include <cmath> 76 #include <cstdint> 77 #include <iterator> 78 #include <tuple> 79 #include <utility> 80 #include <vector> 81 82 using namespace llvm; 83 84 static cl::opt<bool> EnableVGPRIndexMode( 85 "amdgpu-vgpr-index-mode", 86 cl::desc("Use GPR indexing mode instead of movrel for vector indexing"), 87 cl::init(false)); 88 89 static unsigned findFirstFreeSGPR(CCState &CCInfo) { 90 unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs(); 91 for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) { 92 if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) { 93 return AMDGPU::SGPR0 + Reg; 94 } 95 } 96 llvm_unreachable("Cannot allocate sgpr"); 97 } 98 99 SITargetLowering::SITargetLowering(const TargetMachine &TM, 100 const SISubtarget &STI) 101 : AMDGPUTargetLowering(TM, STI) { 102 addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass); 103 addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass); 104 105 
addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass); 106 addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass); 107 108 addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass); 109 addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass); 110 addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass); 111 112 addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass); 113 addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass); 114 115 addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass); 116 addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass); 117 118 addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass); 119 addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass); 120 121 addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass); 122 addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass); 123 124 if (Subtarget->has16BitInsts()) { 125 addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass); 126 addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass); 127 } 128 129 computeRegisterProperties(STI.getRegisterInfo()); 130 131 // We need to custom lower vector stores from local memory 132 setOperationAction(ISD::LOAD, MVT::v2i32, Custom); 133 setOperationAction(ISD::LOAD, MVT::v4i32, Custom); 134 setOperationAction(ISD::LOAD, MVT::v8i32, Custom); 135 setOperationAction(ISD::LOAD, MVT::v16i32, Custom); 136 setOperationAction(ISD::LOAD, MVT::i1, Custom); 137 138 setOperationAction(ISD::STORE, MVT::v2i32, Custom); 139 setOperationAction(ISD::STORE, MVT::v4i32, Custom); 140 setOperationAction(ISD::STORE, MVT::v8i32, Custom); 141 setOperationAction(ISD::STORE, MVT::v16i32, Custom); 142 setOperationAction(ISD::STORE, MVT::i1, Custom); 143 144 setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand); 145 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand); 146 setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand); 147 setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand); 148 setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand); 149 setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand); 150 setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand); 151 setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand); 152 setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand); 153 setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand); 154 155 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 156 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); 157 setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand); 158 159 setOperationAction(ISD::SELECT, MVT::i1, Promote); 160 setOperationAction(ISD::SELECT, MVT::i64, Custom); 161 setOperationAction(ISD::SELECT, MVT::f64, Promote); 162 AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64); 163 164 setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); 165 setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); 166 setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); 167 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); 168 setOperationAction(ISD::SELECT_CC, MVT::i1, Expand); 169 170 setOperationAction(ISD::SETCC, MVT::i1, Promote); 171 setOperationAction(ISD::SETCC, MVT::v2i1, Expand); 172 setOperationAction(ISD::SETCC, MVT::v4i1, Expand); 173 AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); 174 175 setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand); 176 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand); 177 178 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom); 179 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom); 180 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom); 181 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, 
Custom); 182 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom); 183 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom); 184 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom); 185 186 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom); 187 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom); 188 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); 189 setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom); 190 setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom); 191 192 setOperationAction(ISD::BRCOND, MVT::Other, Custom); 193 setOperationAction(ISD::BR_CC, MVT::i1, Expand); 194 setOperationAction(ISD::BR_CC, MVT::i32, Expand); 195 setOperationAction(ISD::BR_CC, MVT::i64, Expand); 196 setOperationAction(ISD::BR_CC, MVT::f32, Expand); 197 setOperationAction(ISD::BR_CC, MVT::f64, Expand); 198 199 setOperationAction(ISD::UADDO, MVT::i32, Legal); 200 setOperationAction(ISD::USUBO, MVT::i32, Legal); 201 202 // We only support LOAD/STORE and vector manipulation ops for vectors 203 // with > 4 elements. 204 for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32, MVT::v2i64, MVT::v2f64}) { 205 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { 206 switch (Op) { 207 case ISD::LOAD: 208 case ISD::STORE: 209 case ISD::BUILD_VECTOR: 210 case ISD::BITCAST: 211 case ISD::EXTRACT_VECTOR_ELT: 212 case ISD::INSERT_VECTOR_ELT: 213 case ISD::INSERT_SUBVECTOR: 214 case ISD::EXTRACT_SUBVECTOR: 215 case ISD::SCALAR_TO_VECTOR: 216 break; 217 case ISD::CONCAT_VECTORS: 218 setOperationAction(Op, VT, Custom); 219 break; 220 default: 221 setOperationAction(Op, VT, Expand); 222 break; 223 } 224 } 225 } 226 227 // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that 228 // is expanded to avoid having two separate loops in case the index is a VGPR. 229 230 // Most operations are naturally 32-bit vector operations. We only support 231 // load and store of i64 vectors, so promote v2i64 vector operations to v4i32. 232 for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) { 233 setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); 234 AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32); 235 236 setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); 237 AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32); 238 239 setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); 240 AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32); 241 242 setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); 243 AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32); 244 } 245 246 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand); 247 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand); 248 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand); 249 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand); 250 251 // Avoid stack access for these. 252 // TODO: Generalize to more vector types. 
253 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom); 254 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom); 255 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom); 256 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom); 257 258 // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling, 259 // and output demarshalling 260 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); 261 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); 262 263 // We can't return success/failure, only the old value, 264 // let LLVM add the comparison 265 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand); 266 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand); 267 268 if (getSubtarget()->hasFlatAddressSpace()) { 269 setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom); 270 setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom); 271 } 272 273 setOperationAction(ISD::BSWAP, MVT::i32, Legal); 274 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); 275 276 // On SI this is s_memtime and s_memrealtime on VI. 277 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal); 278 setOperationAction(ISD::TRAP, MVT::Other, Legal); 279 280 setOperationAction(ISD::FMINNUM, MVT::f64, Legal); 281 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); 282 283 if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) { 284 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 285 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 286 setOperationAction(ISD::FRINT, MVT::f64, Legal); 287 } 288 289 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 290 291 setOperationAction(ISD::FSIN, MVT::f32, Custom); 292 setOperationAction(ISD::FCOS, MVT::f32, Custom); 293 setOperationAction(ISD::FDIV, MVT::f32, Custom); 294 setOperationAction(ISD::FDIV, MVT::f64, Custom); 295 296 if (Subtarget->has16BitInsts()) { 297 setOperationAction(ISD::Constant, MVT::i16, Legal); 298 299 setOperationAction(ISD::SMIN, MVT::i16, Legal); 300 setOperationAction(ISD::SMAX, MVT::i16, Legal); 301 302 setOperationAction(ISD::UMIN, MVT::i16, Legal); 303 setOperationAction(ISD::UMAX, MVT::i16, Legal); 304 305 setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote); 306 AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32); 307 308 setOperationAction(ISD::ROTR, MVT::i16, Promote); 309 setOperationAction(ISD::ROTL, MVT::i16, Promote); 310 311 setOperationAction(ISD::SDIV, MVT::i16, Promote); 312 setOperationAction(ISD::UDIV, MVT::i16, Promote); 313 setOperationAction(ISD::SREM, MVT::i16, Promote); 314 setOperationAction(ISD::UREM, MVT::i16, Promote); 315 316 setOperationAction(ISD::BSWAP, MVT::i16, Promote); 317 setOperationAction(ISD::BITREVERSE, MVT::i16, Promote); 318 319 setOperationAction(ISD::CTTZ, MVT::i16, Promote); 320 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote); 321 setOperationAction(ISD::CTLZ, MVT::i16, Promote); 322 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote); 323 324 setOperationAction(ISD::SELECT_CC, MVT::i16, Expand); 325 326 setOperationAction(ISD::BR_CC, MVT::i16, Expand); 327 328 setOperationAction(ISD::LOAD, MVT::i16, Custom); 329 330 setTruncStoreAction(MVT::i64, MVT::i16, Expand); 331 332 setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote); 333 AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32); 334 setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote); 335 AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32); 336 337 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote); 338 
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);
  }

  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
391 setTargetDAGCombine(ISD::LOAD); 392 setTargetDAGCombine(ISD::STORE); 393 setTargetDAGCombine(ISD::ATOMIC_LOAD); 394 setTargetDAGCombine(ISD::ATOMIC_STORE); 395 setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP); 396 setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 397 setTargetDAGCombine(ISD::ATOMIC_SWAP); 398 setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD); 399 setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB); 400 setTargetDAGCombine(ISD::ATOMIC_LOAD_AND); 401 setTargetDAGCombine(ISD::ATOMIC_LOAD_OR); 402 setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR); 403 setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND); 404 setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN); 405 setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX); 406 setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN); 407 setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX); 408 409 setSchedulingPreference(Sched::RegPressure); 410 } 411 412 const SISubtarget *SITargetLowering::getSubtarget() const { 413 return static_cast<const SISubtarget *>(Subtarget); 414 } 415 416 //===----------------------------------------------------------------------===// 417 // TargetLowering queries 418 //===----------------------------------------------------------------------===// 419 420 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 421 const CallInst &CI, 422 unsigned IntrID) const { 423 switch (IntrID) { 424 case Intrinsic::amdgcn_atomic_inc: 425 case Intrinsic::amdgcn_atomic_dec: 426 Info.opc = ISD::INTRINSIC_W_CHAIN; 427 Info.memVT = MVT::getVT(CI.getType()); 428 Info.ptrVal = CI.getOperand(0); 429 Info.align = 0; 430 Info.vol = false; 431 Info.readMem = true; 432 Info.writeMem = true; 433 return true; 434 default: 435 return false; 436 } 437 } 438 439 bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &, 440 EVT) const { 441 // SI has some legal vector types, but no legal vector operations. Say no 442 // shuffles are legal in order to prefer scalarizing some vector operations. 443 return false; 444 } 445 446 bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const { 447 // Flat instructions do not have offsets, and only have the register 448 // address. 449 return AM.BaseOffs == 0 && (AM.Scale == 0 || AM.Scale == 1); 450 } 451 452 bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const { 453 // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and 454 // additionally can do r + r + i with addr64. 32-bit has more addressing 455 // mode options. Depending on the resource constant, it can also do 456 // (i64 r0) + (i32 r1) * (i14 i). 457 // 458 // Private arrays end up using a scratch buffer most of the time, so also 459 // assume those use MUBUF instructions. Scratch loads / stores are currently 460 // implemented as mubuf instructions with offen bit set, so slightly 461 // different than the normal addr64. 462 if (!isUInt<12>(AM.BaseOffs)) 463 return false; 464 465 // FIXME: Since we can split immediate into soffset and immediate offset, 466 // would it make sense to allow any immediate? 467 468 switch (AM.Scale) { 469 case 0: // r + i or just i, depending on HasBaseReg. 470 return true; 471 case 1: 472 return true; // We have r + r or r + i. 473 case 2: 474 if (AM.HasBaseReg) { 475 // Reject 2 * r + r. 476 return false; 477 } 478 479 // Allow 2 * r as r + r 480 // Or 2 * r + i is allowed as r + r + i. 
    return true;
  default: // Don't allow n * r
    return false;
  }
}

bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  switch (AS) {
  case AMDGPUAS::GLOBAL_ADDRESS:
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // Assume that we will use FLAT for all global memory accesses
      // on VI.
      // FIXME: This assumption is currently wrong. On VI we still use
      // MUBUF instructions for the r + i addressing mode. As currently
      // implemented, the MUBUF instructions only work on buffers < 4GB.
      // It may be possible to support > 4GB buffers with MUBUF instructions,
      // by setting the stride value in the resource descriptor which would
      // increase the size limit to (stride * 4GB). However, this is risky,
      // because it has never been validated.
      return isLegalFlatAddressingMode(AM);
    }

    return isLegalMUBUFAddressingMode(AM);

  case AMDGPUAS::CONSTANT_ADDRESS:
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (DL.getTypeStoreSize(Ty) < 4)
      return isLegalMUBUFAddressingMode(AM);

    if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8 bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is 20-bit in bytes.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;

  case AMDGPUAS::PRIVATE_ADDRESS:
    return isLegalMUBUFAddressingMode(AM);

  case AMDGPUAS::LOCAL_ADDRESS:
  case AMDGPUAS::REGION_ADDRESS:
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;

  case AMDGPUAS::FLAT_ADDRESS:
  case AMDGPUAS::UNKNOWN_ADDRESS_SPACE:
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);

  default:
    llvm_unreachable("unhandled address space");
  }
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch. If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
    return false;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have a uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS) ?
        (Align % 4 == 0) : true;
    }

    return true;
  }

  // Smaller than dword values must be aligned.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                          unsigned SrcAlign, bool IsMemset,
                                          bool ZeroMemset,
                                          bool MemcpyStrSrc,
                                          MachineFunction &MF) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type to
  // use. Make sure we switch these to 64-bit accesses.

  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
    return MVT::v4i32;

  if (Size >= 8 && DstAlign >= 4)
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}

static bool isFlatGlobalAddrSpace(unsigned AS) {
  return AS == AMDGPUAS::GLOBAL_ADDRESS ||
         AS == AMDGPUAS::FLAT_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS;
}

bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
}

bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();
  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.noclobber");
}

bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
                                            unsigned DestAS) const {
  // Flat -> private/local is a simple truncate.
  // Flat -> global is no-op.
  if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
    return true;

  return isNoopAddrSpaceCast(SrcAS, DestAS);
}

bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);

  return AMDGPU::isUniformMMO(MemNode->getMemOperand());
}

TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(EVT VT) const {
  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
    return TypeSplitVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                         Type *Ty) const {
  // FIXME: Could be smarter if called for vector constants.
  return true;
}

bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
  if (Subtarget->has16BitInsts() && VT == MVT::i16) {
    switch (Op) {
    case ISD::LOAD:
    case ISD::STORE:

    // These operations are done with 32-bit instructions anyway.
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SELECT:
      // TODO: Extensions?
      return true;
    default:
      return false;
    }
  }

  // SimplifySetCC uses this function to determine whether or not it should
  // create setcc with i1 operands. We don't have instructions for i1 setcc.
732 if (VT == MVT::i1 && Op == ISD::SETCC) 733 return false; 734 735 return TargetLowering::isTypeDesirableForOp(Op, VT); 736 } 737 738 SDValue SITargetLowering::LowerParameterPtr(SelectionDAG &DAG, 739 const SDLoc &SL, SDValue Chain, 740 unsigned Offset) const { 741 const DataLayout &DL = DAG.getDataLayout(); 742 MachineFunction &MF = DAG.getMachineFunction(); 743 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 744 unsigned InputPtrReg = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR); 745 746 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 747 MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS); 748 SDValue BasePtr = DAG.getCopyFromReg(Chain, SL, 749 MRI.getLiveInVirtReg(InputPtrReg), PtrVT); 750 return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr, 751 DAG.getConstant(Offset, SL, PtrVT)); 752 } 753 754 SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT, 755 const SDLoc &SL, SDValue Chain, 756 unsigned Offset, bool Signed, 757 const ISD::InputArg *Arg) const { 758 const DataLayout &DL = DAG.getDataLayout(); 759 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 760 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); 761 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); 762 763 unsigned Align = DL.getABITypeAlignment(Ty); 764 765 SDValue Ptr = LowerParameterPtr(DAG, SL, Chain, Offset); 766 SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align, 767 MachineMemOperand::MONonTemporal | 768 MachineMemOperand::MODereferenceable | 769 MachineMemOperand::MOInvariant); 770 771 SDValue Val = Load; 772 if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) && 773 VT.bitsLT(MemVT)) { 774 unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext; 775 Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT)); 776 } 777 778 if (MemVT.isFloatingPoint()) 779 Val = getFPExtOrFPTrunc(DAG, Val, SL, VT); 780 else if (Signed) 781 Val = DAG.getSExtOrTrunc(Val, SL, VT); 782 else 783 Val = DAG.getZExtOrTrunc(Val, SL, VT); 784 785 return DAG.getMergeValues({ Val, Load.getValue(1) }, SL); 786 } 787 788 SDValue SITargetLowering::LowerFormalArguments( 789 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 790 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 791 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 792 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 793 794 MachineFunction &MF = DAG.getMachineFunction(); 795 FunctionType *FType = MF.getFunction()->getFunctionType(); 796 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 797 const SISubtarget &ST = MF.getSubtarget<SISubtarget>(); 798 799 if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) { 800 const Function *Fn = MF.getFunction(); 801 DiagnosticInfoUnsupported NoGraphicsHSA( 802 *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc()); 803 DAG.getContext()->diagnose(NoGraphicsHSA); 804 return DAG.getEntryNode(); 805 } 806 807 // Create stack objects that are used for emitting debugger prologue if 808 // "amdgpu-debugger-emit-prologue" attribute was specified. 
  if (ST.debuggerEmitPrologue())
    createDebuggerPrologueStackObjects(MF);

  SmallVector<ISD::InputArg, 16> Splits;
  BitVector Skipped(Ins.size());

  for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];

    // First, check if it's a PS input addr.
    if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
        !Arg.Flags.isByVal() && PSInputNum <= 15) {

      if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
        // We can safely skip PS inputs.
        Skipped.set(i);
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (Arg.Used)
        Info->PSInputEna |= 1 << PSInputNum;

      ++PSInputNum;
    }

    if (AMDGPU::isShader(CallConv)) {
      // Second, split vertices into their elements.
      if (Arg.VT.isVector()) {
        ISD::InputArg NewArg = Arg;
        NewArg.Flags.setSplit();
        NewArg.VT = Arg.VT.getVectorElementType();

        // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
        // three or five element vertex only needs three or five registers,
        // NOT four or eight.
        Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
        unsigned NumElements = ParamType->getVectorNumElements();

        for (unsigned j = 0; j != NumElements; ++j) {
          Splits.push_back(NewArg);
          NewArg.PartOffset += NewArg.VT.getStoreSize();
        }
      } else {
        Splits.push_back(Arg);
      }
    }
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // At least one interpolation mode must be enabled or else the GPU will hang.
  //
  // Check PSInputAddr instead of PSInputEna. The idea is that if the user set
  // PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CallConv == CallingConv::AMDGPU_PS &&
      ((Info->getPSInputAddr() & 0x7F) == 0 ||
       ((Info->getPSInputAddr() & 0xF) == 0 && Info->isPSInputAllocated(11)))) {
    CCInfo.AllocateReg(AMDGPU::VGPR0);
    CCInfo.AllocateReg(AMDGPU::VGPR1);
    Info->markPSInputAllocated(0);
    Info->PSInputEna |= 1;
  }

  if (!AMDGPU::isShader(CallConv)) {
    assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
  } else {
    assert(!Info->hasDispatchPtr() &&
           !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
           !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
           !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
           !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
           !Info->hasWorkItemIDZ());
  }

  if (Info->hasPrivateMemoryInputPtr()) {
    unsigned PrivateMemoryPtrReg = Info->addPrivateMemoryPtr(*TRI);
    MF.addLiveIn(PrivateMemoryPtrReg, &AMDGPU::SReg_64RegClass);
    CCInfo.AllocateReg(PrivateMemoryPtrReg);
  }

  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
902 if (Info->hasPrivateSegmentBuffer()) { 903 unsigned PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI); 904 MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass); 905 CCInfo.AllocateReg(PrivateSegmentBufferReg); 906 } 907 908 if (Info->hasDispatchPtr()) { 909 unsigned DispatchPtrReg = Info->addDispatchPtr(*TRI); 910 MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass); 911 CCInfo.AllocateReg(DispatchPtrReg); 912 } 913 914 if (Info->hasQueuePtr()) { 915 unsigned QueuePtrReg = Info->addQueuePtr(*TRI); 916 MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass); 917 CCInfo.AllocateReg(QueuePtrReg); 918 } 919 920 if (Info->hasKernargSegmentPtr()) { 921 unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI); 922 MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass); 923 CCInfo.AllocateReg(InputPtrReg); 924 } 925 926 if (Info->hasDispatchID()) { 927 unsigned DispatchIDReg = Info->addDispatchID(*TRI); 928 MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass); 929 CCInfo.AllocateReg(DispatchIDReg); 930 } 931 932 if (Info->hasFlatScratchInit()) { 933 unsigned FlatScratchInitReg = Info->addFlatScratchInit(*TRI); 934 MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass); 935 CCInfo.AllocateReg(FlatScratchInitReg); 936 } 937 938 if (!AMDGPU::isShader(CallConv)) 939 analyzeFormalArgumentsCompute(CCInfo, Ins); 940 else 941 AnalyzeFormalArguments(CCInfo, Splits); 942 943 SmallVector<SDValue, 16> Chains; 944 945 for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) { 946 const ISD::InputArg &Arg = Ins[i]; 947 if (Skipped[i]) { 948 InVals.push_back(DAG.getUNDEF(Arg.VT)); 949 continue; 950 } 951 952 CCValAssign &VA = ArgLocs[ArgIdx++]; 953 MVT VT = VA.getLocVT(); 954 955 if (VA.isMemLoc()) { 956 VT = Ins[i].VT; 957 EVT MemVT = VA.getLocVT(); 958 const unsigned Offset = Subtarget->getExplicitKernelArgOffset(MF) + 959 VA.getLocMemOffset(); 960 // The first 36 bytes of the input buffer contains information about 961 // thread group and global sizes. 962 SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, Chain, 963 Offset, Ins[i].Flags.isSExt(), 964 &Ins[i]); 965 Chains.push_back(Arg.getValue(1)); 966 967 auto *ParamTy = 968 dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex())); 969 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS && 970 ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { 971 // On SI local pointers are just offsets into LDS, so they are always 972 // less than 16-bits. On CI and newer they could potentially be 973 // real pointers, so we can't guarantee their size. 
        Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
                          DAG.getValueType(MVT::i16));
      }

      InVals.push_back(Arg);
      Info->setABIArgOffset(Offset + MemVT.getStoreSize());
      continue;
    }
    assert(VA.isRegLoc() && "Parameter must be in a register!");

    unsigned Reg = VA.getLocReg();

    if (VT == MVT::i64) {
      // For now assume it is a pointer.
      Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0,
                                     &AMDGPU::SGPR_64RegClass);
      Reg = MF.addLiveIn(Reg, &AMDGPU::SGPR_64RegClass);
      SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
      InVals.push_back(Copy);
      continue;
    }

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);

    Reg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);

    if (Arg.VT.isVector()) {
      // Build a vector from the registers.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      SmallVector<SDValue, 4> Regs;
      Regs.push_back(Val);
      for (unsigned j = 1; j != NumElements; ++j) {
        Reg = ArgLocs[ArgIdx++].getLocReg();
        Reg = MF.addLiveIn(Reg, RC);

        SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
        Regs.push_back(Copy);
      }

      // Fill up the missing vector elements.
      NumElements = Arg.VT.getVectorNumElements() - NumElements;
      Regs.append(NumElements, DAG.getUNDEF(VT));

      InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
      continue;
    }

    InVals.push_back(Val);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.

  // Start adding system SGPRs.
  if (Info->hasWorkGroupIDX()) {
    unsigned Reg = Info->addWorkGroupIDX();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupIDY()) {
    unsigned Reg = Info->addWorkGroupIDY();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupIDZ()) {
    unsigned Reg = Info->addWorkGroupIDZ();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupInfo()) {
    unsigned Reg = Info->addWorkGroupInfo();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg;

    if (AMDGPU::isShader(CallConv)) {
      PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
      Info->setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
    } else
      PrivateSegmentWaveByteOffsetReg = Info->addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }

  // Now that we've figured out where the scratch register inputs are, see if we
  // should reserve the arguments and use them directly.
  bool HasStackObjects = MF.getFrameInfo().hasStackObjects();
  // Record that we know we have non-spill stack objects so we don't need to
  // check all stack objects later.
  if (HasStackObjects)
    Info->setHasNonSpillStackObjects(true);

  // Everything live out of a block is spilled with fast regalloc, so it's
  // almost certain that spilling will be required.
  if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
    HasStackObjects = true;

  if (ST.isAmdCodeObjectV2(MF)) {
    if (HasStackObjects) {
      // If we have stack objects, we unquestionably need the private buffer
      // resource. For the Code Object V2 ABI, this will be the first 4 user
      // SGPR inputs. We can reserve those and use them directly.

      unsigned PrivateSegmentBufferReg = TRI->getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
      Info->setScratchRSrcReg(PrivateSegmentBufferReg);

      unsigned PrivateSegmentWaveByteOffsetReg = TRI->getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info->setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
    } else {
      unsigned ReservedBufferReg
        = TRI->reservedPrivateSegmentBufferReg(MF);
      unsigned ReservedOffsetReg
        = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);

      // We tentatively reserve the last registers (skipping the last two
      // which may contain VCC). After register allocation, we'll replace
      // these with the ones immediately after those which were really
      // allocated. In the prologue copies will be inserted from the argument
      // to these reserved registers.
      Info->setScratchRSrcReg(ReservedBufferReg);
      Info->setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  } else {
    unsigned ReservedBufferReg = TRI->reservedPrivateSegmentBufferReg(MF);

    // Without HSA, relocations are used for the scratch pointer and the
    // buffer resource setup is always inserted in the prologue. Scratch wave
    // offset is still in an input SGPR.
1115 Info->setScratchRSrcReg(ReservedBufferReg); 1116 1117 if (HasStackObjects) { 1118 unsigned ScratchWaveOffsetReg = TRI->getPreloadedValue( 1119 MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); 1120 Info->setScratchWaveOffsetReg(ScratchWaveOffsetReg); 1121 } else { 1122 unsigned ReservedOffsetReg 1123 = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF); 1124 Info->setScratchWaveOffsetReg(ReservedOffsetReg); 1125 } 1126 } 1127 1128 if (Info->hasWorkItemIDX()) { 1129 unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X); 1130 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 1131 CCInfo.AllocateReg(Reg); 1132 } 1133 1134 if (Info->hasWorkItemIDY()) { 1135 unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y); 1136 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 1137 CCInfo.AllocateReg(Reg); 1138 } 1139 1140 if (Info->hasWorkItemIDZ()) { 1141 unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z); 1142 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 1143 CCInfo.AllocateReg(Reg); 1144 } 1145 1146 if (Chains.empty()) 1147 return Chain; 1148 1149 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 1150 } 1151 1152 SDValue 1153 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 1154 bool isVarArg, 1155 const SmallVectorImpl<ISD::OutputArg> &Outs, 1156 const SmallVectorImpl<SDValue> &OutVals, 1157 const SDLoc &DL, SelectionDAG &DAG) const { 1158 MachineFunction &MF = DAG.getMachineFunction(); 1159 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 1160 1161 if (!AMDGPU::isShader(CallConv)) 1162 return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs, 1163 OutVals, DL, DAG); 1164 1165 Info->setIfReturnsVoid(Outs.size() == 0); 1166 1167 SmallVector<ISD::OutputArg, 48> Splits; 1168 SmallVector<SDValue, 48> SplitVals; 1169 1170 // Split vectors into their elements. 1171 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 1172 const ISD::OutputArg &Out = Outs[i]; 1173 1174 if (Out.VT.isVector()) { 1175 MVT VT = Out.VT.getVectorElementType(); 1176 ISD::OutputArg NewOut = Out; 1177 NewOut.Flags.setSplit(); 1178 NewOut.VT = VT; 1179 1180 // We want the original number of vector elements here, e.g. 1181 // three or five, not four or eight. 1182 unsigned NumElements = Out.ArgVT.getVectorNumElements(); 1183 1184 for (unsigned j = 0; j != NumElements; ++j) { 1185 SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i], 1186 DAG.getConstant(j, DL, MVT::i32)); 1187 SplitVals.push_back(Elem); 1188 Splits.push_back(NewOut); 1189 NewOut.PartOffset += NewOut.VT.getStoreSize(); 1190 } 1191 } else { 1192 SplitVals.push_back(OutVals[i]); 1193 Splits.push_back(Out); 1194 } 1195 } 1196 1197 // CCValAssign - represent the assignment of the return value to a location. 1198 SmallVector<CCValAssign, 48> RVLocs; 1199 1200 // CCState - Info about the registers and stack slots. 1201 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 1202 *DAG.getContext()); 1203 1204 // Analyze outgoing return values. 1205 AnalyzeReturn(CCInfo, Splits); 1206 1207 SDValue Flag; 1208 SmallVector<SDValue, 48> RetOps; 1209 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 1210 1211 // Copy the result values into the output registers. 
1212 for (unsigned i = 0, realRVLocIdx = 0; 1213 i != RVLocs.size(); 1214 ++i, ++realRVLocIdx) { 1215 CCValAssign &VA = RVLocs[i]; 1216 assert(VA.isRegLoc() && "Can only return in registers!"); 1217 1218 SDValue Arg = SplitVals[realRVLocIdx]; 1219 1220 // Copied from other backends. 1221 switch (VA.getLocInfo()) { 1222 default: llvm_unreachable("Unknown loc info!"); 1223 case CCValAssign::Full: 1224 break; 1225 case CCValAssign::BCvt: 1226 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); 1227 break; 1228 } 1229 1230 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag); 1231 Flag = Chain.getValue(1); 1232 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 1233 } 1234 1235 // Update chain and glue. 1236 RetOps[0] = Chain; 1237 if (Flag.getNode()) 1238 RetOps.push_back(Flag); 1239 1240 unsigned Opc = Info->returnsVoid() ? AMDGPUISD::ENDPGM : AMDGPUISD::RETURN; 1241 return DAG.getNode(Opc, DL, MVT::Other, RetOps); 1242 } 1243 1244 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT, 1245 SelectionDAG &DAG) const { 1246 unsigned Reg = StringSwitch<unsigned>(RegName) 1247 .Case("m0", AMDGPU::M0) 1248 .Case("exec", AMDGPU::EXEC) 1249 .Case("exec_lo", AMDGPU::EXEC_LO) 1250 .Case("exec_hi", AMDGPU::EXEC_HI) 1251 .Case("flat_scratch", AMDGPU::FLAT_SCR) 1252 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO) 1253 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI) 1254 .Default(AMDGPU::NoRegister); 1255 1256 if (Reg == AMDGPU::NoRegister) { 1257 report_fatal_error(Twine("invalid register name \"" 1258 + StringRef(RegName) + "\".")); 1259 1260 } 1261 1262 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS && 1263 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) { 1264 report_fatal_error(Twine("invalid register \"" 1265 + StringRef(RegName) + "\" for subtarget.")); 1266 } 1267 1268 switch (Reg) { 1269 case AMDGPU::M0: 1270 case AMDGPU::EXEC_LO: 1271 case AMDGPU::EXEC_HI: 1272 case AMDGPU::FLAT_SCR_LO: 1273 case AMDGPU::FLAT_SCR_HI: 1274 if (VT.getSizeInBits() == 32) 1275 return Reg; 1276 break; 1277 case AMDGPU::EXEC: 1278 case AMDGPU::FLAT_SCR: 1279 if (VT.getSizeInBits() == 64) 1280 return Reg; 1281 break; 1282 default: 1283 llvm_unreachable("missing register type checking"); 1284 } 1285 1286 report_fatal_error(Twine("invalid type for register \"" 1287 + StringRef(RegName) + "\".")); 1288 } 1289 1290 // If kill is not the last instruction, split the block so kill is always a 1291 // proper terminator. 1292 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI, 1293 MachineBasicBlock *BB) const { 1294 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 1295 1296 MachineBasicBlock::iterator SplitPoint(&MI); 1297 ++SplitPoint; 1298 1299 if (SplitPoint == BB->end()) { 1300 // Don't bother with a new block. 1301 MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR)); 1302 return BB; 1303 } 1304 1305 MachineFunction *MF = BB->getParent(); 1306 MachineBasicBlock *SplitBB 1307 = MF->CreateMachineBasicBlock(BB->getBasicBlock()); 1308 1309 MF->insert(++MachineFunction::iterator(BB), SplitBB); 1310 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end()); 1311 1312 SplitBB->transferSuccessorsAndUpdatePHIs(BB); 1313 BB->addSuccessor(SplitBB); 1314 1315 MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR)); 1316 return SplitBB; 1317 } 1318 1319 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the 1320 // wavefront. 
If the value is uniform and just happens to be in a VGPR, this 1321 // will only do one iteration. In the worst case, this will loop 64 times. 1322 // 1323 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value. 1324 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop( 1325 const SIInstrInfo *TII, 1326 MachineRegisterInfo &MRI, 1327 MachineBasicBlock &OrigBB, 1328 MachineBasicBlock &LoopBB, 1329 const DebugLoc &DL, 1330 const MachineOperand &IdxReg, 1331 unsigned InitReg, 1332 unsigned ResultReg, 1333 unsigned PhiReg, 1334 unsigned InitSaveExecReg, 1335 int Offset, 1336 bool UseGPRIdxMode) { 1337 MachineBasicBlock::iterator I = LoopBB.begin(); 1338 1339 unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1340 unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1341 unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 1342 unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1343 1344 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) 1345 .addReg(InitReg) 1346 .addMBB(&OrigBB) 1347 .addReg(ResultReg) 1348 .addMBB(&LoopBB); 1349 1350 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec) 1351 .addReg(InitSaveExecReg) 1352 .addMBB(&OrigBB) 1353 .addReg(NewExec) 1354 .addMBB(&LoopBB); 1355 1356 // Read the next variant <- also loop target. 1357 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg) 1358 .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef())); 1359 1360 // Compare the just read M0 value to all possible Idx values. 1361 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg) 1362 .addReg(CurrentIdxReg) 1363 .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg()); 1364 1365 if (UseGPRIdxMode) { 1366 unsigned IdxReg; 1367 if (Offset == 0) { 1368 IdxReg = CurrentIdxReg; 1369 } else { 1370 IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 1371 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg) 1372 .addReg(CurrentIdxReg, RegState::Kill) 1373 .addImm(Offset); 1374 } 1375 1376 MachineInstr *SetIdx = 1377 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_IDX)) 1378 .addReg(IdxReg, RegState::Kill); 1379 SetIdx->getOperand(2).setIsUndef(); 1380 } else { 1381 // Move index from VCC into M0 1382 if (Offset == 0) { 1383 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 1384 .addReg(CurrentIdxReg, RegState::Kill); 1385 } else { 1386 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 1387 .addReg(CurrentIdxReg, RegState::Kill) 1388 .addImm(Offset); 1389 } 1390 } 1391 1392 // Update EXEC, save the original EXEC value to VCC. 1393 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec) 1394 .addReg(CondReg, RegState::Kill); 1395 1396 MRI.setSimpleHint(NewExec, CondReg); 1397 1398 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 1399 MachineInstr *InsertPt = 1400 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC) 1401 .addReg(AMDGPU::EXEC) 1402 .addReg(NewExec); 1403 1404 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use 1405 // s_cbranch_scc0? 1406 1407 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover. 1408 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) 1409 .addMBB(&LoopBB); 1410 1411 return InsertPt->getIterator(); 1412 } 1413 1414 // This has slightly sub-optimal regalloc when the source vector is killed by 1415 // the read. 
The register allocator does not understand that the kill is 1416 // per-workitem, so is kept alive for the whole loop so we end up not re-using a 1417 // subregister from it, using 1 more VGPR than necessary. This was saved when 1418 // this was expanded after register allocation. 1419 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII, 1420 MachineBasicBlock &MBB, 1421 MachineInstr &MI, 1422 unsigned InitResultReg, 1423 unsigned PhiReg, 1424 int Offset, 1425 bool UseGPRIdxMode) { 1426 MachineFunction *MF = MBB.getParent(); 1427 MachineRegisterInfo &MRI = MF->getRegInfo(); 1428 const DebugLoc &DL = MI.getDebugLoc(); 1429 MachineBasicBlock::iterator I(&MI); 1430 1431 unsigned DstReg = MI.getOperand(0).getReg(); 1432 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1433 unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1434 1435 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec); 1436 1437 // Save the EXEC mask 1438 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec) 1439 .addReg(AMDGPU::EXEC); 1440 1441 // To insert the loop we need to split the block. Move everything after this 1442 // point to a new block, and insert a new empty block between the two. 1443 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock(); 1444 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock(); 1445 MachineFunction::iterator MBBI(MBB); 1446 ++MBBI; 1447 1448 MF->insert(MBBI, LoopBB); 1449 MF->insert(MBBI, RemainderBB); 1450 1451 LoopBB->addSuccessor(LoopBB); 1452 LoopBB->addSuccessor(RemainderBB); 1453 1454 // Move the rest of the block into a new block. 1455 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 1456 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); 1457 1458 MBB.addSuccessor(LoopBB); 1459 1460 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 1461 1462 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx, 1463 InitResultReg, DstReg, PhiReg, TmpExec, 1464 Offset, UseGPRIdxMode); 1465 1466 MachineBasicBlock::iterator First = RemainderBB->begin(); 1467 BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) 1468 .addReg(SaveExec); 1469 1470 return InsPt; 1471 } 1472 1473 // Returns subreg index, offset 1474 static std::pair<unsigned, int> 1475 computeIndirectRegAndOffset(const SIRegisterInfo &TRI, 1476 const TargetRegisterClass *SuperRC, 1477 unsigned VecReg, 1478 int Offset) { 1479 int NumElts = SuperRC->getSize() / 4; 1480 1481 // Skip out of bounds offsets, or else we would end up using an undefined 1482 // register. 1483 if (Offset >= NumElts || Offset < 0) 1484 return std::make_pair(AMDGPU::sub0, Offset); 1485 1486 return std::make_pair(AMDGPU::sub0 + Offset, 0); 1487 } 1488 1489 // Return true if the index is an SGPR and was set. 1490 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII, 1491 MachineRegisterInfo &MRI, 1492 MachineInstr &MI, 1493 int Offset, 1494 bool UseGPRIdxMode, 1495 bool IsIndirectSrc) { 1496 MachineBasicBlock *MBB = MI.getParent(); 1497 const DebugLoc &DL = MI.getDebugLoc(); 1498 MachineBasicBlock::iterator I(&MI); 1499 1500 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 1501 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); 1502 1503 assert(Idx->getReg() != AMDGPU::NoRegister); 1504 1505 if (!TII->getRegisterInfo().isSGPRClass(IdxRC)) 1506 return false; 1507 1508 if (UseGPRIdxMode) { 1509 unsigned IdxMode = IsIndirectSrc ? 
1510 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE; 1511 if (Offset == 0) { 1512 MachineInstr *SetOn = 1513 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 1514 .add(*Idx) 1515 .addImm(IdxMode); 1516 1517 SetOn->getOperand(3).setIsUndef(); 1518 } else { 1519 unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 1520 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) 1521 .add(*Idx) 1522 .addImm(Offset); 1523 MachineInstr *SetOn = 1524 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 1525 .addReg(Tmp, RegState::Kill) 1526 .addImm(IdxMode); 1527 1528 SetOn->getOperand(3).setIsUndef(); 1529 } 1530 1531 return true; 1532 } 1533 1534 if (Offset == 0) { 1535 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0).add(*Idx); 1536 } else { 1537 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 1538 .add(*Idx) 1539 .addImm(Offset); 1540 } 1541 1542 return true; 1543 } 1544 1545 // Control flow needs to be inserted if indexing with a VGPR. 1546 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI, 1547 MachineBasicBlock &MBB, 1548 const SISubtarget &ST) { 1549 const SIInstrInfo *TII = ST.getInstrInfo(); 1550 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 1551 MachineFunction *MF = MBB.getParent(); 1552 MachineRegisterInfo &MRI = MF->getRegInfo(); 1553 1554 unsigned Dst = MI.getOperand(0).getReg(); 1555 unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg(); 1556 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 1557 1558 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg); 1559 1560 unsigned SubReg; 1561 std::tie(SubReg, Offset) 1562 = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset); 1563 1564 bool UseGPRIdxMode = ST.hasVGPRIndexMode() && EnableVGPRIndexMode; 1565 1566 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) { 1567 MachineBasicBlock::iterator I(&MI); 1568 const DebugLoc &DL = MI.getDebugLoc(); 1569 1570 if (UseGPRIdxMode) { 1571 // TODO: Look at the uses to avoid the copy. This may require rescheduling 1572 // to avoid interfering with other uses, so probably requires a new 1573 // optimization pass. 1574 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) 1575 .addReg(SrcReg, RegState::Undef, SubReg) 1576 .addReg(SrcReg, RegState::Implicit) 1577 .addReg(AMDGPU::M0, RegState::Implicit); 1578 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 1579 } else { 1580 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 1581 .addReg(SrcReg, RegState::Undef, SubReg) 1582 .addReg(SrcReg, RegState::Implicit); 1583 } 1584 1585 MI.eraseFromParent(); 1586 1587 return &MBB; 1588 } 1589 1590 const DebugLoc &DL = MI.getDebugLoc(); 1591 MachineBasicBlock::iterator I(&MI); 1592 1593 unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1594 unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1595 1596 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg); 1597 1598 if (UseGPRIdxMode) { 1599 MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 1600 .addImm(0) // Reset inside loop. 1601 .addImm(VGPRIndexMode::SRC0_ENABLE); 1602 SetOn->getOperand(3).setIsUndef(); 1603 1604 // Disable again after the loop. 
1605 BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 1606 } 1607 1608 auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, UseGPRIdxMode); 1609 MachineBasicBlock *LoopBB = InsPt->getParent(); 1610 1611 if (UseGPRIdxMode) { 1612 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) 1613 .addReg(SrcReg, RegState::Undef, SubReg) 1614 .addReg(SrcReg, RegState::Implicit) 1615 .addReg(AMDGPU::M0, RegState::Implicit); 1616 } else { 1617 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 1618 .addReg(SrcReg, RegState::Undef, SubReg) 1619 .addReg(SrcReg, RegState::Implicit); 1620 } 1621 1622 MI.eraseFromParent(); 1623 1624 return LoopBB; 1625 } 1626 1627 static unsigned getMOVRELDPseudo(const TargetRegisterClass *VecRC) { 1628 switch (VecRC->getSize()) { 1629 case 4: 1630 return AMDGPU::V_MOVRELD_B32_V1; 1631 case 8: 1632 return AMDGPU::V_MOVRELD_B32_V2; 1633 case 16: 1634 return AMDGPU::V_MOVRELD_B32_V4; 1635 case 32: 1636 return AMDGPU::V_MOVRELD_B32_V8; 1637 case 64: 1638 return AMDGPU::V_MOVRELD_B32_V16; 1639 default: 1640 llvm_unreachable("unsupported size for MOVRELD pseudos"); 1641 } 1642 } 1643 1644 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, 1645 MachineBasicBlock &MBB, 1646 const SISubtarget &ST) { 1647 const SIInstrInfo *TII = ST.getInstrInfo(); 1648 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 1649 MachineFunction *MF = MBB.getParent(); 1650 MachineRegisterInfo &MRI = MF->getRegInfo(); 1651 1652 unsigned Dst = MI.getOperand(0).getReg(); 1653 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); 1654 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 1655 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); 1656 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 1657 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg()); 1658 1659 // This can be an immediate, but will be folded later. 
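  // From here there are three cases: a constant index (no index register)
  // becomes a plain INSERT_SUBREG, a uniform SGPR index can be written to M0
  // (or used with GPR indexing mode) directly, and a divergent VGPR index
  // needs the waterfall loop built by loadM0FromVGPR.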
1660 assert(Val->getReg()); 1661 1662 unsigned SubReg; 1663 std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC, 1664 SrcVec->getReg(), 1665 Offset); 1666 bool UseGPRIdxMode = ST.hasVGPRIndexMode() && EnableVGPRIndexMode; 1667 1668 if (Idx->getReg() == AMDGPU::NoRegister) { 1669 MachineBasicBlock::iterator I(&MI); 1670 const DebugLoc &DL = MI.getDebugLoc(); 1671 1672 assert(Offset == 0); 1673 1674 BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst) 1675 .add(*SrcVec) 1676 .add(*Val) 1677 .addImm(SubReg); 1678 1679 MI.eraseFromParent(); 1680 return &MBB; 1681 } 1682 1683 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) { 1684 MachineBasicBlock::iterator I(&MI); 1685 const DebugLoc &DL = MI.getDebugLoc(); 1686 1687 if (UseGPRIdxMode) { 1688 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) 1689 .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst 1690 .add(*Val) 1691 .addReg(Dst, RegState::ImplicitDefine) 1692 .addReg(SrcVec->getReg(), RegState::Implicit) 1693 .addReg(AMDGPU::M0, RegState::Implicit); 1694 1695 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 1696 } else { 1697 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(VecRC)); 1698 1699 BuildMI(MBB, I, DL, MovRelDesc) 1700 .addReg(Dst, RegState::Define) 1701 .addReg(SrcVec->getReg()) 1702 .add(*Val) 1703 .addImm(SubReg - AMDGPU::sub0); 1704 } 1705 1706 MI.eraseFromParent(); 1707 return &MBB; 1708 } 1709 1710 if (Val->isReg()) 1711 MRI.clearKillFlags(Val->getReg()); 1712 1713 const DebugLoc &DL = MI.getDebugLoc(); 1714 1715 if (UseGPRIdxMode) { 1716 MachineBasicBlock::iterator I(&MI); 1717 1718 MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 1719 .addImm(0) // Reset inside loop. 1720 .addImm(VGPRIndexMode::DST_ENABLE); 1721 SetOn->getOperand(3).setIsUndef(); 1722 1723 // Disable again after the loop. 1724 BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 1725 } 1726 1727 unsigned PhiReg = MRI.createVirtualRegister(VecRC); 1728 1729 auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, 1730 Offset, UseGPRIdxMode); 1731 MachineBasicBlock *LoopBB = InsPt->getParent(); 1732 1733 if (UseGPRIdxMode) { 1734 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) 1735 .addReg(PhiReg, RegState::Undef, SubReg) // vdst 1736 .add(*Val) // src0 1737 .addReg(Dst, RegState::ImplicitDefine) 1738 .addReg(PhiReg, RegState::Implicit) 1739 .addReg(AMDGPU::M0, RegState::Implicit); 1740 } else { 1741 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(VecRC)); 1742 1743 BuildMI(*LoopBB, InsPt, DL, MovRelDesc) 1744 .addReg(Dst, RegState::Define) 1745 .addReg(PhiReg) 1746 .add(*Val) 1747 .addImm(SubReg - AMDGPU::sub0); 1748 } 1749 1750 MI.eraseFromParent(); 1751 1752 return LoopBB; 1753 } 1754 1755 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( 1756 MachineInstr &MI, MachineBasicBlock *BB) const { 1757 1758 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 1759 MachineFunction *MF = BB->getParent(); 1760 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1761 1762 if (TII->isMIMG(MI)) { 1763 if (!MI.memoperands_empty()) 1764 return BB; 1765 // Add a memoperand for mimg instructions so that they aren't assumed to 1766 // be ordered memory instuctions. 
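    // The memoperand is built from the image pseudo source value with unknown
    // size and alignment (both zero); only the dereferenceable / load / store
    // flags matter here.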
1767 1768 MachinePointerInfo PtrInfo(MFI->getImagePSV()); 1769 MachineMemOperand::Flags Flags = MachineMemOperand::MODereferenceable; 1770 if (MI.mayStore()) 1771 Flags |= MachineMemOperand::MOStore; 1772 1773 if (MI.mayLoad()) 1774 Flags |= MachineMemOperand::MOLoad; 1775 1776 auto MMO = MF->getMachineMemOperand(PtrInfo, Flags, 0, 0); 1777 MI.addMemOperand(*MF, MMO); 1778 return BB; 1779 } 1780 1781 switch (MI.getOpcode()) { 1782 case AMDGPU::S_TRAP_PSEUDO: { 1783 DebugLoc DL = MI.getDebugLoc(); 1784 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), AMDGPU::VGPR0) 1785 .addImm(1); 1786 1787 MachineFunction *MF = BB->getParent(); 1788 SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 1789 unsigned UserSGPR = Info->getQueuePtrUserSGPR(); 1790 assert(UserSGPR != AMDGPU::NoRegister); 1791 1792 if (!BB->isLiveIn(UserSGPR)) 1793 BB->addLiveIn(UserSGPR); 1794 1795 BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::SGPR0_SGPR1) 1796 .addReg(UserSGPR); 1797 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_TRAP)).addImm(0x1) 1798 .addReg(AMDGPU::VGPR0, RegState::Implicit) 1799 .addReg(AMDGPU::SGPR0_SGPR1, RegState::Implicit); 1800 1801 MI.eraseFromParent(); 1802 return BB; 1803 } 1804 1805 case AMDGPU::SI_INIT_M0: 1806 BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(), 1807 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 1808 .add(MI.getOperand(0)); 1809 MI.eraseFromParent(); 1810 return BB; 1811 1812 case AMDGPU::GET_GROUPSTATICSIZE: { 1813 DebugLoc DL = MI.getDebugLoc(); 1814 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32)) 1815 .add(MI.getOperand(0)) 1816 .addImm(MFI->getLDSSize()); 1817 MI.eraseFromParent(); 1818 return BB; 1819 } 1820 case AMDGPU::SI_INDIRECT_SRC_V1: 1821 case AMDGPU::SI_INDIRECT_SRC_V2: 1822 case AMDGPU::SI_INDIRECT_SRC_V4: 1823 case AMDGPU::SI_INDIRECT_SRC_V8: 1824 case AMDGPU::SI_INDIRECT_SRC_V16: 1825 return emitIndirectSrc(MI, *BB, *getSubtarget()); 1826 case AMDGPU::SI_INDIRECT_DST_V1: 1827 case AMDGPU::SI_INDIRECT_DST_V2: 1828 case AMDGPU::SI_INDIRECT_DST_V4: 1829 case AMDGPU::SI_INDIRECT_DST_V8: 1830 case AMDGPU::SI_INDIRECT_DST_V16: 1831 return emitIndirectDst(MI, *BB, *getSubtarget()); 1832 case AMDGPU::SI_KILL: 1833 return splitKillBlock(MI, BB); 1834 case AMDGPU::V_CNDMASK_B64_PSEUDO: { 1835 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 1836 1837 unsigned Dst = MI.getOperand(0).getReg(); 1838 unsigned Src0 = MI.getOperand(1).getReg(); 1839 unsigned Src1 = MI.getOperand(2).getReg(); 1840 const DebugLoc &DL = MI.getDebugLoc(); 1841 unsigned SrcCond = MI.getOperand(3).getReg(); 1842 1843 unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1844 unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1845 1846 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo) 1847 .addReg(Src0, 0, AMDGPU::sub0) 1848 .addReg(Src1, 0, AMDGPU::sub0) 1849 .addReg(SrcCond); 1850 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi) 1851 .addReg(Src0, 0, AMDGPU::sub1) 1852 .addReg(Src1, 0, AMDGPU::sub1) 1853 .addReg(SrcCond); 1854 1855 BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst) 1856 .addReg(DstLo) 1857 .addImm(AMDGPU::sub0) 1858 .addReg(DstHi) 1859 .addImm(AMDGPU::sub1); 1860 MI.eraseFromParent(); 1861 return BB; 1862 } 1863 case AMDGPU::SI_BR_UNDEF: { 1864 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 1865 const DebugLoc &DL = MI.getDebugLoc(); 1866 MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) 1867 .add(MI.getOperand(0)); 1868 Br->getOperand(1).setIsUndef(true); 
// read undef SCC 1869 MI.eraseFromParent(); 1870 return BB; 1871 } 1872 default: 1873 return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); 1874 } 1875 } 1876 1877 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const { 1878 // This currently forces unfolding various combinations of fsub into fma with 1879 // free fneg'd operands. As long as we have fast FMA (controlled by 1880 // isFMAFasterThanFMulAndFAdd), we should perform these. 1881 1882 // When fma is quarter rate, for f64 where add / sub are at best half rate, 1883 // most of these combines appear to be cycle neutral but save on instruction 1884 // count / code size. 1885 return true; 1886 } 1887 1888 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, 1889 EVT VT) const { 1890 if (!VT.isVector()) { 1891 return MVT::i1; 1892 } 1893 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements()); 1894 } 1895 1896 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const { 1897 // TODO: Should i16 be used always if legal? For now it would force VALU 1898 // shifts. 1899 return (VT == MVT::i16) ? MVT::i16 : MVT::i32; 1900 } 1901 1902 // Answering this is somewhat tricky and depends on the specific device which 1903 // have different rates for fma or all f64 operations. 1904 // 1905 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other 1906 // regardless of which device (although the number of cycles differs between 1907 // devices), so it is always profitable for f64. 1908 // 1909 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable 1910 // only on full rate devices. Normally, we should prefer selecting v_mad_f32 1911 // which we can always do even without fused FP ops since it returns the same 1912 // result as the separate operations and since it is always full 1913 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32 1914 // however does not support denormals, so we do report fma as faster if we have 1915 // a fast fma device and require denormals. 1916 // 1917 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 1918 VT = VT.getScalarType(); 1919 1920 switch (VT.getSimpleVT().SimpleTy) { 1921 case MVT::f32: 1922 // This is as fast on some subtargets. However, we always have full rate f32 1923 // mad available which returns the same result as the separate operations 1924 // which we should prefer over fma. We can't use this if we want to support 1925 // denormals, so only report this in these cases. 
1926 return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32(); 1927 case MVT::f64: 1928 return true; 1929 case MVT::f16: 1930 return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals(); 1931 default: 1932 break; 1933 } 1934 1935 return false; 1936 } 1937 1938 //===----------------------------------------------------------------------===// 1939 // Custom DAG Lowering Operations 1940 //===----------------------------------------------------------------------===// 1941 1942 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 1943 switch (Op.getOpcode()) { 1944 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); 1945 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 1946 case ISD::LOAD: { 1947 SDValue Result = LowerLOAD(Op, DAG); 1948 assert((!Result.getNode() || 1949 Result.getNode()->getNumValues() == 2) && 1950 "Load should return a value and a chain"); 1951 return Result; 1952 } 1953 1954 case ISD::FSIN: 1955 case ISD::FCOS: 1956 return LowerTrig(Op, DAG); 1957 case ISD::SELECT: return LowerSELECT(Op, DAG); 1958 case ISD::FDIV: return LowerFDIV(Op, DAG); 1959 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG); 1960 case ISD::STORE: return LowerSTORE(Op, DAG); 1961 case ISD::GlobalAddress: { 1962 MachineFunction &MF = DAG.getMachineFunction(); 1963 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1964 return LowerGlobalAddress(MFI, Op, DAG); 1965 } 1966 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 1967 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 1968 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); 1969 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG); 1970 case ISD::INSERT_VECTOR_ELT: 1971 return lowerINSERT_VECTOR_ELT(Op, DAG); 1972 case ISD::EXTRACT_VECTOR_ELT: 1973 return lowerEXTRACT_VECTOR_ELT(Op, DAG); 1974 case ISD::FP_ROUND: 1975 return lowerFP_ROUND(Op, DAG); 1976 } 1977 return SDValue(); 1978 } 1979 1980 void SITargetLowering::ReplaceNodeResults(SDNode *N, 1981 SmallVectorImpl<SDValue> &Results, 1982 SelectionDAG &DAG) const { 1983 switch (N->getOpcode()) { 1984 case ISD::INSERT_VECTOR_ELT: { 1985 if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG)) 1986 Results.push_back(Res); 1987 return; 1988 } 1989 case ISD::EXTRACT_VECTOR_ELT: { 1990 if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG)) 1991 Results.push_back(Res); 1992 return; 1993 } 1994 default: 1995 break; 1996 } 1997 } 1998 1999 /// \brief Helper function for LowerBRCOND 2000 static SDNode *findUser(SDValue Value, unsigned Opcode) { 2001 2002 SDNode *Parent = Value.getNode(); 2003 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); 2004 I != E; ++I) { 2005 2006 if (I.getUse().get() != Value) 2007 continue; 2008 2009 if (I->getOpcode() == Opcode) 2010 return *I; 2011 } 2012 return nullptr; 2013 } 2014 2015 bool SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { 2016 if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 2017 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) { 2018 case AMDGPUIntrinsic::amdgcn_if: 2019 case AMDGPUIntrinsic::amdgcn_else: 2020 case AMDGPUIntrinsic::amdgcn_end_cf: 2021 case AMDGPUIntrinsic::amdgcn_loop: 2022 return true; 2023 default: 2024 return false; 2025 } 2026 } 2027 2028 if (Intr->getOpcode() == ISD::INTRINSIC_WO_CHAIN) { 2029 switch (cast<ConstantSDNode>(Intr->getOperand(0))->getZExtValue()) { 2030 case AMDGPUIntrinsic::amdgcn_break: 2031 case 
AMDGPUIntrinsic::amdgcn_if_break: 2032 case AMDGPUIntrinsic::amdgcn_else_break: 2033 return true; 2034 default: 2035 return false; 2036 } 2037 } 2038 2039 return false; 2040 } 2041 2042 void SITargetLowering::createDebuggerPrologueStackObjects( 2043 MachineFunction &MF) const { 2044 // Create stack objects that are used for emitting debugger prologue. 2045 // 2046 // Debugger prologue writes work group IDs and work item IDs to scratch memory 2047 // at fixed location in the following format: 2048 // offset 0: work group ID x 2049 // offset 4: work group ID y 2050 // offset 8: work group ID z 2051 // offset 16: work item ID x 2052 // offset 20: work item ID y 2053 // offset 24: work item ID z 2054 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 2055 int ObjectIdx = 0; 2056 2057 // For each dimension: 2058 for (unsigned i = 0; i < 3; ++i) { 2059 // Create fixed stack object for work group ID. 2060 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true); 2061 Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx); 2062 // Create fixed stack object for work item ID. 2063 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true); 2064 Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx); 2065 } 2066 } 2067 2068 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const { 2069 const Triple &TT = getTargetMachine().getTargetTriple(); 2070 return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS && 2071 AMDGPU::shouldEmitConstantsToTextSection(TT); 2072 } 2073 2074 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const { 2075 return (GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS || 2076 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) && 2077 !shouldEmitFixup(GV) && 2078 !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); 2079 } 2080 2081 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const { 2082 return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV); 2083 } 2084 2085 /// This transforms the control flow intrinsics to get the branch destination as 2086 /// last parameter, also switches branch target with BR if the need arise 2087 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, 2088 SelectionDAG &DAG) const { 2089 SDLoc DL(BRCOND); 2090 2091 SDNode *Intr = BRCOND.getOperand(1).getNode(); 2092 SDValue Target = BRCOND.getOperand(2); 2093 SDNode *BR = nullptr; 2094 SDNode *SetCC = nullptr; 2095 2096 if (Intr->getOpcode() == ISD::SETCC) { 2097 // As long as we negate the condition everything is fine 2098 SetCC = Intr; 2099 Intr = SetCC->getOperand(0).getNode(); 2100 2101 } else { 2102 // Get the target from BR if we don't negate the condition 2103 BR = findUser(BRCOND, ISD::BR); 2104 Target = BR->getOperand(1); 2105 } 2106 2107 // FIXME: This changes the types of the intrinsics instead of introducing new 2108 // nodes with the correct types. 2109 // e.g. llvm.amdgcn.loop 2110 2111 // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3 2112 // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088> 2113 2114 if (!isCFIntrinsic(Intr)) { 2115 // This is a uniform branch so we don't need to legalize. 
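    // Nothing to rewrite: the condition does not come from one of the amdgcn
    // control flow intrinsics, so the BRCOND (and any BR found above) can be
    // selected as an ordinary scalar branch.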
2116 return BRCOND; 2117 } 2118 2119 bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID || 2120 Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN; 2121 2122 assert(!SetCC || 2123 (SetCC->getConstantOperandVal(1) == 1 && 2124 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() == 2125 ISD::SETNE)); 2126 2127 // operands of the new intrinsic call 2128 SmallVector<SDValue, 4> Ops; 2129 if (HaveChain) 2130 Ops.push_back(BRCOND.getOperand(0)); 2131 2132 Ops.append(Intr->op_begin() + (HaveChain ? 1 : 0), Intr->op_end()); 2133 Ops.push_back(Target); 2134 2135 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end()); 2136 2137 // build the new intrinsic call 2138 SDNode *Result = DAG.getNode( 2139 Res.size() > 1 ? ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL, 2140 DAG.getVTList(Res), Ops).getNode(); 2141 2142 if (!HaveChain) { 2143 SDValue Ops[] = { 2144 SDValue(Result, 0), 2145 BRCOND.getOperand(0) 2146 }; 2147 2148 Result = DAG.getMergeValues(Ops, DL).getNode(); 2149 } 2150 2151 if (BR) { 2152 // Give the branch instruction our target 2153 SDValue Ops[] = { 2154 BR->getOperand(0), 2155 BRCOND.getOperand(2) 2156 }; 2157 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops); 2158 DAG.ReplaceAllUsesWith(BR, NewBR.getNode()); 2159 BR = NewBR.getNode(); 2160 } 2161 2162 SDValue Chain = SDValue(Result, Result->getNumValues() - 1); 2163 2164 // Copy the intrinsic results to registers 2165 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) { 2166 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg); 2167 if (!CopyToReg) 2168 continue; 2169 2170 Chain = DAG.getCopyToReg( 2171 Chain, DL, 2172 CopyToReg->getOperand(1), 2173 SDValue(Result, i - 1), 2174 SDValue()); 2175 2176 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0)); 2177 } 2178 2179 // Remove the old intrinsic from the chain 2180 DAG.ReplaceAllUsesOfValueWith( 2181 SDValue(Intr, Intr->getNumValues() - 1), 2182 Intr->getOperand(0)); 2183 2184 return Chain; 2185 } 2186 2187 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG, 2188 SDValue Op, 2189 const SDLoc &DL, 2190 EVT VT) const { 2191 return Op.getValueType().bitsLE(VT) ? 2192 DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) : 2193 DAG.getNode(ISD::FTRUNC, DL, VT, Op); 2194 } 2195 2196 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { 2197 assert(Op.getValueType() == MVT::f16 && 2198 "Do not know how to custom lower FP_ROUND for non-f16 type"); 2199 2200 SDValue Src = Op.getOperand(0); 2201 EVT SrcVT = Src.getValueType(); 2202 if (SrcVT != MVT::f64) 2203 return Op; 2204 2205 SDLoc DL(Op); 2206 2207 SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src); 2208 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16); 2209 return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);; 2210 } 2211 2212 SDValue SITargetLowering::getSegmentAperture(unsigned AS, 2213 SelectionDAG &DAG) const { 2214 SDLoc SL; 2215 MachineFunction &MF = DAG.getMachineFunction(); 2216 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 2217 unsigned UserSGPR = Info->getQueuePtrUserSGPR(); 2218 assert(UserSGPR != AMDGPU::NoRegister); 2219 2220 SDValue QueuePtr = CreateLiveInRegister( 2221 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); 2222 2223 // Offset into amd_queue_t for group_segment_aperture_base_hi / 2224 // private_segment_aperture_base_hi. 2225 uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 
0x40 : 0x44; 2226 2227 SDValue Ptr = DAG.getNode(ISD::ADD, SL, MVT::i64, QueuePtr, 2228 DAG.getConstant(StructOffset, SL, MVT::i64)); 2229 2230 // TODO: Use custom target PseudoSourceValue. 2231 // TODO: We should use the value from the IR intrinsic call, but it might not 2232 // be available and how do we get it? 2233 Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()), 2234 AMDGPUAS::CONSTANT_ADDRESS)); 2235 2236 MachinePointerInfo PtrInfo(V, StructOffset); 2237 return DAG.getLoad(MVT::i32, SL, QueuePtr.getValue(1), Ptr, PtrInfo, 2238 MinAlign(64, StructOffset), 2239 MachineMemOperand::MODereferenceable | 2240 MachineMemOperand::MOInvariant); 2241 } 2242 2243 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, 2244 SelectionDAG &DAG) const { 2245 SDLoc SL(Op); 2246 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op); 2247 2248 SDValue Src = ASC->getOperand(0); 2249 2250 // FIXME: Really support non-0 null pointers. 2251 SDValue SegmentNullPtr = DAG.getConstant(-1, SL, MVT::i32); 2252 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64); 2253 2254 // flat -> local/private 2255 if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { 2256 if (ASC->getDestAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || 2257 ASC->getDestAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { 2258 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE); 2259 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); 2260 2261 return DAG.getNode(ISD::SELECT, SL, MVT::i32, 2262 NonNull, Ptr, SegmentNullPtr); 2263 } 2264 } 2265 2266 // local/private -> flat 2267 if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { 2268 if (ASC->getSrcAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || 2269 ASC->getSrcAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { 2270 SDValue NonNull 2271 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE); 2272 2273 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), DAG); 2274 SDValue CvtPtr 2275 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture); 2276 2277 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, 2278 DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr), 2279 FlatNullPtr); 2280 } 2281 } 2282 2283 // global <-> flat are no-ops and never emitted. 2284 2285 const MachineFunction &MF = DAG.getMachineFunction(); 2286 DiagnosticInfoUnsupported InvalidAddrSpaceCast( 2287 *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc()); 2288 DAG.getContext()->diagnose(InvalidAddrSpaceCast); 2289 2290 return DAG.getUNDEF(ASC->getValueType(0)); 2291 } 2292 2293 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, 2294 SelectionDAG &DAG) const { 2295 SDValue Idx = Op.getOperand(2); 2296 if (isa<ConstantSDNode>(Idx)) 2297 return SDValue(); 2298 2299 // Avoid stack access for dynamic indexing. 2300 SDLoc SL(Op); 2301 SDValue Vec = Op.getOperand(0); 2302 SDValue Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Op.getOperand(1)); 2303 2304 // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec 2305 SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Val); 2306 2307 // Convert vector index to bit-index. 
2308 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, 2309 DAG.getConstant(16, SL, MVT::i32)); 2310 2311 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec); 2312 2313 SDValue BFM = DAG.getNode(ISD::SHL, SL, MVT::i32, 2314 DAG.getConstant(0xffff, SL, MVT::i32), 2315 ScaledIdx); 2316 2317 SDValue LHS = DAG.getNode(ISD::AND, SL, MVT::i32, BFM, ExtVal); 2318 SDValue RHS = DAG.getNode(ISD::AND, SL, MVT::i32, 2319 DAG.getNOT(SL, BFM, MVT::i32), BCVec); 2320 2321 SDValue BFI = DAG.getNode(ISD::OR, SL, MVT::i32, LHS, RHS); 2322 return DAG.getNode(ISD::BITCAST, SL, Op.getValueType(), BFI); 2323 } 2324 2325 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, 2326 SelectionDAG &DAG) const { 2327 SDLoc SL(Op); 2328 2329 EVT ResultVT = Op.getValueType(); 2330 SDValue Vec = Op.getOperand(0); 2331 SDValue Idx = Op.getOperand(1); 2332 2333 if (const ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) { 2334 SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec); 2335 2336 if (CIdx->getZExtValue() == 1) { 2337 Result = DAG.getNode(ISD::SRL, SL, MVT::i32, Result, 2338 DAG.getConstant(16, SL, MVT::i32)); 2339 } else { 2340 assert(CIdx->getZExtValue() == 0); 2341 } 2342 2343 if (ResultVT.bitsLT(MVT::i32)) 2344 Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result); 2345 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result); 2346 } 2347 2348 SDValue Sixteen = DAG.getConstant(16, SL, MVT::i32); 2349 2350 // Convert vector index to bit-index. 2351 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, Sixteen); 2352 2353 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec); 2354 SDValue Elt = DAG.getNode(ISD::SRL, SL, MVT::i32, BC, ScaledIdx); 2355 2356 SDValue Result = Elt; 2357 if (ResultVT.bitsLT(MVT::i32)) 2358 Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result); 2359 2360 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result); 2361 } 2362 2363 bool 2364 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 2365 // We can fold offsets for anything that doesn't require a GOT relocation. 2366 return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS || 2367 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) && 2368 !shouldEmitGOTReloc(GA->getGlobal()); 2369 } 2370 2371 static SDValue 2372 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV, 2373 const SDLoc &DL, unsigned Offset, EVT PtrVT, 2374 unsigned GAFlags = SIInstrInfo::MO_NONE) { 2375 // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is 2376 // lowered to the following code sequence: 2377 // 2378 // For constant address space: 2379 // s_getpc_b64 s[0:1] 2380 // s_add_u32 s0, s0, $symbol 2381 // s_addc_u32 s1, s1, 0 2382 // 2383 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 2384 // a fixup or relocation is emitted to replace $symbol with a literal 2385 // constant, which is a pc-relative offset from the encoding of the $symbol 2386 // operand to the global variable. 2387 // 2388 // For global address space: 2389 // s_getpc_b64 s[0:1] 2390 // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo 2391 // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi 2392 // 2393 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 2394 // fixups or relocations are emitted to replace $symbol@*@lo and 2395 // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant, 2396 // which is a 64-bit pc-relative offset from the encoding of the $symbol 2397 // operand to the global variable. 
2398 // 2399 // What we want here is an offset from the value returned by s_getpc 2400 // (which is the address of the s_add_u32 instruction) to the global 2401 // variable, but since the encoding of $symbol starts 4 bytes after the start 2402 // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too 2403 // small. This requires us to add 4 to the global variable offset in order to 2404 // compute the correct address. 2405 SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, 2406 GAFlags); 2407 SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, 2408 GAFlags == SIInstrInfo::MO_NONE ? 2409 GAFlags : GAFlags + 1); 2410 return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi); 2411 } 2412 2413 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, 2414 SDValue Op, 2415 SelectionDAG &DAG) const { 2416 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op); 2417 2418 if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS && 2419 GSD->getAddressSpace() != AMDGPUAS::GLOBAL_ADDRESS) 2420 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG); 2421 2422 SDLoc DL(GSD); 2423 const GlobalValue *GV = GSD->getGlobal(); 2424 EVT PtrVT = Op.getValueType(); 2425 2426 if (shouldEmitFixup(GV)) 2427 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT); 2428 else if (shouldEmitPCReloc(GV)) 2429 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT, 2430 SIInstrInfo::MO_REL32); 2431 2432 SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT, 2433 SIInstrInfo::MO_GOTPCREL32); 2434 2435 Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext()); 2436 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); 2437 const DataLayout &DataLayout = DAG.getDataLayout(); 2438 unsigned Align = DataLayout.getABITypeAlignment(PtrTy); 2439 // FIXME: Use a PseudoSourceValue once those can be assigned an address space. 2440 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); 2441 2442 return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align, 2443 MachineMemOperand::MODereferenceable | 2444 MachineMemOperand::MOInvariant); 2445 } 2446 2447 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, 2448 const SDLoc &DL, SDValue V) const { 2449 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as 2450 // the destination register. 2451 // 2452 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions, 2453 // so we will end up with redundant moves to m0. 2454 // 2455 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result. 2456 2457 // A Null SDValue creates a glue result. 2458 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue, 2459 V, Chain); 2460 return SDValue(M0, 0); 2461 } 2462 2463 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, 2464 SDValue Op, 2465 MVT VT, 2466 unsigned Offset) const { 2467 SDLoc SL(Op); 2468 SDValue Param = LowerParameter(DAG, MVT::i32, MVT::i32, SL, 2469 DAG.getEntryNode(), Offset, false); 2470 // The local size values will have the hi 16-bits as zero. 
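  // Attaching AssertZext with the narrower VT records that guarantee in the
  // DAG, letting later combines drop redundant zero-extends and masks.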
2471 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param, 2472 DAG.getValueType(VT)); 2473 } 2474 2475 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, 2476 EVT VT) { 2477 DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(), 2478 "non-hsa intrinsic with hsa target", 2479 DL.getDebugLoc()); 2480 DAG.getContext()->diagnose(BadIntrin); 2481 return DAG.getUNDEF(VT); 2482 } 2483 2484 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, 2485 EVT VT) { 2486 DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(), 2487 "intrinsic not supported on subtarget", 2488 DL.getDebugLoc()); 2489 DAG.getContext()->diagnose(BadIntrin); 2490 return DAG.getUNDEF(VT); 2491 } 2492 2493 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 2494 SelectionDAG &DAG) const { 2495 MachineFunction &MF = DAG.getMachineFunction(); 2496 auto MFI = MF.getInfo<SIMachineFunctionInfo>(); 2497 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2498 2499 EVT VT = Op.getValueType(); 2500 SDLoc DL(Op); 2501 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2502 2503 // TODO: Should this propagate fast-math-flags? 2504 2505 switch (IntrinsicID) { 2506 case Intrinsic::amdgcn_implicit_buffer_ptr: { 2507 unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER); 2508 return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT); 2509 } 2510 case Intrinsic::amdgcn_dispatch_ptr: 2511 case Intrinsic::amdgcn_queue_ptr: { 2512 if (!Subtarget->isAmdCodeObjectV2(MF)) { 2513 DiagnosticInfoUnsupported BadIntrin( 2514 *MF.getFunction(), "unsupported hsa intrinsic without hsa target", 2515 DL.getDebugLoc()); 2516 DAG.getContext()->diagnose(BadIntrin); 2517 return DAG.getUNDEF(VT); 2518 } 2519 2520 auto Reg = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ? 
2521 SIRegisterInfo::DISPATCH_PTR : SIRegisterInfo::QUEUE_PTR; 2522 return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, 2523 TRI->getPreloadedValue(MF, Reg), VT); 2524 } 2525 case Intrinsic::amdgcn_implicitarg_ptr: { 2526 unsigned offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT); 2527 return LowerParameterPtr(DAG, DL, DAG.getEntryNode(), offset); 2528 } 2529 case Intrinsic::amdgcn_kernarg_segment_ptr: { 2530 unsigned Reg 2531 = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR); 2532 return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT); 2533 } 2534 case Intrinsic::amdgcn_dispatch_id: { 2535 unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::DISPATCH_ID); 2536 return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT); 2537 } 2538 case Intrinsic::amdgcn_rcp: 2539 return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1)); 2540 case Intrinsic::amdgcn_rsq: 2541 case AMDGPUIntrinsic::AMDGPU_rsq: // Legacy name 2542 return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 2543 case Intrinsic::amdgcn_rsq_legacy: 2544 if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) 2545 return emitRemovedIntrinsicError(DAG, DL, VT); 2546 2547 return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1)); 2548 case Intrinsic::amdgcn_rcp_legacy: 2549 if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) 2550 return emitRemovedIntrinsicError(DAG, DL, VT); 2551 return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1)); 2552 case Intrinsic::amdgcn_rsq_clamp: { 2553 if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS) 2554 return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1)); 2555 2556 Type *Type = VT.getTypeForEVT(*DAG.getContext()); 2557 APFloat Max = APFloat::getLargest(Type->getFltSemantics()); 2558 APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true); 2559 2560 SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 2561 SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq, 2562 DAG.getConstantFP(Max, DL, VT)); 2563 return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp, 2564 DAG.getConstantFP(Min, DL, VT)); 2565 } 2566 case Intrinsic::r600_read_ngroups_x: 2567 if (Subtarget->isAmdHsaOS()) 2568 return emitNonHSAIntrinsicError(DAG, DL, VT); 2569 2570 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2571 SI::KernelInputOffsets::NGROUPS_X, false); 2572 case Intrinsic::r600_read_ngroups_y: 2573 if (Subtarget->isAmdHsaOS()) 2574 return emitNonHSAIntrinsicError(DAG, DL, VT); 2575 2576 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2577 SI::KernelInputOffsets::NGROUPS_Y, false); 2578 case Intrinsic::r600_read_ngroups_z: 2579 if (Subtarget->isAmdHsaOS()) 2580 return emitNonHSAIntrinsicError(DAG, DL, VT); 2581 2582 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2583 SI::KernelInputOffsets::NGROUPS_Z, false); 2584 case Intrinsic::r600_read_global_size_x: 2585 if (Subtarget->isAmdHsaOS()) 2586 return emitNonHSAIntrinsicError(DAG, DL, VT); 2587 2588 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2589 SI::KernelInputOffsets::GLOBAL_SIZE_X, false); 2590 case Intrinsic::r600_read_global_size_y: 2591 if (Subtarget->isAmdHsaOS()) 2592 return emitNonHSAIntrinsicError(DAG, DL, VT); 2593 2594 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2595 SI::KernelInputOffsets::GLOBAL_SIZE_Y, false); 2596 case Intrinsic::r600_read_global_size_z: 2597 if (Subtarget->isAmdHsaOS()) 2598 return emitNonHSAIntrinsicError(DAG, DL, VT); 2599 2600 
return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 2601 SI::KernelInputOffsets::GLOBAL_SIZE_Z, false); 2602 case Intrinsic::r600_read_local_size_x: 2603 if (Subtarget->isAmdHsaOS()) 2604 return emitNonHSAIntrinsicError(DAG, DL, VT); 2605 2606 return lowerImplicitZextParam(DAG, Op, MVT::i16, 2607 SI::KernelInputOffsets::LOCAL_SIZE_X); 2608 case Intrinsic::r600_read_local_size_y: 2609 if (Subtarget->isAmdHsaOS()) 2610 return emitNonHSAIntrinsicError(DAG, DL, VT); 2611 2612 return lowerImplicitZextParam(DAG, Op, MVT::i16, 2613 SI::KernelInputOffsets::LOCAL_SIZE_Y); 2614 case Intrinsic::r600_read_local_size_z: 2615 if (Subtarget->isAmdHsaOS()) 2616 return emitNonHSAIntrinsicError(DAG, DL, VT); 2617 2618 return lowerImplicitZextParam(DAG, Op, MVT::i16, 2619 SI::KernelInputOffsets::LOCAL_SIZE_Z); 2620 case Intrinsic::amdgcn_workgroup_id_x: 2621 case Intrinsic::r600_read_tgid_x: 2622 return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass, 2623 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_X), VT); 2624 case Intrinsic::amdgcn_workgroup_id_y: 2625 case Intrinsic::r600_read_tgid_y: 2626 return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass, 2627 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Y), VT); 2628 case Intrinsic::amdgcn_workgroup_id_z: 2629 case Intrinsic::r600_read_tgid_z: 2630 return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass, 2631 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Z), VT); 2632 case Intrinsic::amdgcn_workitem_id_x: 2633 case Intrinsic::r600_read_tidig_x: 2634 return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, 2635 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X), VT); 2636 case Intrinsic::amdgcn_workitem_id_y: 2637 case Intrinsic::r600_read_tidig_y: 2638 return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, 2639 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y), VT); 2640 case Intrinsic::amdgcn_workitem_id_z: 2641 case Intrinsic::r600_read_tidig_z: 2642 return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, 2643 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z), VT); 2644 case AMDGPUIntrinsic::SI_load_const: { 2645 SDValue Ops[] = { 2646 Op.getOperand(1), 2647 Op.getOperand(2) 2648 }; 2649 2650 MachineMemOperand *MMO = MF.getMachineMemOperand( 2651 MachinePointerInfo(), 2652 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | 2653 MachineMemOperand::MOInvariant, 2654 VT.getStoreSize(), 4); 2655 return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL, 2656 Op->getVTList(), Ops, VT, MMO); 2657 } 2658 case AMDGPUIntrinsic::amdgcn_fdiv_fast: 2659 return lowerFDIV_FAST(Op, DAG); 2660 case AMDGPUIntrinsic::SI_vs_load_input: 2661 return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT, 2662 Op.getOperand(1), 2663 Op.getOperand(2), 2664 Op.getOperand(3)); 2665 2666 case AMDGPUIntrinsic::SI_fs_constant: { 2667 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3)); 2668 SDValue Glue = M0.getValue(1); 2669 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, 2670 DAG.getConstant(2, DL, MVT::i32), // P0 2671 Op.getOperand(1), Op.getOperand(2), Glue); 2672 } 2673 case AMDGPUIntrinsic::SI_packf16: 2674 if (Op.getOperand(1).isUndef() && Op.getOperand(2).isUndef()) 2675 return DAG.getUNDEF(MVT::i32); 2676 return Op; 2677 case AMDGPUIntrinsic::SI_fs_interp: { 2678 SDValue IJ = Op.getOperand(4); 2679 SDValue I = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ, 2680 DAG.getConstant(0, DL, MVT::i32)); 2681 SDValue J = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, 
DL, MVT::i32, IJ, 2682 DAG.getConstant(1, DL, MVT::i32)); 2683 I = DAG.getNode(ISD::BITCAST, DL, MVT::f32, I); 2684 J = DAG.getNode(ISD::BITCAST, DL, MVT::f32, J); 2685 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3)); 2686 SDValue Glue = M0.getValue(1); 2687 SDValue P1 = DAG.getNode(AMDGPUISD::INTERP_P1, DL, 2688 DAG.getVTList(MVT::f32, MVT::Glue), 2689 I, Op.getOperand(1), Op.getOperand(2), Glue); 2690 Glue = SDValue(P1.getNode(), 1); 2691 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, P1, J, 2692 Op.getOperand(1), Op.getOperand(2), Glue); 2693 } 2694 case Intrinsic::amdgcn_interp_mov: { 2695 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); 2696 SDValue Glue = M0.getValue(1); 2697 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1), 2698 Op.getOperand(2), Op.getOperand(3), Glue); 2699 } 2700 case Intrinsic::amdgcn_interp_p1: { 2701 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); 2702 SDValue Glue = M0.getValue(1); 2703 return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1), 2704 Op.getOperand(2), Op.getOperand(3), Glue); 2705 } 2706 case Intrinsic::amdgcn_interp_p2: { 2707 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5)); 2708 SDValue Glue = SDValue(M0.getNode(), 1); 2709 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1), 2710 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4), 2711 Glue); 2712 } 2713 case Intrinsic::amdgcn_sin: 2714 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1)); 2715 2716 case Intrinsic::amdgcn_cos: 2717 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1)); 2718 2719 case Intrinsic::amdgcn_log_clamp: { 2720 if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS) 2721 return SDValue(); 2722 2723 DiagnosticInfoUnsupported BadIntrin( 2724 *MF.getFunction(), "intrinsic not supported on subtarget", 2725 DL.getDebugLoc()); 2726 DAG.getContext()->diagnose(BadIntrin); 2727 return DAG.getUNDEF(VT); 2728 } 2729 case Intrinsic::amdgcn_ldexp: 2730 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, 2731 Op.getOperand(1), Op.getOperand(2)); 2732 2733 case Intrinsic::amdgcn_fract: 2734 return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1)); 2735 2736 case Intrinsic::amdgcn_class: 2737 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT, 2738 Op.getOperand(1), Op.getOperand(2)); 2739 case Intrinsic::amdgcn_div_fmas: 2740 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT, 2741 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), 2742 Op.getOperand(4)); 2743 2744 case Intrinsic::amdgcn_div_fixup: 2745 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT, 2746 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 2747 2748 case Intrinsic::amdgcn_trig_preop: 2749 return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT, 2750 Op.getOperand(1), Op.getOperand(2)); 2751 case Intrinsic::amdgcn_div_scale: { 2752 // 3rd parameter required to be a constant. 2753 const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 2754 if (!Param) 2755 return DAG.getUNDEF(VT); 2756 2757 // Translate to the operands expected by the machine instruction. The 2758 // first parameter must be the same as the first instruction. 2759 SDValue Numerator = Op.getOperand(1); 2760 SDValue Denominator = Op.getOperand(2); 2761 2762 // Note this order is opposite of the machine instruction's operations, 2763 // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. 
The 2764 // intrinsic has the numerator as the first operand to match a normal 2765 // division operation. 2766 2767 SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator; 2768 2769 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0, 2770 Denominator, Numerator); 2771 } 2772 case Intrinsic::amdgcn_icmp: { 2773 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 2774 int CondCode = CD->getSExtValue(); 2775 2776 if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE || 2777 CondCode >= ICmpInst::Predicate::BAD_ICMP_PREDICATE) 2778 return DAG.getUNDEF(VT); 2779 2780 ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode); 2781 ISD::CondCode CCOpcode = getICmpCondCode(IcInput); 2782 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1), 2783 Op.getOperand(2), DAG.getCondCode(CCOpcode)); 2784 } 2785 case Intrinsic::amdgcn_fcmp: { 2786 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 2787 int CondCode = CD->getSExtValue(); 2788 2789 if (CondCode <= FCmpInst::Predicate::FCMP_FALSE || 2790 CondCode >= FCmpInst::Predicate::FCMP_TRUE) 2791 return DAG.getUNDEF(VT); 2792 2793 FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode); 2794 ISD::CondCode CCOpcode = getFCmpCondCode(IcInput); 2795 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1), 2796 Op.getOperand(2), DAG.getCondCode(CCOpcode)); 2797 } 2798 case Intrinsic::amdgcn_fmed3: 2799 return DAG.getNode(AMDGPUISD::FMED3, DL, VT, 2800 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 2801 case Intrinsic::amdgcn_fmul_legacy: 2802 return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT, 2803 Op.getOperand(1), Op.getOperand(2)); 2804 case Intrinsic::amdgcn_sffbh: 2805 case AMDGPUIntrinsic::AMDGPU_flbit_i32: // Legacy name. 2806 return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1)); 2807 default: 2808 return AMDGPUTargetLowering::LowerOperation(Op, DAG); 2809 } 2810 } 2811 2812 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, 2813 SelectionDAG &DAG) const { 2814 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2815 SDLoc DL(Op); 2816 switch (IntrID) { 2817 case Intrinsic::amdgcn_atomic_inc: 2818 case Intrinsic::amdgcn_atomic_dec: { 2819 MemSDNode *M = cast<MemSDNode>(Op); 2820 unsigned Opc = (IntrID == Intrinsic::amdgcn_atomic_inc) ? 2821 AMDGPUISD::ATOMIC_INC : AMDGPUISD::ATOMIC_DEC; 2822 SDValue Ops[] = { 2823 M->getOperand(0), // Chain 2824 M->getOperand(2), // Ptr 2825 M->getOperand(3) // Value 2826 }; 2827 2828 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops, 2829 M->getMemoryVT(), M->getMemOperand()); 2830 } 2831 case Intrinsic::amdgcn_buffer_load: 2832 case Intrinsic::amdgcn_buffer_load_format: { 2833 SDValue Ops[] = { 2834 Op.getOperand(0), // Chain 2835 Op.getOperand(2), // rsrc 2836 Op.getOperand(3), // vindex 2837 Op.getOperand(4), // offset 2838 Op.getOperand(5), // glc 2839 Op.getOperand(6) // slc 2840 }; 2841 MachineFunction &MF = DAG.getMachineFunction(); 2842 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 2843 2844 unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ? 
2845 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; 2846 EVT VT = Op.getValueType(); 2847 EVT IntVT = VT.changeTypeToInteger(); 2848 2849 MachineMemOperand *MMO = MF.getMachineMemOperand( 2850 MachinePointerInfo(MFI->getBufferPSV()), 2851 MachineMemOperand::MOLoad, 2852 VT.getStoreSize(), VT.getStoreSize()); 2853 2854 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, MMO); 2855 } 2856 default: 2857 return SDValue(); 2858 } 2859 } 2860 2861 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, 2862 SelectionDAG &DAG) const { 2863 MachineFunction &MF = DAG.getMachineFunction(); 2864 SDLoc DL(Op); 2865 SDValue Chain = Op.getOperand(0); 2866 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2867 2868 switch (IntrinsicID) { 2869 case Intrinsic::amdgcn_exp: { 2870 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2)); 2871 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3)); 2872 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8)); 2873 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9)); 2874 2875 const SDValue Ops[] = { 2876 Chain, 2877 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt 2878 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en 2879 Op.getOperand(4), // src0 2880 Op.getOperand(5), // src1 2881 Op.getOperand(6), // src2 2882 Op.getOperand(7), // src3 2883 DAG.getTargetConstant(0, DL, MVT::i1), // compr 2884 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1) 2885 }; 2886 2887 unsigned Opc = Done->isNullValue() ? 2888 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE; 2889 return DAG.getNode(Opc, DL, Op->getVTList(), Ops); 2890 } 2891 case Intrinsic::amdgcn_exp_compr: { 2892 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2)); 2893 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3)); 2894 SDValue Src0 = Op.getOperand(4); 2895 SDValue Src1 = Op.getOperand(5); 2896 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6)); 2897 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7)); 2898 2899 SDValue Undef = DAG.getUNDEF(MVT::f32); 2900 const SDValue Ops[] = { 2901 Chain, 2902 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt 2903 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en 2904 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), 2905 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), 2906 Undef, // src2 2907 Undef, // src3 2908 DAG.getTargetConstant(1, DL, MVT::i1), // compr 2909 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1) 2910 }; 2911 2912 unsigned Opc = Done->isNullValue() ? 
2913 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE; 2914 return DAG.getNode(Opc, DL, Op->getVTList(), Ops); 2915 } 2916 case Intrinsic::amdgcn_s_sendmsg: 2917 case AMDGPUIntrinsic::SI_sendmsg: { 2918 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3)); 2919 SDValue Glue = Chain.getValue(1); 2920 return DAG.getNode(AMDGPUISD::SENDMSG, DL, MVT::Other, Chain, 2921 Op.getOperand(2), Glue); 2922 } 2923 case Intrinsic::amdgcn_s_sendmsghalt: { 2924 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3)); 2925 SDValue Glue = Chain.getValue(1); 2926 return DAG.getNode(AMDGPUISD::SENDMSGHALT, DL, MVT::Other, Chain, 2927 Op.getOperand(2), Glue); 2928 } 2929 case AMDGPUIntrinsic::SI_tbuffer_store: { 2930 SDValue Ops[] = { 2931 Chain, 2932 Op.getOperand(2), 2933 Op.getOperand(3), 2934 Op.getOperand(4), 2935 Op.getOperand(5), 2936 Op.getOperand(6), 2937 Op.getOperand(7), 2938 Op.getOperand(8), 2939 Op.getOperand(9), 2940 Op.getOperand(10), 2941 Op.getOperand(11), 2942 Op.getOperand(12), 2943 Op.getOperand(13), 2944 Op.getOperand(14) 2945 }; 2946 2947 EVT VT = Op.getOperand(3).getValueType(); 2948 2949 MachineMemOperand *MMO = MF.getMachineMemOperand( 2950 MachinePointerInfo(), 2951 MachineMemOperand::MOStore, 2952 VT.getStoreSize(), 4); 2953 return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL, 2954 Op->getVTList(), Ops, VT, MMO); 2955 } 2956 case AMDGPUIntrinsic::AMDGPU_kill: { 2957 SDValue Src = Op.getOperand(2); 2958 if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) { 2959 if (!K->isNegative()) 2960 return Chain; 2961 2962 SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32); 2963 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne); 2964 } 2965 2966 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src); 2967 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast); 2968 } 2969 case AMDGPUIntrinsic::SI_export: { // Legacy intrinsic. 2970 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(2)); 2971 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(3)); 2972 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(4)); 2973 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(5)); 2974 const ConstantSDNode *Compr = cast<ConstantSDNode>(Op.getOperand(6)); 2975 2976 const SDValue Ops[] = { 2977 Chain, 2978 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), 2979 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), 2980 Op.getOperand(7), // src0 2981 Op.getOperand(8), // src1 2982 Op.getOperand(9), // src2 2983 Op.getOperand(10), // src3 2984 DAG.getTargetConstant(Compr->getZExtValue(), DL, MVT::i1), 2985 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1) 2986 }; 2987 2988 unsigned Opc = Done->isNullValue() ? 2989 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE; 2990 return DAG.getNode(Opc, DL, Op->getVTList(), Ops); 2991 } 2992 default: 2993 return SDValue(); 2994 } 2995 } 2996 2997 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 2998 SDLoc DL(Op); 2999 LoadSDNode *Load = cast<LoadSDNode>(Op); 3000 ISD::LoadExtType ExtType = Load->getExtensionType(); 3001 EVT MemVT = Load->getMemoryVT(); 3002 3003 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) { 3004 // FIXME: Copied from PPC 3005 // First, load into 32 bits, then truncate to 1 bit. 3006 3007 SDValue Chain = Load->getChain(); 3008 SDValue BasePtr = Load->getBasePtr(); 3009 MachineMemOperand *MMO = Load->getMemOperand(); 3010 3011 EVT RealMemVT = (MemVT == MVT::i1) ? 
MVT::i8 : MVT::i16; 3012 3013 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, 3014 BasePtr, RealMemVT, MMO); 3015 3016 SDValue Ops[] = { 3017 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD), 3018 NewLD.getValue(1) 3019 }; 3020 3021 return DAG.getMergeValues(Ops, DL); 3022 } 3023 3024 if (!MemVT.isVector()) 3025 return SDValue(); 3026 3027 assert(Op.getValueType().getVectorElementType() == MVT::i32 && 3028 "Custom lowering for non-i32 vectors hasn't been implemented."); 3029 3030 unsigned AS = Load->getAddressSpace(); 3031 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT, 3032 AS, Load->getAlignment())) { 3033 SDValue Ops[2]; 3034 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG); 3035 return DAG.getMergeValues(Ops, DL); 3036 } 3037 3038 MachineFunction &MF = DAG.getMachineFunction(); 3039 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 3040 // If there is a possibilty that flat instruction access scratch memory 3041 // then we need to use the same legalization rules we use for private. 3042 if (AS == AMDGPUAS::FLAT_ADDRESS) 3043 AS = MFI->hasFlatScratchInit() ? 3044 AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; 3045 3046 unsigned NumElements = MemVT.getVectorNumElements(); 3047 switch (AS) { 3048 case AMDGPUAS::CONSTANT_ADDRESS: 3049 if (isMemOpUniform(Load)) 3050 return SDValue(); 3051 // Non-uniform loads will be selected to MUBUF instructions, so they 3052 // have the same legalization requirements as global and private 3053 // loads. 3054 // 3055 LLVM_FALLTHROUGH; 3056 case AMDGPUAS::GLOBAL_ADDRESS: 3057 if (Subtarget->getScalarizeGlobalBehavior() && isMemOpUniform(Load) && 3058 isMemOpHasNoClobberedMemOperand(Load)) 3059 return SDValue(); 3060 // Non-uniform loads will be selected to MUBUF instructions, so they 3061 // have the same legalization requirements as global and private 3062 // loads. 3063 // 3064 LLVM_FALLTHROUGH; 3065 case AMDGPUAS::FLAT_ADDRESS: 3066 if (NumElements > 4) 3067 return SplitVectorLoad(Op, DAG); 3068 // v4 loads are supported for private and global memory. 3069 return SDValue(); 3070 case AMDGPUAS::PRIVATE_ADDRESS: 3071 // Depending on the setting of the private_element_size field in the 3072 // resource descriptor, we can only make private accesses up to a certain 3073 // size. 3074 switch (Subtarget->getMaxPrivateElementSize()) { 3075 case 4: 3076 return scalarizeVectorLoad(Load, DAG); 3077 case 8: 3078 if (NumElements > 2) 3079 return SplitVectorLoad(Op, DAG); 3080 return SDValue(); 3081 case 16: 3082 // Same as global/flat 3083 if (NumElements > 4) 3084 return SplitVectorLoad(Op, DAG); 3085 return SDValue(); 3086 default: 3087 llvm_unreachable("unsupported private_element_size"); 3088 } 3089 case AMDGPUAS::LOCAL_ADDRESS: 3090 if (NumElements > 2) 3091 return SplitVectorLoad(Op, DAG); 3092 3093 if (NumElements == 2) 3094 return SDValue(); 3095 3096 // If properly aligned, if we split we might be able to use ds_read_b64. 
3097 return SplitVectorLoad(Op, DAG); 3098 default: 3099 return SDValue(); 3100 } 3101 } 3102 3103 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 3104 if (Op.getValueType() != MVT::i64) 3105 return SDValue(); 3106 3107 SDLoc DL(Op); 3108 SDValue Cond = Op.getOperand(0); 3109 3110 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 3111 SDValue One = DAG.getConstant(1, DL, MVT::i32); 3112 3113 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); 3114 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); 3115 3116 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); 3117 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); 3118 3119 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); 3120 3121 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); 3122 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); 3123 3124 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); 3125 3126 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi}); 3127 return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res); 3128 } 3129 3130 // Catch division cases where we can use shortcuts with rcp and rsq 3131 // instructions. 3132 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op, 3133 SelectionDAG &DAG) const { 3134 SDLoc SL(Op); 3135 SDValue LHS = Op.getOperand(0); 3136 SDValue RHS = Op.getOperand(1); 3137 EVT VT = Op.getValueType(); 3138 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath; 3139 3140 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { 3141 if (Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals()) || 3142 VT == MVT::f16) { 3143 if (CLHS->isExactlyValue(1.0)) { 3144 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to 3145 // the CI documentation has a worst case error of 1 ulp. 3146 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to 3147 // use it as long as we aren't trying to use denormals. 3148 // 3149 // v_rcp_f16 and v_rsq_f16 DO support denormals. 3150 3151 // 1.0 / sqrt(x) -> rsq(x) 3152 3153 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP 3154 // error seems really high at 2^29 ULP. 3155 if (RHS.getOpcode() == ISD::FSQRT) 3156 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); 3157 3158 // 1.0 / x -> rcp(x) 3159 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 3160 } 3161 3162 // Same as for 1.0, but expand the sign out of the constant. 3163 if (CLHS->isExactlyValue(-1.0)) { 3164 // -1.0 / x -> rcp (fneg x) 3165 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3166 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS); 3167 } 3168 } 3169 } 3170 3171 const SDNodeFlags *Flags = Op->getFlags(); 3172 3173 if (Unsafe || Flags->hasAllowReciprocal()) { 3174 // Turn into multiply by the reciprocal. 

// Faster 2.5 ULP division that does not support denormals.
SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);

  SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);

  const APFloat K0Val(BitsToFloat(0x6f800000));
  const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);

  const APFloat K1Val(BitsToFloat(0x2f800000));
  const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);

  EVT SetCCVT =
    getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);

  SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);

  SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);

  // TODO: Should this propagate fast-math-flags?
  r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);

  // rcp does not support denormals.
  SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);

  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);

  return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
}
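
// A sketch of the scaling above, for reference: K0 is 0x6f800000 (2^96) and
// K1 is 0x2f800000 (2^-32). When |RHS| > 2^96 the denominator is pre-scaled
// by 2^-32 before taking the reciprocal, and the same 2^-32 factor is applied
// to the final product, so the value computed is
//   r3 * (LHS * rcp(RHS * r3)) ~= LHS / RHS   with r3 in {1.0, 2^-32},
// which presumably keeps the intermediate reciprocal and product away from
// the denormal range that rcp and denormal flushing would mishandle.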

SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
  if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
    return FastLowered;

  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);

  SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);

  SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
                                          RHS, RHS, LHS);
  SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
                                        LHS, RHS, LHS);

  // Denominator is scaled to not be denormal, so using rcp is ok.
  SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
                                  DenominatorScaled);
  SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
                                     DenominatorScaled);

  const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
                               (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
                               (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);

  const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);

  if (!Subtarget->hasFP32Denormals()) {
    SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
    const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
                                                      SL, MVT::i32);
    SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
                                       DAG.getEntryNode(),
                                       EnableDenormValue, BitField);
    SDValue Ops[3] = {
      NegDivScale0,
      EnableDenorm.getValue(0),
      EnableDenorm.getValue(1)
    };

    NegDivScale0 = DAG.getMergeValues(Ops, SL);
  }

  SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
                             ApproxRcp, One, NegDivScale0);

  SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
                             ApproxRcp, Fma0);

  SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
                           Fma1, Fma1);

  SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
                             NumeratorScaled, Mul);

  SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul, Fma2);

  SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
                             NumeratorScaled, Fma3);

  if (!Subtarget->hasFP32Denormals()) {
    const SDValue DisableDenormValue =
        DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
    SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
                                        Fma4.getValue(1),
                                        DisableDenormValue,
                                        BitField,
                                        Fma4.getValue(2));

    SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                                      DisableDenorm, DAG.getRoot());
    DAG.setRoot(OutputChain);
  }

  SDValue Scale = NumeratorScaled.getValue(1);
  SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
                             Fma4, Fma1, Fma3, Scale);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
}
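
// The FMA chain above is a standard divide refinement on the scaled operands.
// Roughly, with b' = DenominatorScaled, a' = NumeratorScaled, r = ApproxRcp:
//   Fma0 = 1 - b' * r          (error of the initial reciprocal)
//   Fma1 = r + r * Fma0        (refined reciprocal)
//   Mul  = a' * Fma1           (initial quotient estimate)
//   Fma2 = a' - b' * Mul       (remainder of the estimate)
//   Fma3 = Mul + Fma1 * Fma2   (refined quotient)
//   Fma4 = a' - b' * Fma3      (final remainder)
// DIV_FMAS then combines Fma4, Fma1 and Fma3 using the scale flag, and
// DIV_FIXUP undoes the scaling and handles the special cases.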

SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
  if (DAG.getTarget().Options.UnsafeFPMath)
    return lowerFastUnsafeFDIV(Op, DAG);

  SDLoc SL(Op);
  SDValue X = Op.getOperand(0);
  SDValue Y = Op.getOperand(1);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);

  SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);

  SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);

  SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);

  SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);

  SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);

  SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);

  SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);

  SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);

  SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);

  SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
                             NegDivScale0, Mul, DivScale1);

  SDValue Scale;

  if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
    // Work around a hardware bug on SI where the condition output from
    // div_scale is not usable.

    const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);

    // Figure out which scale to use for div_fmas.
    SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
    SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
    SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
    SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);

    SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
    SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);

    SDValue Scale0Hi
      = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
    SDValue Scale1Hi
      = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);

    SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
    SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
    Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
  } else {
    Scale = DivScale1.getValue(1);
  }

  SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
                             Fma4, Fma3, Mul, Scale);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
}

SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  if (VT == MVT::f32)
    return LowerFDIV32(Op, DAG);

  if (VT == MVT::f64)
    return LowerFDIV64(Op, DAG);

  if (VT == MVT::f16)
    return LowerFDIV16(Op, DAG);

  llvm_unreachable("Unexpected type for fdiv");
}

SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT VT = Store->getMemoryVT();

  if (VT == MVT::i1) {
    return DAG.getTruncStore(Store->getChain(), DL,
                             DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
                             Store->getBasePtr(), MVT::i1, Store->getMemOperand());
  }

  assert(VT.isVector() &&
         Store->getValue().getValueType().getScalarType() == MVT::i32);

  unsigned AS = Store->getAddressSpace();
  if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                          AS, Store->getAlignment())) {
    return expandUnalignedStore(Store, DAG);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // If there is a possibility that flat instructions access scratch memory
  // then we need to use the same legalization rules we use for private.
  if (AS == AMDGPUAS::FLAT_ADDRESS)
    AS = MFI->hasFlatScratchInit() ?
         AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;

  unsigned NumElements = VT.getVectorNumElements();
  switch (AS) {
  case AMDGPUAS::GLOBAL_ADDRESS:
  case AMDGPUAS::FLAT_ADDRESS:
    if (NumElements > 4)
      return SplitVectorStore(Op, DAG);
    return SDValue();
  case AMDGPUAS::PRIVATE_ADDRESS: {
    switch (Subtarget->getMaxPrivateElementSize()) {
    case 4:
      return scalarizeVectorStore(Store, DAG);
    case 8:
      if (NumElements > 2)
        return SplitVectorStore(Op, DAG);
      return SDValue();
    case 16:
      if (NumElements > 4)
        return SplitVectorStore(Op, DAG);
      return SDValue();
    default:
      llvm_unreachable("unsupported private_element_size");
    }
  }
  case AMDGPUAS::LOCAL_ADDRESS: {
    if (NumElements > 2)
      return SplitVectorStore(Op, DAG);

    if (NumElements == 2)
      return Op;

    // If properly aligned, splitting might let us use ds_write_b64.
    return SplitVectorStore(Op, DAG);
  }
  default:
    llvm_unreachable("unhandled address space");
  }
}

SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Arg = Op.getOperand(0);
  // TODO: Should this propagate fast-math-flags?
  SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
                                  DAG.getNode(ISD::FMUL, DL, VT, Arg,
                                              DAG.getConstantFP(0.5/M_PI, DL,
                                                                VT)));

  switch (Op.getOpcode()) {
  case ISD::FCOS:
    return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart);
  case ISD::FSIN:
    return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart);
  default:
    llvm_unreachable("Wrong trig opcode");
  }
}
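
// Worked example of the range reduction above: the multiply by 0.5/M_PI
// suggests SIN_HW/COS_HW expect their argument already divided by 2*pi, i.e.
// measured in full turns, with FRACT bringing it into [0, 1). For x = 3*pi/2:
// x * (1/(2*pi)) = 0.75, FRACT(0.75) = 0.75, and sin(0.75 turns) =
// sin(3*pi/2) = -1, the expected result.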

SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
  assert(AtomicNode->isCompareAndSwap());
  unsigned AS = AtomicNode->getAddressSpace();

  // No custom lowering required for local address space.
  if (!isFlatGlobalAddrSpace(AS))
    return Op;

  // Non-local address space requires custom lowering for atomic compare
  // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2.
  SDLoc DL(Op);
  SDValue ChainIn = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  SDValue Old = Op.getOperand(2);
  SDValue New = Op.getOperand(3);
  EVT VT = Op.getValueType();
  MVT SimpleVT = VT.getSimpleVT();
  MVT VecType = MVT::getVectorVT(SimpleVT, 2);

  SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
  SDValue Ops[] = { ChainIn, Addr, NewOld };

  return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
                                 Ops, VT, AtomicNode->getMemOperand());
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
                                                     DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  EVT ScalarVT = VT.getScalarType();
  if (ScalarVT != MVT::f32)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue Src = N->getOperand(0);
  EVT SrcVT = Src.getValueType();

  // TODO: We could try to match extracting the higher bytes, which would be
  // easier if i8 vectors weren't promoted to i32 vectors, particularly after
  // types are legalized. v4i8 -> v4f32 is probably the only case to worry
  // about in practice.
  if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) {
    if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
      SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
      DCI.AddToWorklist(Cvt.getNode());
      return Cvt;
    }
  }

  return SDValue();
}

/// \brief Return true if the given offset size in bytes can be folded into
/// the immediate offsets of a memory instruction for the given address space.
static bool canFoldOffset(unsigned OffsetSize, unsigned AS,
                          const SISubtarget &STI) {
  switch (AS) {
  case AMDGPUAS::GLOBAL_ADDRESS:
    // MUBUF instructions have a 12-bit offset in bytes.
    return isUInt<12>(OffsetSize);
  case AMDGPUAS::CONSTANT_ADDRESS:
    // SMRD instructions have an 8-bit offset in dwords on SI and
    // a 20-bit offset in bytes on VI.
    if (STI.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return isUInt<20>(OffsetSize);
    else
      return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
  case AMDGPUAS::LOCAL_ADDRESS:
  case AMDGPUAS::REGION_ADDRESS:
    // The single offset versions have a 16-bit offset in bytes.
    return isUInt<16>(OffsetSize);
  case AMDGPUAS::PRIVATE_ADDRESS:
    // Indirect register addressing does not use any offsets.
  default:
    return false;
  }
}
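
// Summary of the immediate offset ranges checked above (in bytes unless
// noted otherwise):
//   MUBUF (global):          12-bit unsigned offset, 0..4095
//   SMRD (constant) on SI:   8-bit unsigned dword offset, 0..255 dwords
//   SMRD (constant) on VI+:  20-bit unsigned byte offset
//   DS (local/region):       16-bit unsigned offset, 0..65535
//   Private:                 indirect register addressing, no immediate offset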

// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)

// This is a variant of
// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
//
// The normal DAG combiner will do this, but only if the add has one use since
// that would increase the number of instructions.
//
// This prevents us from seeing a constant offset that can be folded into a
// memory instruction's addressing mode. If we know the resulting add offset of
// a pointer can be folded into an addressing offset, we can replace the pointer
// operand with the add of the new constant offset. This eliminates one of the
// uses, and may allow the remaining use to also be simplified.
//
SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
                                               unsigned AddrSpace,
                                               DAGCombinerInfo &DCI) const {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  if (N0.getOpcode() != ISD::ADD)
    return SDValue();

  const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
  if (!CN1)
    return SDValue();

  const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  if (!CAdd)
    return SDValue();

  // If the resulting offset is too large, we can't fold it into the addressing
  // mode offset.
  APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
  if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *getSubtarget()))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
  SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);

  return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset);
}
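
// Worked example of the combine above for an LDS access: a pointer computed
// as (shl (add x, 16), 2) becomes (add (shl x, 2), 64), since 16 << 2 == 64
// and 64 fits in the 16-bit DS offset. Instruction selection can then fold
// the +64 directly into the ds instruction's offset field.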

SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SDValue Ptr = N->getBasePtr();
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  // TODO: We could also do this for multiplies.
  unsigned AS = N->getAddressSpace();
  if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUAS::PRIVATE_ADDRESS) {
    SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI);
    if (NewPtr) {
      SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());

      NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
      return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
    }
  }

  return SDValue();
}

static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
  return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
         (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
         (Opc == ISD::XOR && Val == 0);
}

// Break up a 64-bit bit operation with a constant into two 32-bit and/or/xor
// operations. This will typically happen anyway for a VALU 64-bit and. This
// exposes other 32-bit integer combine opportunities since most 64-bit
// operations are decomposed this way. TODO: We won't want this for SALU
// especially if it is an inline immediate.
SDValue SITargetLowering::splitBinaryBitConstantOp(
  DAGCombinerInfo &DCI,
  const SDLoc &SL,
  unsigned Opc, SDValue LHS,
  const ConstantSDNode *CRHS) const {
  uint64_t Val = CRHS->getZExtValue();
  uint32_t ValLo = Lo_32(Val);
  uint32_t ValHi = Hi_32(Val);
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
       bitOpWithConstantIsReducible(Opc, ValHi)) ||
      (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
    // If we need to materialize a 64-bit immediate, it will be split up later
    // anyway. Avoid creating the harder-to-understand 64-bit immediate
    // materialization.
    return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
  }

  return SDValue();
}
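
// Example of the split performed above: (and i64:x, 0x00000000ffffffff) is
// rewritten as an AND of the two 32-bit halves with 0xffffffff and 0x0
// respectively. The split is only done because bitOpWithConstantIsReducible
// sees that the halves become trivial, so later combines can fold them down
// to lo_32(x) and 0 before the result is repacked into an i64.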

SDValue SITargetLowering::performAndCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  if (DCI.isBeforeLegalize())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  if (VT == MVT::i64) {
    const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
    if (CRHS) {
      if (SDValue Split
          = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
        return Split;
    }
  }

  // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
  // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
  if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
    ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
    ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();

    SDValue X = LHS.getOperand(0);
    SDValue Y = RHS.getOperand(0);
    if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
      return SDValue();

    if (LCC == ISD::SETO) {
      if (X != LHS.getOperand(1))
        return SDValue();

      if (RCC == ISD::SETUNE) {
        const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
        if (!C1 || !C1->isInfinity() || C1->isNegative())
          return SDValue();

        const uint32_t Mask = SIInstrFlags::N_NORMAL |
                              SIInstrFlags::N_SUBNORMAL |
                              SIInstrFlags::N_ZERO |
                              SIInstrFlags::P_ZERO |
                              SIInstrFlags::P_SUBNORMAL |
                              SIInstrFlags::P_NORMAL;

        static_assert(((~(SIInstrFlags::S_NAN |
                          SIInstrFlags::Q_NAN |
                          SIInstrFlags::N_INFINITY |
                          SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
                      "mask not equal");

        SDLoc DL(N);
        return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
                           X, DAG.getConstant(Mask, DL, MVT::i32));
      }
    }
  }

  return SDValue();
}

SDValue SITargetLowering::performOrCombine(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  EVT VT = N->getValueType(0);
  if (VT == MVT::i1) {
    // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
    if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
        RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
      SDValue Src = LHS.getOperand(0);
      if (Src != RHS.getOperand(0))
        return SDValue();

      const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
      const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
      if (!CLHS || !CRHS)
        return SDValue();

      // Only 10 bits are used.
      static const uint32_t MaxMask = 0x3ff;

      uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
      SDLoc DL(N);
      return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
                         Src, DAG.getConstant(NewMask, DL, MVT::i32));
    }

    return SDValue();
  }

  if (VT != MVT::i64)
    return SDValue();

  // TODO: This could be a generic combine with a predicate for extracting the
  // high half of an integer being free.

  // (or i64:x, (zero_extend i32:y)) ->
  //   i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
  if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
      RHS.getOpcode() != ISD::ZERO_EXTEND)
    std::swap(LHS, RHS);

  if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
    SDValue ExtSrc = RHS.getOperand(0);
    EVT SrcVT = ExtSrc.getValueType();
    if (SrcVT == MVT::i32) {
      SDLoc SL(N);
      SDValue LowLHS, HiBits;
      std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
      SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);

      DCI.AddToWorklist(LowOr.getNode());
      DCI.AddToWorklist(HiBits.getNode());

      SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                LowOr, HiBits);
      return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
    }
  }

  const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (CRHS) {
    if (SDValue Split
        = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
      return Split;
  }

  return SDValue();
}

SDValue SITargetLowering::performXorCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  if (VT != MVT::i64)
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
  if (CRHS) {
    if (SDValue Split
        = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
      return Split;
  }

  return SDValue();
}

SDValue SITargetLowering::performClassCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Mask = N->getOperand(1);

  // fp_class x, 0 -> false
  if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
    if (CMask->isNullValue())
      return DAG.getConstant(0, SDLoc(N), MVT::i1);
  }

  if (N->getOperand(0).isUndef())
    return DAG.getUNDEF(MVT::i1);

  return SDValue();
}

// Constant fold canonicalize.
SDValue SITargetLowering::performFCanonicalizeCombine(
  SDNode *N,
  DAGCombinerInfo &DCI) const {
  ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
  if (!CFP)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  const APFloat &C = CFP->getValueAPF();

  // Flush denormals to 0 if not enabled.
  if (C.isDenormal()) {
    EVT VT = N->getValueType(0);
    if (VT == MVT::f32 && !Subtarget->hasFP32Denormals())
      return DAG.getConstantFP(0.0, SDLoc(N), VT);

    if (VT == MVT::f64 && !Subtarget->hasFP64Denormals())
      return DAG.getConstantFP(0.0, SDLoc(N), VT);

    if (VT == MVT::f16 && !Subtarget->hasFP16Denormals())
      return DAG.getConstantFP(0.0, SDLoc(N), VT);
  }

  if (C.isNaN()) {
    EVT VT = N->getValueType(0);
    APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
    if (C.isSignaling()) {
      // Quiet a signaling NaN.
      return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
    }

    // Make sure it is the canonical NaN bitpattern.
    //
    // TODO: Can we use -1 as the canonical NaN value since it's an inline
    // immediate?
    if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
      return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
  }

  return SDValue(CFP, 0);
}

static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
  switch (Opc) {
  case ISD::FMAXNUM:
    return AMDGPUISD::FMAX3;
  case ISD::SMAX:
    return AMDGPUISD::SMAX3;
  case ISD::UMAX:
    return AMDGPUISD::UMAX3;
  case ISD::FMINNUM:
    return AMDGPUISD::FMIN3;
  case ISD::SMIN:
    return AMDGPUISD::SMIN3;
  case ISD::UMIN:
    return AMDGPUISD::UMIN3;
  default:
    llvm_unreachable("Not a min/max opcode");
  }
}

static SDValue performIntMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
                                        SDValue Op0, SDValue Op1, bool Signed) {
  ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
  if (!K1)
    return SDValue();

  ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
  if (!K0)
    return SDValue();

  if (Signed) {
    if (K0->getAPIntValue().sge(K1->getAPIntValue()))
      return SDValue();
  } else {
    if (K0->getAPIntValue().uge(K1->getAPIntValue()))
      return SDValue();
  }

  EVT VT = K0->getValueType(0);

  MVT NVT = MVT::i32;
  unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;

  SDValue Tmp1, Tmp2, Tmp3;
  Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
  Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
  Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);

  if (VT == MVT::i16) {
    Tmp1 = DAG.getNode(Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3, SL, NVT,
                       Tmp1, Tmp2, Tmp3);

    return DAG.getNode(ISD::TRUNCATE, SL, VT, Tmp1);
  } else
    return DAG.getNode(Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3, SL, VT,
                       Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
}

static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
  if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions())
    return true;

  return DAG.isKnownNeverNaN(Op);
}

static SDValue performFPMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
                                       SDValue Op0, SDValue Op1) {
  ConstantFPSDNode *K1 = dyn_cast<ConstantFPSDNode>(Op1);
  if (!K1)
    return SDValue();

  ConstantFPSDNode *K0 = dyn_cast<ConstantFPSDNode>(Op0.getOperand(1));
  if (!K0)
    return SDValue();

  // Ordered >= (although NaN inputs should have folded away by now).
  APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
  if (Cmp == APFloat::cmpGreaterThan)
    return SDValue();

  // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
  // signaling NaN gives a quiet NaN. The quiet NaN input to the min would then
  // give the other result, which is different from med3 with a NaN input.
  SDValue Var = Op0.getOperand(0);
  if (!isKnownNeverSNan(DAG, Var))
    return SDValue();

  return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
                     Var, SDValue(K0, 0), SDValue(K1, 0));
}
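
// Typical shape for the med3 combines used below: an integer clamp such as
//   smin(smax(x, 0), 255)
// has K0 (0) < K1 (255) and becomes a single v_med3_i32 x, 0, 255. The same
// shape with fminnum/fmaxnum and ordered constants becomes v_med3_f32,
// provided x is known not to be a signaling NaN.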

SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  unsigned Opc = N->getOpcode();
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // Only do this if the inner op has one use since this would just increase
  // register pressure for no benefit.

  if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY) {
    // max(max(a, b), c) -> max3(a, b, c)
    // min(min(a, b), c) -> min3(a, b, c)
    if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
      SDLoc DL(N);
      return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
                         DL,
                         N->getValueType(0),
                         Op0.getOperand(0),
                         Op0.getOperand(1),
                         Op1);
    }

    // Try commuted.
    // max(a, max(b, c)) -> max3(a, b, c)
    // min(a, min(b, c)) -> min3(a, b, c)
    if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
      SDLoc DL(N);
      return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
                         DL,
                         N->getValueType(0),
                         Op0,
                         Op1.getOperand(0),
                         Op1.getOperand(1));
    }
  }

  // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
  if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
    if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
      return Med3;
  }

  if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
    if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
      return Med3;
  }

  // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
  if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
       (Opc == AMDGPUISD::FMIN_LEGACY &&
        Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
      N->getValueType(0) == MVT::f32 && Op0.hasOneUse()) {
    if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
      return Res;
  }

  return SDValue();
}

unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
                                          const SDNode *N0,
                                          const SDNode *N1) const {
  EVT VT = N0->getValueType(0);

  // Only do this if we are not trying to support denormals. v_mad_f32 does not
  // support denormals ever.
  if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
      (VT == MVT::f16 && !Subtarget->hasFP16Denormals()))
    return ISD::FMAD;

  const TargetOptions &Options = DAG.getTarget().Options;
  if ((Options.AllowFPOpFusion == FPOpFusion::Fast ||
       Options.UnsafeFPMath ||
       (cast<BinaryWithFlagsSDNode>(N0)->Flags.hasUnsafeAlgebra() &&
        cast<BinaryWithFlagsSDNode>(N1)->Flags.hasUnsafeAlgebra())) &&
      isFMAFasterThanFMulAndFAdd(VT)) {
    return ISD::FMA;
  }

  return 0;
}
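
// In short, getFusedOpcode returns FMAD when f32/f16 denormals are disabled
// (v_mad does not handle denormals), FMA when contraction is allowed globally
// or on both nodes and FMA is fast on this subtarget, and 0 when the
// fadd/fsub combines below should not fuse at all.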

SDValue SITargetLowering::performFAddCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  assert(!VT.isVector());

  SDLoc SL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // These should really be instruction patterns, but writing patterns with
  // source modifiers is a pain.

  // fadd (fadd (a, a), b) -> mad 2.0, a, b
  if (LHS.getOpcode() == ISD::FADD) {
    SDValue A = LHS.getOperand(0);
    if (A == LHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
      }
    }
  }

  // fadd (b, fadd (a, a)) -> mad 2.0, a, b
  if (RHS.getOpcode() == ISD::FADD) {
    SDValue A = RHS.getOperand(0);
    if (A == RHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
      }
    }
  }

  return SDValue();
}

SDValue SITargetLowering::performFSubCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  EVT VT = N->getValueType(0);
  assert(!VT.isVector());

  // Try to get the fneg to fold into the source modifier. This undoes generic
  // DAG combines and folds them into the mad.
  //
  // Only do this if we are not trying to support denormals. v_mad_f32 does
  // not support denormals ever.
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  if (LHS.getOpcode() == ISD::FADD) {
    // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
    SDValue A = LHS.getOperand(0);
    if (A == LHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);

        return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
      }
    }
  }

  if (RHS.getOpcode() == ISD::FADD) {
    // (fsub c, (fadd a, a)) -> mad -2.0, a, c

    SDValue A = RHS.getOperand(0);
    if (A == RHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
      if (FusedOp != 0) {
        const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
      }
    }
  }

  return SDValue();
}

SDValue SITargetLowering::performSetCCCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  EVT VT = LHS.getValueType();

  if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
                                           VT != MVT::f16))
    return SDValue();

  // Match isinf pattern
  // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) {
    const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
    if (!CRHS)
      return SDValue();

    const APFloat &APF = CRHS->getValueAPF();
    if (APF.isInfinity() && !APF.isNegative()) {
      unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
      return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
                         DAG.getConstant(Mask, SL, MVT::i32));
    }
  }

  return SDValue();
}

SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
                                                     DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;

  SDValue Src = N->getOperand(0);
  SDValue Srl = N->getOperand(0);
  if (Srl.getOpcode() == ISD::ZERO_EXTEND)
    Srl = Srl.getOperand(0);

  // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
  if (Srl.getOpcode() == ISD::SRL) {
    // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
    // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
    // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x

    if (const ConstantSDNode *C =
        dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
      Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
                               EVT(MVT::i32));

      unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
      if (SrcOffset < 32 && SrcOffset % 8 == 0) {
        return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
                           MVT::f32, Srl);
      }
    }
  }

  APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);

  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                        !DCI.isBeforeLegalizeOps());
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLO.ShrinkDemandedConstant(Src, Demanded) ||
      TLI.SimplifyDemandedBits(Src, Demanded, KnownZero, KnownOne, TLO)) {
    DCI.CommitTargetLoweringOpt(TLO);
  }

  return SDValue();
}

SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
  case ISD::FADD:
    return performFAddCombine(N, DCI);
  case ISD::FSUB:
    return performFSubCombine(N, DCI);
  case ISD::SETCC:
    return performSetCCCombine(N, DCI);
  case ISD::FMAXNUM:
  case ISD::FMINNUM:
  case ISD::SMAX:
  case ISD::SMIN:
  case ISD::UMAX:
  case ISD::UMIN:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY: {
    if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
        N->getValueType(0) != MVT::f64 &&
        getTargetMachine().getOptLevel() > CodeGenOpt::None)
      return performMinMaxCombine(N, DCI);
    break;
  }
  case ISD::LOAD:
  case ISD::STORE:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case AMDGPUISD::ATOMIC_INC:
  case AMDGPUISD::ATOMIC_DEC: // TODO: Target mem intrinsics.
    if (DCI.isBeforeLegalize())
      break;
    return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
  case ISD::AND:
    return performAndCombine(N, DCI);
  case ISD::OR:
    return performOrCombine(N, DCI);
  case ISD::XOR:
    return performXorCombine(N, DCI);
  case AMDGPUISD::FP_CLASS:
    return performClassCombine(N, DCI);
  case ISD::FCANONICALIZE:
    return performFCanonicalizeCombine(N, DCI);
  case AMDGPUISD::FRACT:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RSQ_LEGACY:
  case AMDGPUISD::RSQ_CLAMP:
  case AMDGPUISD::LDEXP: {
    SDValue Src = N->getOperand(0);
    if (Src.isUndef())
      return Src;
    break;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return performUCharToFloatCombine(N, DCI);
  case AMDGPUISD::CVT_F32_UBYTE0:
  case AMDGPUISD::CVT_F32_UBYTE1:
  case AMDGPUISD::CVT_F32_UBYTE2:
  case AMDGPUISD::CVT_F32_UBYTE3:
    return performCvtF32UByteNCombine(N, DCI);
  }
  return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}

/// \brief Helper function for adjustWritemask
static unsigned SubIdx2Lane(unsigned Idx) {
  switch (Idx) {
  default: return 0;
  case AMDGPU::sub0: return 0;
  case AMDGPU::sub1: return 1;
  case AMDGPU::sub2: return 2;
  case AMDGPU::sub3: return 3;
  }
}

/// \brief Adjust the writemask of MIMG instructions
void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
                                       SelectionDAG &DAG) const {
  SDNode *Users[4] = { };
  unsigned Lane = 0;
  unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 2 : 3;
  unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
  unsigned NewDmask = 0;

  // Try to figure out the used register components
  for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
       I != E; ++I) {

    // Abort if we can't understand the usage
    if (!I->isMachineOpcode() ||
        I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
      return;

    // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used.
    // Note that subregs are packed, i.e. Lane==0 is the first bit set
    // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
    // set, etc.
    Lane = SubIdx2Lane(I->getConstantOperandVal(1));

    // Set which texture component corresponds to the lane.
    unsigned Comp;
    for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
      assert(Dmask);
      Comp = countTrailingZeros(Dmask);
      Dmask &= ~(1 << Comp);
    }

    // Abort if we have more than one user per component
    if (Users[Lane])
      return;

    Users[Lane] = *I;
    NewDmask |= 1 << Comp;
  }

  // Abort if there's no change
  if (NewDmask == OldDmask)
    return;

  // Adjust the writemask in the node
  std::vector<SDValue> Ops;
  Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
  Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
  Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
  Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops);

  // If we only got one lane, replace it with a copy
  // (if NewDmask has only one bit set...)
  if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
    SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, SDLoc(),
                                       MVT::i32);
    SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                      SDLoc(), Users[Lane]->getValueType(0),
                                      SDValue(Node, 0), RC);
    DAG.ReplaceAllUsesWith(Users[Lane], Copy);
    return;
  }

  // Update the users of the node with the new indices
  for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
    SDNode *User = Users[i];
    if (!User)
      continue;

    SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
    DAG.UpdateNodeOperands(User, User->getOperand(0), Op);

    switch (Idx) {
    default: break;
    case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
    case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
    case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
    }
  }
}
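
// Worked example of the writemask adjustment above (illustrative only): an
// image sample with dmask = 0xf whose result is only read through
// EXTRACT_SUBREGs of sub0 and sub2 gets NewDmask = 0b0101. The two users are
// renumbered to sub0 and sub1 of the shrunken result, and
// AdjustInstrPostInstrSelection later retypes the destination register class
// based on the number of bits set in the new dmask.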

static bool isFrameIndexOp(SDValue Op) {
  if (Op.getOpcode() == ISD::AssertZext)
    Op = Op.getOperand(0);

  return isa<FrameIndexSDNode>(Op);
}

/// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
/// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers.
void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
    if (!isFrameIndexOp(Node->getOperand(i))) {
      Ops.push_back(Node->getOperand(i));
      continue;
    }

    SDLoc DL(Node);
    Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
                                             Node->getOperand(i).getValueType(),
                                             Node->getOperand(i)), 0));
  }

  DAG.UpdateNodeOperands(Node, Ops);
}

/// \brief Fold the instructions after selecting them.
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
                                          SelectionDAG &DAG) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  unsigned Opcode = Node->getMachineOpcode();

  if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
      !TII->isGather4(Opcode))
    adjustWritemask(Node, DAG);

  if (Opcode == AMDGPU::INSERT_SUBREG ||
      Opcode == AMDGPU::REG_SEQUENCE) {
    legalizeTargetIndependentNode(Node, DAG);
    return Node;
  }
  return Node;
}

/// \brief Assign the register class depending on the number of
/// bits set in the writemask
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                     SDNode *Node) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  if (TII->isVOP3(MI.getOpcode())) {
    // Make sure constant bus requirements are respected.
    TII->legalizeOperandsVOP3(MRI, MI);
    return;
  }

  if (TII->isMIMG(MI)) {
    unsigned VReg = MI.getOperand(0).getReg();
    const TargetRegisterClass *RC = MRI.getRegClass(VReg);
    // TODO: Need mapping tables to handle other cases (register classes).
    if (RC != &AMDGPU::VReg_128RegClass)
      return;

    unsigned DmaskIdx = MI.getNumOperands() == 12 ? 3 : 4;
    unsigned Writemask = MI.getOperand(DmaskIdx).getImm();
    unsigned BitsSet = 0;
    for (unsigned i = 0; i < 4; ++i)
      BitsSet += Writemask & (1 << i) ? 1 : 0;
    switch (BitsSet) {
    default: return;
    case 1: RC = &AMDGPU::VGPR_32RegClass; break;
    case 2: RC = &AMDGPU::VReg_64RegClass; break;
    case 3: RC = &AMDGPU::VReg_96RegClass; break;
    }

    unsigned NewOpcode = TII->getMaskedMIMGOp(MI.getOpcode(), BitsSet);
    MI.setDesc(TII->get(NewOpcode));
    MRI.setRegClass(VReg, RC);
    return;
  }

  // Replace unused atomics with the no-return version.
  int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
  if (NoRetAtomicOp != -1) {
    if (!Node->hasAnyUseOfValue(0)) {
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);
      return;
    }

    // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
    // instruction, because the return type of these instructions is a vec2 of
    // the memory type, so it can be tied to the input operand.
    // This means these instructions always have a use, so we need to add a
    // special case to check if the atomic has only one extract_subreg use,
    // which itself has no uses.
    if ((Node->hasNUsesOfValue(1, 0) &&
         Node->use_begin()->isMachineOpcode() &&
         Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
         !Node->use_begin()->hasAnyUseOfValue(0))) {
      unsigned Def = MI.getOperand(0).getReg();

      // Change this into a noret atomic.
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);

      // If we only remove the def operand from the atomic instruction, the
      // extract_subreg will be left with a use of a vreg without a def.
      // So we need to insert an implicit_def to avoid machine verifier
      // errors.
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
              TII->get(AMDGPU::IMPLICIT_DEF), Def);
    }
    return;
  }
}

static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
                              uint64_t Val) {
  SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
  return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
}

MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
                                                const SDLoc &DL,
                                                SDValue Ptr) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  // Build the half of the register that holds the constants before building
  // the full 128-bit register. If we are building multiple resource
  // descriptors, this will allow CSEing of the 2-component register.
  const SDValue Ops0[] = {
    DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
    buildSMovImm32(DAG, DL, 0),
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
  };

  SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                                MVT::v2i32, Ops0), 0);

  // Combine the constants and the pointer.
  const SDValue Ops1[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    Ptr,
    DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
    SubRegHi,
    DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
}

/// \brief Return a resource descriptor with the 'Add TID' bit enabled.
/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
/// of the resource descriptor) to create an offset, which is added to
/// the resource pointer.
MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
                                           SDValue Ptr, uint32_t RsrcDword1,
                                           uint64_t RsrcDword2And3) const {
  SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
  SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
  if (RsrcDword1) {
    PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
                                       DAG.getConstant(RsrcDword1, DL, MVT::i32)),
                    0);
  }

  SDValue DataLo = buildSMovImm32(DAG, DL,
                                  RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
  SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);

  const SDValue Ops[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    PtrLo,
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    PtrHi,
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
    DataLo,
    DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
    DataHi,
    DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
}

SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                               const TargetRegisterClass *RC,
                                               unsigned Reg, EVT VT) const {
  SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT);

  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()),
                            cast<RegisterSDNode>(VReg)->getReg(), VT);
}

//===----------------------------------------------------------------------===//
// SI Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass *>
SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                               StringRef Constraint,
                                               MVT VT) const {
  if (!isTypeLegal(VT))
    return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 's':
    case 'r':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        return std::make_pair(0U, &AMDGPU::SReg_32_XM0RegClass);
      case 64:
        return std::make_pair(0U, &AMDGPU::SGPR_64RegClass);
      case 128:
        return std::make_pair(0U, &AMDGPU::SReg_128RegClass);
      case 256:
        return std::make_pair(0U, &AMDGPU::SReg_256RegClass);
      }

    case 'v':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        return std::make_pair(0U, &AMDGPU::VGPR_32RegClass);
      case 64:
        return std::make_pair(0U, &AMDGPU::VReg_64RegClass);
      case 96:
        return std::make_pair(0U, &AMDGPU::VReg_96RegClass);
      case 128:
        return std::make_pair(0U, &AMDGPU::VReg_128RegClass);
      case 256:
        return std::make_pair(0U, &AMDGPU::VReg_256RegClass);
      case 512:
        return std::make_pair(0U, &AMDGPU::VReg_512RegClass);
      }
    }
  }

  if (Constraint.size() > 1) {
    const TargetRegisterClass *RC = nullptr;
    if (Constraint[1] == 'v') {
      RC = &AMDGPU::VGPR_32RegClass;
    } else if (Constraint[1] == 's') {
      RC = &AMDGPU::SGPR_32RegClass;
    }

    if (RC) {
      uint32_t Idx;
      bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
      if (!Failed && Idx < RC->getNumRegs())
        return std::make_pair(RC->getRegister(Idx), RC);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

SITargetLowering::ConstraintType
SITargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 's':
    case 'v':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}