//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#ifdef _MSC_VER
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

/// Return the first SGPR (as a physical register number) that has not yet
/// been allocated by \p CCInfo. Aborts if every SGPR is taken.
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

// Constructor: registers the legal register classes for each MVT and then
// declares, per opcode and type, whether the operation is Legal, Custom,
// Promoted, or Expanded for this subtarget. Note the ordering of
// setOperationAction / AddPromotedToType pairs is significant.
SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const SISubtarget &STI)
    : AMDGPUTargetLowering(TM, STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
  }

  if (Subtarget->hasVOP3PInsts()) {
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
  }

  computeRegisterProperties(STI.getRegisterInfo());

  // We need to custom lower vector stores from local memory
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
        MVT::v2i64, MVT::v2f64}) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
  // and output demarshalling
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value,
  // let LLVM add the comparison
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (getSubtarget()->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // On SI this is s_memtime and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);

  if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);
  }

  if (Subtarget->hasVOP3PInsts()) {
    for (MVT VT : {MVT::v2i16, MVT::v2f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Legal);

    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns)
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
  } else {
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);

  setSchedulingPreference(Sched::RegPressure);
}

const SISubtarget *SITargetLowering::getSubtarget() const {
  return static_cast<const SISubtarget *>(Subtarget);
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &,
                                          EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

// Describe the memory behavior of target memory intrinsics so the optimizer
// can treat them like ordinary loads/stores. Only the inc/dec atomics are
// handled here.
bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          unsigned IntrID) const {
  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;

    // Operand 4 is the volatile flag; treat a non-constant or non-zero value
    // as volatile.
    const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
    Info.vol = !Vol || !Vol->isNullValue();
    Info.readMem = true;
    Info.writeMem = true;
    return true;
  }
  default:
    return false;
  }
}

// Report the pointer operand(s) of intrinsics that behave like memory
// accesses, for use by addressing-mode analysis (e.g. LSR).
bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and is treated as a 12-bit unsigned offset.

  // Just r + i
  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so slightly
  // different than the normal addr64.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r
    // Or  2 * r + i is allowed as r + r + i.
    return true;
  default: // Don't allow n * r
    return false;
  }
}

bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  if (AS == AMDGPUASI.GLOBAL_ADDRESS) {
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // Assume the we will use FLAT for all global memory accesses
      // on VI.
      // FIXME: This assumption is currently wrong.  On VI we still use
      // MUBUF instructions for the r + i addressing mode.  As currently
      // implemented, the MUBUF instructions only work on buffer < 4GB.
      // It may be possible to support > 4GB buffers with MUBUF instructions,
      // by setting the stride value in the resource descriptor which would
      // increase the size limit to (stride * 4GB).  However, this is risky,
      // because it has never been validated.
      return isLegalFlatAddressingMode(AM);
    }

    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (DL.getTypeStoreSize(Ty) < 4)
      return isLegalMUBUFAddressingMode(AM);

    if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8-bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is 20-bit in bytes.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;

  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS ||
             AS == AMDGPUASI.REGION_ADDRESS) {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUASI.FLAT_ADDRESS ||
             AS == AMDGPUASI.UNKNOWN_ADDRESS_SPACE) {
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);
  } else {
    llvm_unreachable("unhandled address space");
  }
}

// Limit on the total width of merged stores, per address space.
bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT) const {
  if (AS == AMDGPUASI.GLOBAL_ADDRESS || AS == AMDGPUASI.FLAT_ADDRESS) {
    return (MemVT.getSizeInBits() <= 4 * 32);
  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
    return (MemVT.getSizeInBits() <= MaxPrivateBits);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
    return (MemVT.getSizeInBits() <= 2 * 32);
  }
  return true;
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUASI.LOCAL_ADDRESS ||
      AddrSpace == AMDGPUASI.REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch.  If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUASI.PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUASI.FLAT_ADDRESS)) {
    return false;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have an uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS) ?
        (Align % 4 == 0) : true;
    }

    return true;
  }

  // Smaller than dword value must be aligned.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                          unsigned SrcAlign, bool IsMemset,
                                          bool ZeroMemset,
                                          bool MemcpyStrSrc,
                                          MachineFunction &MF) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type to
  // use. Make sure we switch these to 64-bit accesses.

  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
    return MVT::v4i32;

  if (Size >= 8 && DstAlign >= 4)
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}

// True for the address spaces that alias the generic flat address space
// (global, flat, constant).
static bool isFlatGlobalAddrSpace(unsigned AS, AMDGPUAS AMDGPUASI) {
  return AS == AMDGPUASI.GLOBAL_ADDRESS ||
         AS == AMDGPUASI.FLAT_ADDRESS ||
         AS == AMDGPUASI.CONSTANT_ADDRESS;
}

bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  return isFlatGlobalAddrSpace(SrcAS, AMDGPUASI) &&
         isFlatGlobalAddrSpace(DestAS, AMDGPUASI);
}

// True if the memory operand's underlying IR value carries the
// "amdgpu.noclobber" metadata (set by an earlier analysis pass).
bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();
  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.noclobber");
}

bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
                                            unsigned DestAS) const {
  // Flat -> private/local is a simple truncate.
  // Flat -> global is no-op
  if (SrcAS == AMDGPUASI.FLAT_ADDRESS)
    return true;

  return isNoopAddrSpaceCast(SrcAS, DestAS);
}

bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);

  return AMDGPU::isUniformMMO(MemNode->getMemOperand());
}

TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(EVT VT) const {
  // Split sub-dword element vectors rather than widening/promoting them.
  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
    return TypeSplitVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                         Type *Ty) const {
  // FIXME: Could be smarter if called for vector constants.
  return true;
}

bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
  if (Subtarget->has16BitInsts() && VT == MVT::i16) {
    switch (Op) {
    case ISD::LOAD:
    case ISD::STORE:

    // These operations are done with 32-bit instructions anyway.
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SELECT:
      // TODO: Extensions?
      return true;
    default:
      return false;
    }
  }

  // SimplifySetCC uses this function to determine whether or not it should
  // create setcc with i1 operands.  We don't have instructions for i1 setcc.
  if (VT == MVT::i1 && Op == ISD::SETCC)
    return false;

  return TargetLowering::isTypeDesirableForOp(Op, VT);
}

// Build a pointer into the kernarg segment at the given byte \p Offset:
// copy of the preloaded kernarg-segment-pointer SGPR plus the offset.
SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
                                                   const SDLoc &SL,
                                                   SDValue Chain,
                                                   uint64_t Offset) const {
  const DataLayout &DL = DAG.getDataLayout();
  MachineFunction &MF = DAG.getMachineFunction();
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
  unsigned InputPtrReg = TRI->getPreloadedValue(MF,
                                                SIRegisterInfo::KERNARG_SEGMENT_PTR);

  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  MVT PtrVT = getPointerTy(DL, AMDGPUASI.CONSTANT_ADDRESS);
  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
                                       MRI.getLiveInVirtReg(InputPtrReg), PtrVT);
  return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                     DAG.getConstant(Offset, SL, PtrVT));
}

// Convert a loaded argument value of type \p MemVT to the expected \p VT,
// applying Assert[SZ]ext from the ABI flags and the appropriate
// extend/truncate.
SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                         const SDLoc &SL, SDValue Val,
                                         bool Signed,
                                         const ISD::InputArg *Arg) const {
  if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
      VT.bitsLT(MemVT)) {
    unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
    Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
  }

  if (MemVT.isFloatingPoint())
    Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
  else if (Signed)
    Val = DAG.getSExtOrTrunc(Val, SL, VT);
  else
    Val = DAG.getZExtOrTrunc(Val, SL, VT);

  return Val;
}

// Load a kernel argument from constant memory at \p Offset within the
// kernarg segment, returning { converted value, output chain }.
SDValue SITargetLowering::lowerKernargMemParameter(
  SelectionDAG &DAG, EVT VT, EVT MemVT,
  const SDLoc &SL, SDValue Chain,
  uint64_t Offset, bool Signed,
  const ISD::InputArg *Arg) const {
  const DataLayout &DL = DAG.getDataLayout();
  Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  unsigned Align = DL.getABITypeAlignment(Ty);

  SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
  SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
                             MachineMemOperand::MONonTemporal |
                             MachineMemOperand::MODereferenceable |
                             MachineMemOperand::MOInvariant);

  SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
  return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
}

SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
                                              const SDLoc &SL, SDValue Chain,
                                              const ISD::InputArg &Arg) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (Arg.Flags.isByVal()) {
    unsigned Size = Arg.Flags.getByValSize();
    int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
    return DAG.getFrameIndex(FrameIdx, MVT::i32);
  }

  unsigned ArgOffset = VA.getLocMemOffset();
  unsigned ArgSize = VA.getValVT().getStoreSize();

  int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);

  // Create load nodes to retrieve arguments from the stack.
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  SDValue ArgValue;

  // For NON_EXTLOAD, generic code in getLoad assert(ValVT == MemVT)
  ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
  MVT MemVT = VA.getValVT();

  // Pick the load extension matching how the calling convention promoted the
  // value; BCvt keeps the location type as the in-memory type.
  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::BCvt:
    MemVT = VA.getLocVT();
    break;
  case CCValAssign::SExt:
    ExtType = ISD::SEXTLOAD;
    break;
  case CCValAssign::ZExt:
    ExtType = ISD::ZEXTLOAD;
    break;
  case CCValAssign::AExt:
    ExtType = ISD::EXTLOAD;
    break;
  }

  ArgValue = DAG.getExtLoad(
    ExtType, SL, VA.getLocVT(), Chain, FIN,
    MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
    MemVT);
  return ArgValue;
}

// Record PS input usage in \p Info (skipping unused inputs), and split vector
// shader arguments into one scalar InputArg per original element in
// \p Splits. Skipped argument indices are set in \p Skipped.
static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
                                   CallingConv::ID CallConv,
                                   ArrayRef<ISD::InputArg> Ins,
                                   BitVector &Skipped,
                                   FunctionType *FType,
                                   SIMachineFunctionInfo *Info) {
  for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
    const ISD::InputArg &Arg = Ins[I];

    // First check if it's a PS input addr.
    if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
        !Arg.Flags.isByVal() && PSInputNum <= 15) {

      if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
        // We can safely skip PS inputs.
        Skipped.set(I);
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (Arg.Used)
        Info->markPSInputEnabled(PSInputNum);

      ++PSInputNum;
    }

    // Second split vertices into their elements.
    if (Arg.VT.isVector()) {
      ISD::InputArg NewArg = Arg;
      NewArg.Flags.setSplit();
      NewArg.VT = Arg.VT.getVectorElementType();

      // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
      // three or five element vertex only needs three or five registers,
      // NOT four or eight.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      for (unsigned J = 0; J != NumElements; ++J) {
        Splits.push_back(NewArg);
        NewArg.PartOffset += NewArg.VT.getStoreSize();
      }
    } else {
      Splits.push_back(Arg);
    }
  }
}

// Allocate special inputs passed in VGPRs (workitem IDs X/Y/Z).
static void allocateSpecialInputVGPRs(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) {
  if (Info.hasWorkItemIDX()) {
    unsigned Reg = TRI.getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkItemIDY()) {
    unsigned Reg = TRI.getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkItemIDZ()) {
    unsigned Reg = TRI.getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  if (Info.hasPrivateMemoryInputPtr()) {
    unsigned PrivateMemoryPtrReg = Info.addPrivateMemoryPtr(TRI);
    MF.addLiveIn(PrivateMemoryPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(PrivateMemoryPtrReg);
  }

  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    unsigned QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
    MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    unsigned DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.
}

// Allocate special input registers that are initialized per-wave.
static void allocateSystemSGPRs(CCState &CCInfo,
                                MachineFunction &MF,
                                SIMachineFunctionInfo &Info,
                                CallingConv::ID CallConv,
                                bool IsShader) {
  if (Info.hasWorkGroupIDX()) {
    unsigned Reg = Info.addWorkGroupIDX();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDY()) {
    unsigned Reg = Info.addWorkGroupIDY();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDZ()) {
    unsigned Reg = Info.addWorkGroupIDZ();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupInfo()) {
    unsigned Reg = Info.addWorkGroupInfo();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg;

    if (IsShader) {
      PrivateSegmentWaveByteOffsetReg =
        Info.getPrivateSegmentWaveByteOffsetSystemSGPR();

      // This is true if the scratch wave byte offset doesn't have a fixed
      // location.
      if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
        PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
        Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
      }
    } else
      PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }
}

// Decide which physical SGPRs hold the scratch resource descriptor, scratch
// wave offset, and (if \p NeedSP) the stack pointer, and record them in
// \p Info.
static void reservePrivateMemoryRegs(const TargetMachine &TM,
                                     MachineFunction &MF,
                                     const SIRegisterInfo &TRI,
                                     SIMachineFunctionInfo &Info,
                                     bool NeedSP) {
  // Now that we've figured out where the scratch register inputs are, see if
  // should reserve the arguments and use them directly.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool HasStackObjects = MFI.hasStackObjects();

  // Record that we know we have non-spill stack objects so we don't need to
  // check all stack objects later.
  if (HasStackObjects)
    Info.setHasNonSpillStackObjects(true);

  // Everything live out of a block is spilled with fast regalloc, so it's
  // almost certain that spilling will be required.
  if (TM.getOptLevel() == CodeGenOpt::None)
    HasStackObjects = true;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  if (ST.isAmdCodeObjectV2(MF)) {
    if (HasStackObjects) {
      // If we have stack objects, we unquestionably need the private buffer
      // resource. For the Code Object V2 ABI, this will be the first 4 user
      // SGPR inputs. We can reserve those and use them directly.

      unsigned PrivateSegmentBufferReg = TRI.getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
      Info.setScratchRSrcReg(PrivateSegmentBufferReg);

      unsigned PrivateSegmentWaveByteOffsetReg = TRI.getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
    } else {
      unsigned ReservedBufferReg
        = TRI.reservedPrivateSegmentBufferReg(MF);
      unsigned ReservedOffsetReg
        = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);

      // We tentatively reserve the last registers (skipping the last two
      // which may contain VCC). After register allocation, we'll replace
      // these with the ones immediately after those which were really
      // allocated. In the prologue copies will be inserted from the argument
      // to these reserved registers.
      Info.setScratchRSrcReg(ReservedBufferReg);
      Info.setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  } else {
    unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);

    // Without HSA, relocations are used for the scratch pointer and the
    // buffer resource setup is always inserted in the prologue. Scratch wave
    // offset is still in an input SGPR.
    Info.setScratchRSrcReg(ReservedBufferReg);

    if (HasStackObjects) {
      unsigned ScratchWaveOffsetReg = TRI.getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg);
    } else {
      unsigned ReservedOffsetReg
        = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
      Info.setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  }

  if (NeedSP) {
    unsigned ReservedStackPtrOffsetReg = TRI.reservedStackPtrOffsetReg(MF);
    Info.setStackPtrOffsetReg(ReservedStackPtrOffsetReg);

    assert(Info.getStackPtrOffsetReg() != Info.getFrameOffsetReg());
    assert(!TRI.isSubRegister(Info.getScratchRSrcReg(),
                              Info.getStackPtrOffsetReg()));
  }
}

SDValue SITargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  MachineFunction &MF = DAG.getMachineFunction();
  FunctionType *FType = MF.getFunction()->getFunctionType();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  // Graphics shaders on HSA are not supported; diagnose and bail out.
  if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
    const Function *Fn = MF.getFunction();
    DiagnosticInfoUnsupported NoGraphicsHSA(
        *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
    DAG.getContext()->diagnose(NoGraphicsHSA);
    return DAG.getEntryNode();
  }

  // Create stack objects that are used for emitting debugger prologue if
  // "amdgpu-debugger-emit-prologue" attribute was specified.
  if (ST.debuggerEmitPrologue())
    createDebuggerPrologueStackObjects(MF);

  SmallVector<ISD::InputArg, 16> Splits;
  SmallVector<CCValAssign, 16> ArgLocs;
  BitVector Skipped(Ins.size());
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  bool IsShader = AMDGPU::isShader(CallConv);
  bool IsKernel = AMDGPU::isKernel(CallConv);
  bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);

  if (IsShader) {
    processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);

    // At least one interpolation mode must be enabled or else the GPU will
    // hang.
    //
    // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
    // set PSInputAddr, the user wants to enable some bits after the compilation
    // based on run-time states. Since we can't know what the final PSInputEna
    // will look like, so we shouldn't do anything here and the user should take
    // responsibility for the correct programming.
    //
    // Otherwise, the following restrictions apply:
    // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
    // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
    //   enabled too.
    if (CallConv == CallingConv::AMDGPU_PS &&
        ((Info->getPSInputAddr() & 0x7F) == 0 ||
         ((Info->getPSInputAddr() & 0xF) == 0 &&
          Info->isPSInputAllocated(11)))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    // Shaders should not have preloaded any of the kernel-only inputs.
    assert(!Info->hasDispatchPtr() &&
           !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
           !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
           !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
           !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
           !Info->hasWorkItemIDZ());
  } else if (IsKernel) {
    assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
  } else {
    Splits.append(Ins.begin(), Ins.end());
  }

  if (IsEntryFunc) {
    allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
    allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
  }

  if (IsKernel) {
    analyzeFormalArgumentsCompute(CCInfo, Ins);
  } else {
    CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
    CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
  }

  SmallVector<SDValue, 16> Chains;

  // Note: Ins is indexed by i, while ArgLocs is indexed by ArgIdx; skipped
  // PS inputs consume an Ins slot but no ArgLocs slot.
  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];
    if (Skipped[i]) {
      InVals.push_back(DAG.getUNDEF(Arg.VT));
      continue;
    }

    CCValAssign &VA = ArgLocs[ArgIdx++];
    MVT VT = VA.getLocVT();

    if (IsEntryFunc && VA.isMemLoc()) {
      // Kernel argument: load from the kernarg segment.
      VT = Ins[i].VT;
      EVT MemVT = VA.getLocVT();

      const uint64_t Offset = Subtarget->getExplicitKernelArgOffset(MF) +
        VA.getLocMemOffset();
      Info->setABIArgOffset(Offset + MemVT.getStoreSize());

      // The first 36 bytes of the input buffer contains information about
      // thread group and global sizes.
      SDValue Arg = lowerKernargMemParameter(
        DAG, VT, MemVT, DL, Chain, Offset, Ins[i].Flags.isSExt(), &Ins[i]);
      Chains.push_back(Arg.getValue(1));

      auto *ParamTy =
        dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
      if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
          ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
        // On SI local pointers are just offsets into LDS, so they are always
        // less than 16-bits.  On CI and newer they could potentially be
        // real pointers, so we can't guarantee their size.
        Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
                          DAG.getValueType(MVT::i16));
      }

      InVals.push_back(Arg);
      continue;
    } else if (!IsEntryFunc && VA.isMemLoc()) {
      SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
      InVals.push_back(Val);
      if (!Arg.Flags.isByVal())
        Chains.push_back(Val.getValue(1));
      continue;
    }

    assert(VA.isRegLoc() && "Parameter must be in a register!");

    unsigned Reg = VA.getLocReg();
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);

    Reg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);

    if (IsShader && Arg.VT.isVector()) {
      // Build a vector from the registers
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      SmallVector<SDValue, 4> Regs;
      Regs.push_back(Val);
      for (unsigned j = 1; j != NumElements; ++j) {
        Reg = ArgLocs[ArgIdx++].getLocReg();
        Reg = MF.addLiveIn(Reg, RC);

        SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
        Regs.push_back(Copy);
      }

      // Fill up the missing vector elements
      NumElements = Arg.VT.getVectorNumElements() - NumElements;
      Regs.append(NumElements, DAG.getUNDEF(VT));

      InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
      continue;
    }

    InVals.push_back(Val);
  }

  const MachineFrameInfo &FrameInfo = MF.getFrameInfo();

  // TODO: Could maybe omit SP if only tail calls?
  bool NeedSP = FrameInfo.hasCalls() || FrameInfo.hasVarSizedObjects();

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
    reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info, NeedSP);
  } else {
    CCInfo.AllocateReg(Info->getScratchRSrcReg());
    CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
    CCInfo.AllocateReg(Info->getFrameOffsetReg());

    if (NeedSP) {
      unsigned StackPtrReg = findFirstFreeSGPR(CCInfo);
      CCInfo.AllocateReg(StackPtrReg);
      Info->setStackPtrOffsetReg(StackPtrReg);
    }
  }

  return Chains.empty() ? Chain :
    DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}

// TODO: If return values can't fit in registers, we should return as many as
// possible in registers before passing on stack.
bool SITargetLowering::CanLowerReturn(
    CallingConv::ID CallConv,
    MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext &Context) const {
  // Replacing returns with sret/stack usage doesn't make sense for shaders.
  // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
  // for shaders. Vector types should be explicitly handled by CC.
  if (AMDGPU::isEntryFunctionCC(CallConv))
    return true;

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
}

SDValue
SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                              bool isVarArg,
                              const SmallVectorImpl<ISD::OutputArg> &Outs,
                              const SmallVectorImpl<SDValue> &OutVals,
                              const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  // Kernels use the generic AMDGPU return lowering.
  if (AMDGPU::isKernel(CallConv)) {
    return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
                                             OutVals, DL, DAG);
  }

  bool IsShader = AMDGPU::isShader(CallConv);

  Info->setIfReturnsVoid(Outs.size() == 0);
  bool IsWaveEnd = Info->returnsVoid() && IsShader;

  SmallVector<ISD::OutputArg, 48> Splits;
  SmallVector<SDValue, 48> SplitVals;

  // Split vectors into their elements.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    const ISD::OutputArg &Out = Outs[i];

    if (IsShader && Out.VT.isVector()) {
      MVT VT = Out.VT.getVectorElementType();
      ISD::OutputArg NewOut = Out;
      NewOut.Flags.setSplit();
      NewOut.VT = VT;

      // We want the original number of vector elements here, e.g.
      // three or five, not four or eight.
      unsigned NumElements = Out.ArgVT.getVectorNumElements();

      for (unsigned j = 0; j != NumElements; ++j) {
        SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i],
                                   DAG.getConstant(j, DL, MVT::i32));
        SplitVals.push_back(Elem);
        Splits.push_back(NewOut);
        NewOut.PartOffset += NewOut.VT.getStoreSize();
      }
    } else {
      SplitVals.push_back(OutVals[i]);
      Splits.push_back(Out);
    }
  }

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 48> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Splits, CCAssignFnForReturn(CallConv, isVarArg));

  SDValue Flag;
  SmallVector<SDValue, 48> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)

  // Add return address for callable functions.
  if (!Info->isEntryFunction()) {
    const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
    SDValue ReturnAddrReg = CreateLiveInRegister(
      DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);

    // FIXME: Should be able to use a vreg here, but need a way to prevent it
    // from being allocated to a CSR.

    SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
                                                MVT::i64);

    Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
    Flag = Chain.getValue(1);

    RetOps.push_back(PhysReturnAddrReg);
  }

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    // TODO: Partially return in registers if return values don't fit.

    SDValue Arg = SplitVals[realRVLocIdx];

    // Copied from other backends.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // FIXME: Does sret work properly?

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Flag.getNode())
    RetOps.push_back(Flag);

  unsigned Opc = AMDGPUISD::ENDPGM;
  if (!IsWaveEnd)
    Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}

// Resolve a named physical register (e.g. for read_register-style queries),
// verifying the register is valid for this subtarget and the requested type
// width; reports a fatal error otherwise.
unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                             SelectionDAG &DAG) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
    .Case("m0", AMDGPU::M0)
    .Case("exec", AMDGPU::EXEC)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Default(AMDGPU::NoRegister);

  if (Reg == AMDGPU::NoRegister) {
    report_fatal_error(Twine("invalid register name \""
                             + StringRef(RegName)  + "\"."));

  }

  if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
      Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
    report_fatal_error(Twine("invalid register \""
                             + StringRef(RegName)  + "\" for subtarget."));
  }

  switch (Reg) {
  case AMDGPU::M0:
  case AMDGPU::EXEC_LO:
  case AMDGPU::EXEC_HI:
  case AMDGPU::FLAT_SCR_LO:
  case AMDGPU::FLAT_SCR_HI:
    if (VT.getSizeInBits() == 32)
      return Reg;
    break;
  case AMDGPU::EXEC:
  case AMDGPU::FLAT_SCR:
    if (VT.getSizeInBits() == 64)
      return Reg;
    break;
  default:
    llvm_unreachable("missing register type checking");
  }

  report_fatal_error(Twine("invalid type for register \""
                           + StringRef(RegName) + "\"."));
}

// If kill is not the last instruction, split the block so kill is always a
// proper terminator.
MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
                                                    MachineBasicBlock *BB) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  MachineBasicBlock::iterator SplitPoint(&MI);
  ++SplitPoint;

  if (SplitPoint == BB->end()) {
    // Don't bother with a new block.
1634 MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR)); 1635 return BB; 1636 } 1637 1638 MachineFunction *MF = BB->getParent(); 1639 MachineBasicBlock *SplitBB 1640 = MF->CreateMachineBasicBlock(BB->getBasicBlock()); 1641 1642 MF->insert(++MachineFunction::iterator(BB), SplitBB); 1643 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end()); 1644 1645 SplitBB->transferSuccessorsAndUpdatePHIs(BB); 1646 BB->addSuccessor(SplitBB); 1647 1648 MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR)); 1649 return SplitBB; 1650 } 1651 1652 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the 1653 // wavefront. If the value is uniform and just happens to be in a VGPR, this 1654 // will only do one iteration. In the worst case, this will loop 64 times. 1655 // 1656 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value. 1657 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop( 1658 const SIInstrInfo *TII, 1659 MachineRegisterInfo &MRI, 1660 MachineBasicBlock &OrigBB, 1661 MachineBasicBlock &LoopBB, 1662 const DebugLoc &DL, 1663 const MachineOperand &IdxReg, 1664 unsigned InitReg, 1665 unsigned ResultReg, 1666 unsigned PhiReg, 1667 unsigned InitSaveExecReg, 1668 int Offset, 1669 bool UseGPRIdxMode) { 1670 MachineBasicBlock::iterator I = LoopBB.begin(); 1671 1672 unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1673 unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1674 unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 1675 unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1676 1677 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) 1678 .addReg(InitReg) 1679 .addMBB(&OrigBB) 1680 .addReg(ResultReg) 1681 .addMBB(&LoopBB); 1682 1683 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec) 1684 .addReg(InitSaveExecReg) 1685 .addMBB(&OrigBB) 1686 .addReg(NewExec) 1687 .addMBB(&LoopBB); 1688 1689 // Read the next variant <- also 
loop target. 1690 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg) 1691 .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef())); 1692 1693 // Compare the just read M0 value to all possible Idx values. 1694 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg) 1695 .addReg(CurrentIdxReg) 1696 .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg()); 1697 1698 if (UseGPRIdxMode) { 1699 unsigned IdxReg; 1700 if (Offset == 0) { 1701 IdxReg = CurrentIdxReg; 1702 } else { 1703 IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 1704 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg) 1705 .addReg(CurrentIdxReg, RegState::Kill) 1706 .addImm(Offset); 1707 } 1708 1709 MachineInstr *SetIdx = 1710 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_IDX)) 1711 .addReg(IdxReg, RegState::Kill); 1712 SetIdx->getOperand(2).setIsUndef(); 1713 } else { 1714 // Move index from VCC into M0 1715 if (Offset == 0) { 1716 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 1717 .addReg(CurrentIdxReg, RegState::Kill); 1718 } else { 1719 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 1720 .addReg(CurrentIdxReg, RegState::Kill) 1721 .addImm(Offset); 1722 } 1723 } 1724 1725 // Update EXEC, save the original EXEC value to VCC. 1726 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec) 1727 .addReg(CondReg, RegState::Kill); 1728 1729 MRI.setSimpleHint(NewExec, CondReg); 1730 1731 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 1732 MachineInstr *InsertPt = 1733 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC) 1734 .addReg(AMDGPU::EXEC) 1735 .addReg(NewExec); 1736 1737 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use 1738 // s_cbranch_scc0? 1739 1740 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover. 
// Branch back to the top of the waterfall loop while EXEC is still nonzero,
// i.e. while some lanes still have an index value that has not been handled.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&LoopBB);

  return InsertPt->getIterator();
}

// This has slightly sub-optimal regalloc when the source vector is killed by
// the read. The register allocator does not understand that the kill is
// per-workitem, so is kept alive for the whole loop so we end up not re-using a
// subregister from it, using 1 more VGPR than necessary. This was saved when
// this was expanded after register allocation.
//
// Wraps emitLoadM0FromVGPRLoop: splits \p MBB at \p MI, inserts the waterfall
// loop block that materializes a divergent (VGPR) index into M0 (or the GPR
// index register when \p UseGPRIdxMode), and restores the saved EXEC mask in
// the remainder block. Returns the insertion point (in the loop block) where
// the caller should emit the instruction that consumes M0.
static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
                                                  MachineBasicBlock &MBB,
                                                  MachineInstr &MI,
                                                  unsigned InitResultReg,
                                                  unsigned PhiReg,
                                                  int Offset,
                                                  bool UseGPRIdxMode) {
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  // TmpExec feeds the loop's EXEC phi; it has no meaningful value on entry.
  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);

  // Save the EXEC mask
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
    .addReg(AMDGPU::EXEC);

  // To insert the loop we need to split the block. Move everything after this
  // point to a new block, and insert a new empty block between the two.
  MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
  MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF->insert(MBBI, LoopBB);
  MF->insert(MBBI, RemainderBB);

  // The loop both iterates on itself and exits to the remainder block.
  LoopBB->addSuccessor(LoopBB);
  LoopBB->addSuccessor(RemainderBB);

  // Move the rest of the block into a new block.
  RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
  RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());

  MBB.addSuccessor(LoopBB);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);

  auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
                                      InitResultReg, DstReg, PhiReg, TmpExec,
                                      Offset, UseGPRIdxMode);

  // Restore the original EXEC mask once all lanes have been processed.
  MachineBasicBlock::iterator First = RemainderBB->begin();
  BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
    .addReg(SaveExec);

  return InsPt;
}

// Returns subreg index, offset
//
// Folds a constant element offset into the subregister index when it is in
// range for \p SuperRC; otherwise the offset is returned unchanged (and will
// be applied to the index register at runtime).
static std::pair<unsigned, int>
computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
                            const TargetRegisterClass *SuperRC,
                            unsigned VecReg,
                            int Offset) {
  // Number of 32-bit elements in the vector register class.
  int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;

  // Skip out of bounds offsets, or else we would end up using an undefined
  // register.
  if (Offset >= NumElts || Offset < 0)
    return std::make_pair(AMDGPU::sub0, Offset);

  return std::make_pair(AMDGPU::sub0 + Offset, 0);
}

// Return true if the index is an SGPR and was set.
//
// For a uniform (SGPR) index, no waterfall loop is needed: either program the
// GPR indexing mode (S_SET_GPR_IDX_ON) or write the index directly into M0.
// Returns false when the index is not an SGPR, in which case the caller must
// emit the VGPR waterfall loop instead.
static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
                                 MachineRegisterInfo &MRI,
                                 MachineInstr &MI,
                                 int Offset,
                                 bool UseGPRIdxMode,
                                 bool IsIndirectSrc) {
  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());

  assert(Idx->getReg() != AMDGPU::NoRegister);

  if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
    return false;

  if (UseGPRIdxMode) {
    // Enable indexing of either the instruction source or destination,
    // depending on whether this lowers an indirect read or write.
    unsigned IdxMode = IsIndirectSrc ?
      VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
    if (Offset == 0) {
      MachineInstr *SetOn =
          BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
          .add(*Idx)
          .addImm(IdxMode);

      // Mark the implicit M0 use as undef; M0 is fully (re)defined here.
      SetOn->getOperand(3).setIsUndef();
    } else {
      // Fold the residual constant offset into the index first.
      unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
        .add(*Idx)
        .addImm(Offset);
      MachineInstr *SetOn =
          BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
          .addReg(Tmp, RegState::Kill)
          .addImm(IdxMode);

      SetOn->getOperand(3).setIsUndef();
    }

    return true;
  }

  // movrel path: the index (plus any residual offset) lives in M0.
  if (Offset == 0) {
    BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .add(*Idx);
  } else {
    BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
      .add(*Idx)
      .addImm(Offset);
  }

  return true;
}

// Control flow needs to be inserted if indexing with a VGPR.
// Lower SI_INDIRECT_SRC_*: read one 32-bit element of a vector register at a
// possibly divergent index. A uniform index becomes a single indexed move; a
// divergent index requires a waterfall loop.
static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
                                          MachineBasicBlock &MBB,
                                          const SISubtarget &ST) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();

  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);

  // Fold an in-range constant offset into the subregister index.
  unsigned SubReg;
  std::tie(SubReg, Offset)
    = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);

  bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);

  // Uniform (SGPR) index: single indexed read, no loop required.
  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    if (UseGPRIdxMode) {
      // TODO: Look at the uses to avoid the copy. This may require rescheduling
      // to avoid interfering with other uses, so probably requires a new
      // optimization pass.
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
        .addReg(SrcReg, RegState::Undef, SubReg)
        .addReg(SrcReg, RegState::Implicit)
        .addReg(AMDGPU::M0, RegState::Implicit);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
    } else {
      // movrel reads vector element SubReg + M0.
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
        .addReg(SrcReg, RegState::Undef, SubReg)
        .addReg(SrcReg, RegState::Implicit);
    }

    MI.eraseFromParent();

    return &MBB;
  }

  // Divergent (VGPR) index: build the waterfall loop.
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);

  if (UseGPRIdxMode) {
    MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
      .addImm(0) // Reset inside loop.
      .addImm(VGPRIndexMode::SRC0_ENABLE);
    // M0 is fully redefined by S_SET_GPR_IDX_ON; mark the implicit use undef.
    SetOn->getOperand(3).setIsUndef();

    // Disable again after the loop.
    BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
  }

  auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, UseGPRIdxMode);
  MachineBasicBlock *LoopBB = InsPt->getParent();

  // Emit the per-iteration indexed read inside the loop body.
  if (UseGPRIdxMode) {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
      .addReg(SrcReg, RegState::Undef, SubReg)
      .addReg(SrcReg, RegState::Implicit)
      .addReg(AMDGPU::M0, RegState::Implicit);
  } else {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
      .addReg(SrcReg, RegState::Undef, SubReg)
      .addReg(SrcReg, RegState::Implicit);
  }

  MI.eraseFromParent();

  return LoopBB;
}

// Select the V_MOVRELD_B32 pseudo matching the vector register size.
static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
                                 const TargetRegisterClass *VecRC) {
  switch (TRI.getRegSizeInBits(*VecRC)) {
  case 32: // 4 bytes
    return AMDGPU::V_MOVRELD_B32_V1;
  case 64: // 8 bytes
    return AMDGPU::V_MOVRELD_B32_V2;
  case 128: // 16 bytes
    return AMDGPU::V_MOVRELD_B32_V4;
  case 256: // 32 bytes
    return AMDGPU::V_MOVRELD_B32_V8;
  case 512: // 64 bytes
    return AMDGPU::V_MOVRELD_B32_V16;
  default:
    llvm_unreachable("unsupported size for MOVRELD pseudos");
  }
}

// Lower SI_INDIRECT_DST_*: write one 32-bit element of a vector register at a
// possibly divergent index. Mirrors emitIndirectSrc for the store direction.
static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
                                          MachineBasicBlock &MBB,
                                          const SISubtarget &ST) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Dst = MI.getOperand(0).getReg();
  const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());

  // This can be an immediate, but will be folded later.
  assert(Val->getReg());

  unsigned SubReg;
  std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
                                                         SrcVec->getReg(),
                                                         Offset);
  bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);

  // No index register at all: the element is statically known, so this is a
  // plain subregister insert.
  if (Idx->getReg() == AMDGPU::NoRegister) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    assert(Offset == 0);

    BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
      .add(*SrcVec)
      .add(*Val)
      .addImm(SubReg);

    MI.eraseFromParent();
    return &MBB;
  }

  // Uniform (SGPR) index: single indexed write.
  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    if (UseGPRIdxMode) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
        .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
        .add(*Val)
        .addReg(Dst, RegState::ImplicitDefine)
        .addReg(SrcVec->getReg(), RegState::Implicit)
        .addReg(AMDGPU::M0, RegState::Implicit);

      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
    } else {
      const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));

      BuildMI(MBB, I, DL, MovRelDesc)
        .addReg(Dst, RegState::Define)
        .addReg(SrcVec->getReg())
        .add(*Val)
        .addImm(SubReg - AMDGPU::sub0);
    }

    MI.eraseFromParent();
    return &MBB;
  }

  // Divergent (VGPR) index: Val is re-read on every loop iteration, so its
  // kill flags are no longer accurate.
  if (Val->isReg())
    MRI.clearKillFlags(Val->getReg());

  const DebugLoc &DL = MI.getDebugLoc();

  if (UseGPRIdxMode) {
    MachineBasicBlock::iterator I(&MI);

    MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
      .addImm(0) // Reset inside loop.
      .addImm(VGPRIndexMode::DST_ENABLE);
    SetOn->getOperand(3).setIsUndef();

    // Disable again after the loop.
    BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
  }

  unsigned PhiReg = MRI.createVirtualRegister(VecRC);

  auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
                              Offset, UseGPRIdxMode);
  MachineBasicBlock *LoopBB = InsPt->getParent();

  // Emit the per-iteration indexed write inside the loop body.
  if (UseGPRIdxMode) {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
      .addReg(PhiReg, RegState::Undef, SubReg) // vdst
      .add(*Val) // src0
      .addReg(Dst, RegState::ImplicitDefine)
      .addReg(PhiReg, RegState::Implicit)
      .addReg(AMDGPU::M0, RegState::Implicit);
  } else {
    const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));

    BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
      .addReg(Dst, RegState::Define)
      .addReg(PhiReg)
      .add(*Val)
      .addImm(SubReg - AMDGPU::sub0);
  }

  MI.eraseFromParent();

  return LoopBB;
}

MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
  MachineInstr &MI, MachineBasicBlock *BB) const {

  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  MachineFunction *MF = BB->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();

  if (TII->isMIMG(MI)) {
    if (!MI.memoperands_empty())
      return BB;
    // Add a memoperand for mimg instructions so that they aren't assumed to
    // be ordered memory instructions.

    MachinePointerInfo PtrInfo(MFI->getImagePSV());
    MachineMemOperand::Flags Flags = MachineMemOperand::MODereferenceable;
    if (MI.mayStore())
      Flags |= MachineMemOperand::MOStore;

    if (MI.mayLoad())
      Flags |= MachineMemOperand::MOLoad;

    // Size/alignment of 0: the exact footprint of the image access is unknown.
    auto MMO = MF->getMachineMemOperand(PtrInfo, Flags, 0, 0);
    MI.addMemOperand(*MF, MMO);
    return BB;
  }

  switch (MI.getOpcode()) {
  case AMDGPU::SI_INIT_M0:
    // Expand to a plain S_MOV_B32 into M0.
    BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
            TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .add(MI.getOperand(0));
    MI.eraseFromParent();
    return BB;

  case AMDGPU::SI_INIT_EXEC:
    // This should be before all vector instructions.
    BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
            AMDGPU::EXEC)
        .addImm(MI.getOperand(0).getImm());
    MI.eraseFromParent();
    return BB;

  case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
    // Extract the thread count from an SGPR input and set EXEC accordingly.
    // Since BFM can't shift by 64, handle that case with CMP + CMOV.
    //
    // S_BFE_U32 count, input, {shift, 7}
    // S_BFM_B64 exec, count, 0
    // S_CMP_EQ_U32 count, 64
    // S_CMOV_B64 exec, -1
    MachineInstr *FirstMI = &*BB->begin();
    MachineRegisterInfo &MRI = MF->getRegInfo();
    unsigned InputReg = MI.getOperand(0).getReg();
    unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    bool Found = false;

    // Move the COPY of the input reg to the beginning, so that we can use it.
    for (auto I = BB->begin(); I != &MI; I++) {
      if (I->getOpcode() != TargetOpcode::COPY ||
          I->getOperand(0).getReg() != InputReg)
        continue;

      if (I == FirstMI) {
        // Already first; just insert the new code after it.
        FirstMI = &*++BB->begin();
      } else {
        I->removeFromParent();
        BB->insert(FirstMI, &*I);
      }
      Found = true;
      break;
    }
    assert(Found);
    (void)Found;

    // This should be before all vector instructions.
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
        .addReg(InputReg)
        .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
            AMDGPU::EXEC)
        .addReg(CountReg)
        .addImm(0);
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
        .addReg(CountReg, RegState::Kill)
        .addImm(64);
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
            AMDGPU::EXEC)
        .addImm(-1);
    MI.eraseFromParent();
    return BB;
  }

  case AMDGPU::GET_GROUPSTATICSIZE: {
    // The statically-allocated LDS size is a compile-time constant by now.
    DebugLoc DL = MI.getDebugLoc();
    BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
      .add(MI.getOperand(0))
      .addImm(MFI->getLDSSize());
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::SI_INDIRECT_SRC_V1:
  case AMDGPU::SI_INDIRECT_SRC_V2:
  case AMDGPU::SI_INDIRECT_SRC_V4:
  case AMDGPU::SI_INDIRECT_SRC_V8:
  case AMDGPU::SI_INDIRECT_SRC_V16:
    return emitIndirectSrc(MI, *BB, *getSubtarget());
  case AMDGPU::SI_INDIRECT_DST_V1:
  case AMDGPU::SI_INDIRECT_DST_V2:
  case AMDGPU::SI_INDIRECT_DST_V4:
  case AMDGPU::SI_INDIRECT_DST_V8:
  case AMDGPU::SI_INDIRECT_DST_V16:
    return emitIndirectDst(MI, *BB, *getSubtarget());
  case AMDGPU::SI_KILL:
    return splitKillBlock(MI, BB);
  case AMDGPU::V_CNDMASK_B64_PSEUDO: {
    // Expand the 64-bit select pseudo into two 32-bit V_CNDMASKs on the low
    // and high halves, recombined with a REG_SEQUENCE.
    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

    unsigned Dst = MI.getOperand(0).getReg();
    unsigned Src0 = MI.getOperand(1).getReg();
    unsigned Src1 = MI.getOperand(2).getReg();
    const DebugLoc &DL = MI.getDebugLoc();
    unsigned SrcCond = MI.getOperand(3).getReg();

    unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
      .addReg(Src0, 0, AMDGPU::sub0)
      .addReg(Src1, 0, AMDGPU::sub0)
      .addReg(SrcCond);
    BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
      .addReg(Src0, 0, AMDGPU::sub1)
      .addReg(Src1, 0, AMDGPU::sub1)
      .addReg(SrcCond);

    BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
      .addReg(DstLo)
      .addImm(AMDGPU::sub0)
      .addReg(DstHi)
      .addImm(AMDGPU::sub1);
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::SI_BR_UNDEF: {
    const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
    const DebugLoc &DL = MI.getDebugLoc();
    MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
                           .add(MI.getOperand(0));
    Br->getOperand(1).setIsUndef(true); // read undef SCC
    MI.eraseFromParent();
    return BB;
  }
  default:
    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  }
}

bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  // This currently forces unfolding various combinations of fsub into fma with
  // free fneg'd operands. As long as we have fast FMA (controlled by
  // isFMAFasterThanFMulAndFAdd), we should perform these.

  // When fma is quarter rate, for f64 where add / sub are at best half rate,
  // most of these combines appear to be cycle neutral but save on instruction
  // count / code size.
2255 return true; 2256 } 2257 2258 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, 2259 EVT VT) const { 2260 if (!VT.isVector()) { 2261 return MVT::i1; 2262 } 2263 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements()); 2264 } 2265 2266 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const { 2267 // TODO: Should i16 be used always if legal? For now it would force VALU 2268 // shifts. 2269 return (VT == MVT::i16) ? MVT::i16 : MVT::i32; 2270 } 2271 2272 // Answering this is somewhat tricky and depends on the specific device which 2273 // have different rates for fma or all f64 operations. 2274 // 2275 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other 2276 // regardless of which device (although the number of cycles differs between 2277 // devices), so it is always profitable for f64. 2278 // 2279 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable 2280 // only on full rate devices. Normally, we should prefer selecting v_mad_f32 2281 // which we can always do even without fused FP ops since it returns the same 2282 // result as the separate operations and since it is always full 2283 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32 2284 // however does not support denormals, so we do report fma as faster if we have 2285 // a fast fma device and require denormals. 2286 // 2287 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 2288 VT = VT.getScalarType(); 2289 2290 switch (VT.getSimpleVT().SimpleTy) { 2291 case MVT::f32: 2292 // This is as fast on some subtargets. However, we always have full rate f32 2293 // mad available which returns the same result as the separate operations 2294 // which we should prefer over fma. We can't use this if we want to support 2295 // denormals, so only report this in these cases. 
2296 return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32(); 2297 case MVT::f64: 2298 return true; 2299 case MVT::f16: 2300 return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals(); 2301 default: 2302 break; 2303 } 2304 2305 return false; 2306 } 2307 2308 //===----------------------------------------------------------------------===// 2309 // Custom DAG Lowering Operations 2310 //===----------------------------------------------------------------------===// 2311 2312 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 2313 switch (Op.getOpcode()) { 2314 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); 2315 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 2316 case ISD::LOAD: { 2317 SDValue Result = LowerLOAD(Op, DAG); 2318 assert((!Result.getNode() || 2319 Result.getNode()->getNumValues() == 2) && 2320 "Load should return a value and a chain"); 2321 return Result; 2322 } 2323 2324 case ISD::FSIN: 2325 case ISD::FCOS: 2326 return LowerTrig(Op, DAG); 2327 case ISD::SELECT: return LowerSELECT(Op, DAG); 2328 case ISD::FDIV: return LowerFDIV(Op, DAG); 2329 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG); 2330 case ISD::STORE: return LowerSTORE(Op, DAG); 2331 case ISD::GlobalAddress: { 2332 MachineFunction &MF = DAG.getMachineFunction(); 2333 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 2334 return LowerGlobalAddress(MFI, Op, DAG); 2335 } 2336 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 2337 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 2338 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); 2339 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG); 2340 case ISD::INSERT_VECTOR_ELT: 2341 return lowerINSERT_VECTOR_ELT(Op, DAG); 2342 case ISD::EXTRACT_VECTOR_ELT: 2343 return lowerEXTRACT_VECTOR_ELT(Op, DAG); 2344 case ISD::FP_ROUND: 2345 return lowerFP_ROUND(Op, DAG); 2346 2347 case ISD::TRAP: 2348 case 
ISD::DEBUGTRAP: 2349 return lowerTRAP(Op, DAG); 2350 } 2351 return SDValue(); 2352 } 2353 2354 void SITargetLowering::ReplaceNodeResults(SDNode *N, 2355 SmallVectorImpl<SDValue> &Results, 2356 SelectionDAG &DAG) const { 2357 switch (N->getOpcode()) { 2358 case ISD::INSERT_VECTOR_ELT: { 2359 if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG)) 2360 Results.push_back(Res); 2361 return; 2362 } 2363 case ISD::EXTRACT_VECTOR_ELT: { 2364 if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG)) 2365 Results.push_back(Res); 2366 return; 2367 } 2368 case ISD::INTRINSIC_WO_CHAIN: { 2369 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 2370 switch (IID) { 2371 case Intrinsic::amdgcn_cvt_pkrtz: { 2372 SDValue Src0 = N->getOperand(1); 2373 SDValue Src1 = N->getOperand(2); 2374 SDLoc SL(N); 2375 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32, 2376 Src0, Src1); 2377 2378 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt)); 2379 return; 2380 } 2381 default: 2382 break; 2383 } 2384 } 2385 case ISD::SELECT: { 2386 SDLoc SL(N); 2387 EVT VT = N->getValueType(0); 2388 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); 2389 SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1)); 2390 SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2)); 2391 2392 EVT SelectVT = NewVT; 2393 if (NewVT.bitsLT(MVT::i32)) { 2394 LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS); 2395 RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS); 2396 SelectVT = MVT::i32; 2397 } 2398 2399 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT, 2400 N->getOperand(0), LHS, RHS); 2401 2402 if (NewVT != SelectVT) 2403 NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect); 2404 Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect)); 2405 return; 2406 } 2407 default: 2408 break; 2409 } 2410 } 2411 2412 /// \brief Helper function for LowerBRCOND 2413 static SDNode *findUser(SDValue Value, 
unsigned Opcode) { 2414 2415 SDNode *Parent = Value.getNode(); 2416 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); 2417 I != E; ++I) { 2418 2419 if (I.getUse().get() != Value) 2420 continue; 2421 2422 if (I->getOpcode() == Opcode) 2423 return *I; 2424 } 2425 return nullptr; 2426 } 2427 2428 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { 2429 if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 2430 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) { 2431 case Intrinsic::amdgcn_if: 2432 return AMDGPUISD::IF; 2433 case Intrinsic::amdgcn_else: 2434 return AMDGPUISD::ELSE; 2435 case Intrinsic::amdgcn_loop: 2436 return AMDGPUISD::LOOP; 2437 case Intrinsic::amdgcn_end_cf: 2438 llvm_unreachable("should not occur"); 2439 default: 2440 return 0; 2441 } 2442 } 2443 2444 // break, if_break, else_break are all only used as inputs to loop, not 2445 // directly as branch conditions. 2446 return 0; 2447 } 2448 2449 void SITargetLowering::createDebuggerPrologueStackObjects( 2450 MachineFunction &MF) const { 2451 // Create stack objects that are used for emitting debugger prologue. 2452 // 2453 // Debugger prologue writes work group IDs and work item IDs to scratch memory 2454 // at fixed location in the following format: 2455 // offset 0: work group ID x 2456 // offset 4: work group ID y 2457 // offset 8: work group ID z 2458 // offset 16: work item ID x 2459 // offset 20: work item ID y 2460 // offset 24: work item ID z 2461 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 2462 int ObjectIdx = 0; 2463 2464 // For each dimension: 2465 for (unsigned i = 0; i < 3; ++i) { 2466 // Create fixed stack object for work group ID. 2467 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true); 2468 Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx); 2469 // Create fixed stack object for work item ID. 
2470 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true); 2471 Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx); 2472 } 2473 } 2474 2475 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const { 2476 const Triple &TT = getTargetMachine().getTargetTriple(); 2477 return GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS && 2478 AMDGPU::shouldEmitConstantsToTextSection(TT); 2479 } 2480 2481 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const { 2482 return (GV->getType()->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS || 2483 GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) && 2484 !shouldEmitFixup(GV) && 2485 !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); 2486 } 2487 2488 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const { 2489 return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV); 2490 } 2491 2492 /// This transforms the control flow intrinsics to get the branch destination as 2493 /// last parameter, also switches branch target with BR if the need arise 2494 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, 2495 SelectionDAG &DAG) const { 2496 SDLoc DL(BRCOND); 2497 2498 SDNode *Intr = BRCOND.getOperand(1).getNode(); 2499 SDValue Target = BRCOND.getOperand(2); 2500 SDNode *BR = nullptr; 2501 SDNode *SetCC = nullptr; 2502 2503 if (Intr->getOpcode() == ISD::SETCC) { 2504 // As long as we negate the condition everything is fine 2505 SetCC = Intr; 2506 Intr = SetCC->getOperand(0).getNode(); 2507 2508 } else { 2509 // Get the target from BR if we don't negate the condition 2510 BR = findUser(BRCOND, ISD::BR); 2511 Target = BR->getOperand(1); 2512 } 2513 2514 // FIXME: This changes the types of the intrinsics instead of introducing new 2515 // nodes with the correct types. 2516 // e.g. 
llvm.amdgcn.loop 2517 2518 // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3 2519 // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088> 2520 2521 unsigned CFNode = isCFIntrinsic(Intr); 2522 if (CFNode == 0) { 2523 // This is a uniform branch so we don't need to legalize. 2524 return BRCOND; 2525 } 2526 2527 bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID || 2528 Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN; 2529 2530 assert(!SetCC || 2531 (SetCC->getConstantOperandVal(1) == 1 && 2532 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() == 2533 ISD::SETNE)); 2534 2535 // operands of the new intrinsic call 2536 SmallVector<SDValue, 4> Ops; 2537 if (HaveChain) 2538 Ops.push_back(BRCOND.getOperand(0)); 2539 2540 Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end()); 2541 Ops.push_back(Target); 2542 2543 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end()); 2544 2545 // build the new intrinsic call 2546 SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode(); 2547 2548 if (!HaveChain) { 2549 SDValue Ops[] = { 2550 SDValue(Result, 0), 2551 BRCOND.getOperand(0) 2552 }; 2553 2554 Result = DAG.getMergeValues(Ops, DL).getNode(); 2555 } 2556 2557 if (BR) { 2558 // Give the branch instruction our target 2559 SDValue Ops[] = { 2560 BR->getOperand(0), 2561 BRCOND.getOperand(2) 2562 }; 2563 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops); 2564 DAG.ReplaceAllUsesWith(BR, NewBR.getNode()); 2565 BR = NewBR.getNode(); 2566 } 2567 2568 SDValue Chain = SDValue(Result, Result->getNumValues() - 1); 2569 2570 // Copy the intrinsic results to registers 2571 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) { 2572 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg); 2573 if (!CopyToReg) 2574 continue; 2575 2576 Chain = DAG.getCopyToReg( 2577 Chain, DL, 2578 CopyToReg->getOperand(1), 2579 SDValue(Result, i - 1), 2580 SDValue()); 2581 
2582 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0)); 2583 } 2584 2585 // Remove the old intrinsic from the chain 2586 DAG.ReplaceAllUsesOfValueWith( 2587 SDValue(Intr, Intr->getNumValues() - 1), 2588 Intr->getOperand(0)); 2589 2590 return Chain; 2591 } 2592 2593 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG, 2594 SDValue Op, 2595 const SDLoc &DL, 2596 EVT VT) const { 2597 return Op.getValueType().bitsLE(VT) ? 2598 DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) : 2599 DAG.getNode(ISD::FTRUNC, DL, VT, Op); 2600 } 2601 2602 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { 2603 assert(Op.getValueType() == MVT::f16 && 2604 "Do not know how to custom lower FP_ROUND for non-f16 type"); 2605 2606 SDValue Src = Op.getOperand(0); 2607 EVT SrcVT = Src.getValueType(); 2608 if (SrcVT != MVT::f64) 2609 return Op; 2610 2611 SDLoc DL(Op); 2612 2613 SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src); 2614 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16); 2615 return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc); 2616 } 2617 2618 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const { 2619 SDLoc SL(Op); 2620 MachineFunction &MF = DAG.getMachineFunction(); 2621 SDValue Chain = Op.getOperand(0); 2622 2623 unsigned TrapID = Op.getOpcode() == ISD::DEBUGTRAP ? 
    SISubtarget::TrapIDLLVMDebugTrap : SISubtarget::TrapIDLLVMTrap;

  // With the HSA trap-handler ABI the queue pointer must be live in
  // SGPR0/SGPR1 when the trap instruction executes.
  if (Subtarget->getTrapHandlerAbi() == SISubtarget::TrapHandlerAbiHsa &&
      Subtarget->isTrapHandlerEnabled()) {
    SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
    unsigned UserSGPR = Info->getQueuePtrUserSGPR();
    assert(UserSGPR != AMDGPU::NoRegister);

    SDValue QueuePtr = CreateLiveInRegister(
      DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);

    SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);

    // Copy the queue pointer into the fixed register pair; the glue result
    // keeps the copy adjacent to the trap node.
    SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
                                     QueuePtr, SDValue());

    SDValue Ops[] = {
      ToReg,
      DAG.getTargetConstant(TrapID, SL, MVT::i16),
      SGPR01,
      ToReg.getValue(1)
    };

    return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
  }

  // No trap handler available: llvm.trap ends the program, llvm.debugtrap
  // only warns and becomes a no-op.
  switch (TrapID) {
  case SISubtarget::TrapIDLLVMTrap:
    return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
  case SISubtarget::TrapIDLLVMDebugTrap: {
    DiagnosticInfoUnsupported NoTrap(*MF.getFunction(),
                                     "debugtrap handler not supported",
                                     Op.getDebugLoc(),
                                     DS_Warning);
    LLVMContext &Ctx = MF.getFunction()->getContext();
    Ctx.diagnose(NoTrap);
    return Chain;
  }
  default:
    llvm_unreachable("unsupported trap handler type!");
  }

  return Chain;
}

/// Return the high 32 bits of the flat-address aperture for the given
/// segment address space (LOCAL or PRIVATE). The result is combined with a
/// 32-bit segment offset (see lowerADDRSPACECAST) to build a 64-bit flat
/// pointer.
///
/// \param AS  address space being converted from (LOCAL or PRIVATE).
/// \param DL  debug location for the created nodes.
/// \returns an i32 SDValue holding the aperture base's high half.
SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
                                             SelectionDAG &DAG) const {
  // FIXME: Use inline constants (src_{shared, private}_base) instead.
  if (Subtarget->hasApertureRegs()) {
    // Read the aperture directly from the hardware register file via
    // S_GETREG_B32. Encoding packs {id, offset, width-1} into the immediate.
    unsigned Offset = AS == AMDGPUASI.LOCAL_ADDRESS ?
      AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
      AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
    unsigned WidthM1 = AS == AMDGPUASI.LOCAL_ADDRESS ?
      AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
      AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
    unsigned Encoding =
        AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
        Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
        WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;

    SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
    SDValue ApertureReg = SDValue(
        DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
    // Shift the extracted field back into its final bit position.
    SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
    return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
  }

  // No aperture registers: load the aperture base from the amd_queue_t
  // structure reachable through the queue-pointer user SGPR.
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
  assert(UserSGPR != AMDGPU::NoRegister);

  SDValue QueuePtr = CreateLiveInRegister(
    DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);

  // Offset into amd_queue_t for group_segment_aperture_base_hi /
  // private_segment_aperture_base_hi.
  uint32_t StructOffset = (AS == AMDGPUASI.LOCAL_ADDRESS) ? 0x40 : 0x44;

  SDValue Ptr = DAG.getNode(ISD::ADD, DL, MVT::i64, QueuePtr,
                            DAG.getConstant(StructOffset, DL, MVT::i64));

  // TODO: Use custom target PseudoSourceValue.
  // TODO: We should use the value from the IR intrinsic call, but it might not
  // be available and how do we get it?
  Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
                                              AMDGPUASI.CONSTANT_ADDRESS));

  MachinePointerInfo PtrInfo(V, StructOffset);
  // The aperture value never changes during execution, so the load is
  // dereferenceable and invariant.
  return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
                     MinAlign(64, StructOffset),
                     MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant);
}

/// Lower ISD::ADDRSPACECAST between flat and the LOCAL/PRIVATE segment
/// address spaces. Null pointers are translated between the two
/// representations; any other cast is diagnosed as invalid.
SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc SL(Op);
  const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);

  SDValue Src = ASC->getOperand(0);
  SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);

  const AMDGPUTargetMachine &TM =
    static_cast<const AMDGPUTargetMachine &>(getTargetMachine());

  // flat -> local/private
  if (ASC->getSrcAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
    unsigned DestAS = ASC->getDestAddressSpace();

    if (DestAS == AMDGPUASI.LOCAL_ADDRESS ||
        DestAS == AMDGPUASI.PRIVATE_ADDRESS) {
      unsigned NullVal = TM.getNullPointerValue(DestAS);
      SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
      SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
      // Dropping the high half of the flat pointer yields the segment offset.
      SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);

      return DAG.getNode(ISD::SELECT, SL, MVT::i32,
                         NonNull, Ptr, SegmentNullPtr);
    }
  }

  // local/private -> flat
  if (ASC->getDestAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
    unsigned SrcAS = ASC->getSrcAddressSpace();

    if (SrcAS == AMDGPUASI.LOCAL_ADDRESS ||
        SrcAS == AMDGPUASI.PRIVATE_ADDRESS) {
      unsigned NullVal = TM.getNullPointerValue(SrcAS);
      SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);

      SDValue NonNull
        = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);

      // Flat pointer = {segment offset, aperture high half}.
      SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
      SDValue CvtPtr
        = DAG.getNode(ISD::BUILD_VECTOR,
                      SL, MVT::v2i32, Src, Aperture);

      return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
                         DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
                         FlatNullPtr);
    }
  }

  // global <-> flat are no-ops and never emitted.

  const MachineFunction &MF = DAG.getMachineFunction();
  DiagnosticInfoUnsupported InvalidAddrSpaceCast(
    *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
  DAG.getContext()->diagnose(InvalidAddrSpaceCast);

  return DAG.getUNDEF(ASC->getValueType(0));
}

/// Custom lowering for INSERT_VECTOR_ELT with a dynamic (non-constant)
/// index. Constant indices return SDValue() to use the default expansion.
/// The dynamic case is lowered to integer bit operations on the bitcast
/// 32-bit vector so no stack slot is needed; the pattern maps to
/// v_bfi_b32 / v_bfm_b32.
SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDValue Idx = Op.getOperand(2);
  if (isa<ConstantSDNode>(Idx))
    return SDValue();

  // Avoid stack access for dynamic indexing.
  SDLoc SL(Op);
  SDValue Vec = Op.getOperand(0);
  SDValue Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Op.getOperand(1));

  // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
  SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Val);

  // Convert vector index to bit-index (each element is 16 bits wide).
  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx,
                                  DAG.getConstant(16, SL, MVT::i32));

  SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);

  // BFM: 16-bit mask positioned at the selected element.
  SDValue BFM = DAG.getNode(ISD::SHL, SL, MVT::i32,
                            DAG.getConstant(0xffff, SL, MVT::i32),
                            ScaledIdx);

  // Insert the new value under the mask, keep the other element outside it.
  SDValue LHS = DAG.getNode(ISD::AND, SL, MVT::i32, BFM, ExtVal);
  SDValue RHS = DAG.getNode(ISD::AND, SL, MVT::i32,
                            DAG.getNOT(SL, BFM, MVT::i32), BCVec);

  SDValue BFI = DAG.getNode(ISD::OR, SL, MVT::i32, LHS, RHS);
  return DAG.getNode(ISD::BITCAST, SL, Op.getValueType(), BFI);
}

/// Custom lowering for EXTRACT_VECTOR_ELT on 16-bit-element vectors.
/// Constant indices become a shift of the bitcast i32; dynamic indices
/// become a variable shift by the bit-index.
SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc SL(Op);

  EVT ResultVT = Op.getValueType();
  SDValue Vec = Op.getOperand(0);
  SDValue Idx = Op.getOperand(1);

  DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);

  // Make sure we do any optimizations that will make it easier to fold
  // source modifiers before obscuring it with bit operations.

  // XXX - Why doesn't this get called when vector_shuffle is expanded?
  if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
    return Combined;

  if (const ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
    SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);

    // Element 1 lives in the high 16 bits; element 0 needs no shift.
    if (CIdx->getZExtValue() == 1) {
      Result = DAG.getNode(ISD::SRL, SL, MVT::i32, Result,
                           DAG.getConstant(16, SL, MVT::i32));
    } else {
      assert(CIdx->getZExtValue() == 0);
    }

    if (ResultVT.bitsLT(MVT::i32))
      Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);
    return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
  }

  SDValue Sixteen = DAG.getConstant(16, SL, MVT::i32);

  // Convert vector index to bit-index.
  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, Sixteen);

  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
  SDValue Elt = DAG.getNode(ISD::SRL, SL, MVT::i32, BC, ScaledIdx);

  SDValue Result = Elt;
  if (ResultVT.bitsLT(MVT::i32))
    Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);

  return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
}

bool
SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // We can fold offsets for anything that doesn't require a GOT relocation.
  return (GA->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
          GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) &&
         !shouldEmitGOTReloc(GA->getGlobal());
}

/// Build a PC-relative address for \p GV as a PC_ADD_REL_OFFSET node.
/// \p GAFlags selects the relocation variant; MO_NONE emits a plain fixup,
/// otherwise \p GAFlags / \p GAFlags + 1 are the lo/hi relocation kinds.
static SDValue
buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
                        const SDLoc &DL, unsigned Offset, EVT PtrVT,
                        unsigned GAFlags = SIInstrInfo::MO_NONE) {
  // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
  // lowered to the following code sequence:
  //
  // For constant address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol
  //   s_addc_u32 s1, s1, 0
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   a fixup or relocation is emitted to replace $symbol with a literal
  //   constant, which is a pc-relative offset from the encoding of the $symbol
  //   operand to the global variable.
  //
  // For global address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
  //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   fixups or relocations are emitted to replace $symbol@*@lo and
  //   $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
  //   which is a 64-bit pc-relative offset from the encoding of the $symbol
  //   operand to the global variable.
  //
  // What we want here is an offset from the value returned by s_getpc
  // (which is the address of the s_add_u32 instruction) to the global
  // variable, but since the encoding of $symbol starts 4 bytes after the start
  // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
  // small. This requires us to add 4 to the global variable offset in order to
  // compute the correct address.
  SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
                                             GAFlags);
  SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
                                             GAFlags == SIInstrInfo::MO_NONE ?
                                             GAFlags : GAFlags + 1);
  return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
}

/// Lower a global address in the GLOBAL or CONSTANT address space, choosing
/// between an absolute fixup, a direct PC-relative relocation, or an
/// indirect load through the GOT; other address spaces defer to the
/// superclass.
SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                             SDValue Op,
                                             SelectionDAG &DAG) const {
  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);

  if (GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
      GSD->getAddressSpace() != AMDGPUASI.GLOBAL_ADDRESS)
    return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);

  SDLoc DL(GSD);
  const GlobalValue *GV = GSD->getGlobal();
  EVT PtrVT = Op.getValueType();

  if (shouldEmitFixup(GV))
    return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
  else if (shouldEmitPCReloc(GV))
    return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
                                   SIInstrInfo::MO_REL32);

  // GOT path: materialize the GOT slot address, then load the pointer out
  // of it.
  SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
                                            SIInstrInfo::MO_GOTPCREL32);

  Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
  const DataLayout &DataLayout = DAG.getDataLayout();
  unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
  // FIXME: Use a PseudoSourceValue once those can be assigned an address space.
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
                     MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant);
}

/// Emit an s_mov_b32 of \p V into m0 and return the result chain; the glue
/// output (value 1) must be consumed by the node that needs m0 live.
SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
                                   const SDLoc &DL, SDValue V) const {
  // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
  // the destination register.
  //
  // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
  // so we will end up with redundant moves to m0.
2948 // 2949 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result. 2950 2951 // A Null SDValue creates a glue result. 2952 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue, 2953 V, Chain); 2954 return SDValue(M0, 0); 2955 } 2956 2957 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, 2958 SDValue Op, 2959 MVT VT, 2960 unsigned Offset) const { 2961 SDLoc SL(Op); 2962 SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL, 2963 DAG.getEntryNode(), Offset, false); 2964 // The local size values will have the hi 16-bits as zero. 2965 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param, 2966 DAG.getValueType(VT)); 2967 } 2968 2969 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, 2970 EVT VT) { 2971 DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(), 2972 "non-hsa intrinsic with hsa target", 2973 DL.getDebugLoc()); 2974 DAG.getContext()->diagnose(BadIntrin); 2975 return DAG.getUNDEF(VT); 2976 } 2977 2978 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, 2979 EVT VT) { 2980 DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(), 2981 "intrinsic not supported on subtarget", 2982 DL.getDebugLoc()); 2983 DAG.getContext()->diagnose(BadIntrin); 2984 return DAG.getUNDEF(VT); 2985 } 2986 2987 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 2988 SelectionDAG &DAG) const { 2989 MachineFunction &MF = DAG.getMachineFunction(); 2990 auto MFI = MF.getInfo<SIMachineFunctionInfo>(); 2991 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2992 2993 EVT VT = Op.getValueType(); 2994 SDLoc DL(Op); 2995 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2996 2997 // TODO: Should this propagate fast-math-flags? 

  switch (IntrinsicID) {
  // Pointer-argument intrinsics lowered to preloaded SGPR pairs.
  case Intrinsic::amdgcn_implicit_buffer_ptr: {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
  }
  case Intrinsic::amdgcn_dispatch_ptr:
  case Intrinsic::amdgcn_queue_ptr: {
    if (!Subtarget->isAmdCodeObjectV2(MF)) {
      DiagnosticInfoUnsupported BadIntrin(
          *MF.getFunction(), "unsupported hsa intrinsic without hsa target",
          DL.getDebugLoc());
      DAG.getContext()->diagnose(BadIntrin);
      return DAG.getUNDEF(VT);
    }

    auto Reg = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
      SIRegisterInfo::DISPATCH_PTR : SIRegisterInfo::QUEUE_PTR;
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass,
                                TRI->getPreloadedValue(MF, Reg), VT);
  }
  case Intrinsic::amdgcn_implicitarg_ptr: {
    unsigned offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
    return lowerKernArgParameterPtr(DAG, DL, DAG.getEntryNode(), offset);
  }
  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    unsigned Reg
      = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
  }
  case Intrinsic::amdgcn_dispatch_id: {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::DISPATCH_ID);
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
  }
  // Math intrinsics lowered directly to target ISD nodes.
  case Intrinsic::amdgcn_rcp:
    return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq:
    return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq_legacy:
    // The legacy instruction was removed on VI+.
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);

    return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rcp_legacy:
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);
    return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq_clamp: {
    if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
      return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));

    // VI+ has no RSQ_CLAMP instruction: emulate it by clamping RSQ to the
    // largest/smallest finite values of the type.
    Type *Type = VT.getTypeForEVT(*DAG.getContext());
    APFloat Max = APFloat::getLargest(Type->getFltSemantics());
    APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);

    SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
    SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
                              DAG.getConstantFP(Max, DL, VT));
    return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
                       DAG.getConstantFP(Min, DL, VT));
  }
  // Legacy r600 dispatch-geometry reads: only valid on non-HSA targets,
  // where the values are passed in the kernarg segment.
  case Intrinsic::r600_read_ngroups_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_X, false);
  case Intrinsic::r600_read_ngroups_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_Y, false);
  case Intrinsic::r600_read_ngroups_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_Z, false);
  case Intrinsic::r600_read_global_size_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_X, false);
  case Intrinsic::r600_read_global_size_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_Y, false);
  case Intrinsic::r600_read_global_size_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_Z, false);
  case Intrinsic::r600_read_local_size_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_X);
  case Intrinsic::r600_read_local_size_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Y);
  case Intrinsic::r600_read_local_size_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Z);
  // Workgroup IDs come in preloaded SGPRs, workitem IDs in preloaded VGPRs.
  case Intrinsic::amdgcn_workgroup_id_x:
  case Intrinsic::r600_read_tgid_x:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_X), VT);
  case Intrinsic::amdgcn_workgroup_id_y:
  case Intrinsic::r600_read_tgid_y:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Y), VT);
  case Intrinsic::amdgcn_workgroup_id_z:
  case Intrinsic::r600_read_tgid_z:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Z), VT);
  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::r600_read_tidig_x:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X), VT);
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::r600_read_tidig_y:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y), VT);
  case Intrinsic::amdgcn_workitem_id_z:
  case Intrinsic::r600_read_tidig_z:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z), VT);
  case AMDGPUIntrinsic::SI_load_const: {
    SDValue Ops[] = {
      Op.getOperand(1),
      Op.getOperand(2)
    };

    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
          MachineMemOperand::MOInvariant,
      VT.getStoreSize(), 4);
    return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }
  case Intrinsic::amdgcn_fdiv_fast:
    return lowerFDIV_FAST(Op, DAG);
  // Interpolation intrinsics: the attribute index operand must be copied
  // into m0 first; the glue keeps the m0 write adjacent to the interp node.
  case Intrinsic::amdgcn_interp_mov: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
    SDValue Glue = M0.getValue(1);
    return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Glue);
  }
  case Intrinsic::amdgcn_interp_p1: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
    SDValue Glue = M0.getValue(1);
    return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Glue);
  }
  case Intrinsic::amdgcn_interp_p2: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
    SDValue Glue = SDValue(M0.getNode(), 1);
    return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
                       Glue);
  }
  case Intrinsic::amdgcn_sin:
    return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_cos:
    return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_log_clamp: {
    // Legal pre-VI; unsupported on VI and newer.
    if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
      return SDValue();

    DiagnosticInfoUnsupported BadIntrin(
      *MF.getFunction(), "intrinsic not supported on subtarget",
      DL.getDebugLoc());
    DAG.getContext()->diagnose(BadIntrin);
    return DAG.getUNDEF(VT);
  }
  case Intrinsic::amdgcn_ldexp:
    return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));

  case Intrinsic::amdgcn_fract:
    return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_class:
    return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_div_fmas:
    return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(4));

  case Intrinsic::amdgcn_div_fixup:
    return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::amdgcn_trig_preop:
    return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_div_scale: {
    // 3rd parameter required to be a constant.
    const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!Param)
      return DAG.getUNDEF(VT);

    // Translate to the operands expected by the machine instruction. The
    // first parameter must be the same as the first instruction.
    SDValue Numerator = Op.getOperand(1);
    SDValue Denominator = Op.getOperand(2);

    // Note this order is opposite of the machine instruction's operations,
    // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
    // intrinsic has the numerator as the first operand to match a normal
    // division operation.

    SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;

    return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
                       Denominator, Numerator);
  }
  case Intrinsic::amdgcn_icmp: {
    // The condition-code operand must be a constant in the valid integer
    // predicate range; otherwise the result is undefined.
    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!CD)
      return DAG.getUNDEF(VT);

    int CondCode = CD->getSExtValue();
    if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
        CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
      return DAG.getUNDEF(VT);

    ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
    ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
    return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
                       Op.getOperand(2), DAG.getCondCode(CCOpcode));
  }
  case Intrinsic::amdgcn_fcmp: {
    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!CD)
      return DAG.getUNDEF(VT);

    int CondCode = CD->getSExtValue();
    if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
        CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE)
      return DAG.getUNDEF(VT);

    FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
    ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
    return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
                       Op.getOperand(2), DAG.getCondCode(CCOpcode));
  }
  case Intrinsic::amdgcn_fmed3:
    return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_fmul_legacy:
    return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_sffbh:
    return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_sbfe:
    return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_ubfe:
    return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_cvt_pkrtz: {
    // FIXME: Stop adding cast if v2f16 legal.
    EVT VT = Op.getValueType();
    SDValue Node = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, DL, MVT::i32,
                               Op.getOperand(1), Op.getOperand(2));
    return DAG.getNode(ISD::BITCAST, DL, VT, Node);
  }
  default:
    return Op;
  }
}

/// Custom lowering for chained intrinsics (loads/atomics with memory
/// side effects). Returns SDValue() to fall back to tablegen selection.
SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                                                 SelectionDAG &DAG) const {
  unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  SDLoc DL(Op);
  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    MemSDNode *M = cast<MemSDNode>(Op);
    unsigned Opc = (IntrID == Intrinsic::amdgcn_atomic_inc) ?
      AMDGPUISD::ATOMIC_INC : AMDGPUISD::ATOMIC_DEC;
    SDValue Ops[] = {
      M->getOperand(0), // Chain
      M->getOperand(2), // Ptr
      M->getOperand(3) // Value
    };

    return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }
  case Intrinsic::amdgcn_buffer_load:
  case Intrinsic::amdgcn_buffer_load_format: {
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // rsrc
      Op.getOperand(3), // vindex
      Op.getOperand(4), // offset
      Op.getOperand(5), // glc
      Op.getOperand(6) // slc
    };
    MachineFunction &MF = DAG.getMachineFunction();
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
      AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
    EVT VT = Op.getValueType();
    EVT IntVT = VT.changeTypeToInteger();

    // Attribute the access to the buffer pseudo-source value so alias
    // analysis treats it as buffer memory.
    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(MFI->getBufferPSV()),
      MachineMemOperand::MOLoad,
      VT.getStoreSize(), VT.getStoreSize());

    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, MMO);
  }
  // Basic sample.
  case Intrinsic::amdgcn_image_sample:
  case Intrinsic::amdgcn_image_sample_cl:
  case Intrinsic::amdgcn_image_sample_d:
  case Intrinsic::amdgcn_image_sample_d_cl:
  case Intrinsic::amdgcn_image_sample_l:
  case Intrinsic::amdgcn_image_sample_b:
  case Intrinsic::amdgcn_image_sample_b_cl:
  case Intrinsic::amdgcn_image_sample_lz:
  case Intrinsic::amdgcn_image_sample_cd:
  case Intrinsic::amdgcn_image_sample_cd_cl:

  // Sample with comparison.
  case Intrinsic::amdgcn_image_sample_c:
  case Intrinsic::amdgcn_image_sample_c_cl:
  case Intrinsic::amdgcn_image_sample_c_d:
  case Intrinsic::amdgcn_image_sample_c_d_cl:
  case Intrinsic::amdgcn_image_sample_c_l:
  case Intrinsic::amdgcn_image_sample_c_b:
  case Intrinsic::amdgcn_image_sample_c_b_cl:
  case Intrinsic::amdgcn_image_sample_c_lz:
  case Intrinsic::amdgcn_image_sample_c_cd:
  case Intrinsic::amdgcn_image_sample_c_cd_cl:

  // Sample with offsets.
  case Intrinsic::amdgcn_image_sample_o:
  case Intrinsic::amdgcn_image_sample_cl_o:
  case Intrinsic::amdgcn_image_sample_d_o:
  case Intrinsic::amdgcn_image_sample_d_cl_o:
  case Intrinsic::amdgcn_image_sample_l_o:
  case Intrinsic::amdgcn_image_sample_b_o:
  case Intrinsic::amdgcn_image_sample_b_cl_o:
  case Intrinsic::amdgcn_image_sample_lz_o:
  case Intrinsic::amdgcn_image_sample_cd_o:
  case Intrinsic::amdgcn_image_sample_cd_cl_o:

  // Sample with comparison and offsets.
  case Intrinsic::amdgcn_image_sample_c_o:
  case Intrinsic::amdgcn_image_sample_c_cl_o:
  case Intrinsic::amdgcn_image_sample_c_d_o:
  case Intrinsic::amdgcn_image_sample_c_d_cl_o:
  case Intrinsic::amdgcn_image_sample_c_l_o:
  case Intrinsic::amdgcn_image_sample_c_b_o:
  case Intrinsic::amdgcn_image_sample_c_b_cl_o:
  case Intrinsic::amdgcn_image_sample_c_lz_o:
  case Intrinsic::amdgcn_image_sample_c_cd_o:
  case Intrinsic::amdgcn_image_sample_c_cd_cl_o:

  case Intrinsic::amdgcn_image_getlod: {
    // Replace dmask with everything disabled with undef.
    const ConstantSDNode *DMask = dyn_cast<ConstantSDNode>(Op.getOperand(5));
    if (!DMask || DMask->isNullValue()) {
      SDValue Undef = DAG.getUNDEF(Op.getValueType());
      return DAG.getMergeValues({ Undef, Op.getOperand(0) }, SDLoc(Op));
    }

    return SDValue();
  }
  default:
    return SDValue();
  }
}

/// Custom lowering for void (side-effect only) intrinsics: exports,
/// messages, EXEC initialization, tbuffer stores, kill, and barriers.
/// Unhandled intrinsics are returned unchanged.
SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();

  switch (IntrinsicID) {
  case Intrinsic::amdgcn_exp: {
    const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
    const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
    const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
    const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));

    const SDValue Ops[] = {
      Chain,
      DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
      DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
      Op.getOperand(4), // src0
      Op.getOperand(5), // src1
      Op.getOperand(6), // src2
      Op.getOperand(7), // src3
      DAG.getTargetConstant(0, DL, MVT::i1), // compr
      DAG.getTargetConstant(VM->getZExtValue(),
                            DL, MVT::i1)
    };

    // The done bit selects the export flavor that signals completion.
    unsigned Opc = Done->isNullValue() ?
      AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
    return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
  }
  case Intrinsic::amdgcn_exp_compr: {
    const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
    const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
    SDValue Src0 = Op.getOperand(4);
    SDValue Src1 = Op.getOperand(5);
    const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
    const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));

    // Compressed export only uses the first two sources; the rest are undef.
    SDValue Undef = DAG.getUNDEF(MVT::f32);
    const SDValue Ops[] = {
      Chain,
      DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
      DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
      DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
      DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
      Undef, // src2
      Undef, // src3
      DAG.getTargetConstant(1, DL, MVT::i1), // compr
      DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
    };

    unsigned Opc = Done->isNullValue() ?
      AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
    return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
  }
  case Intrinsic::amdgcn_s_sendmsg:
  case Intrinsic::amdgcn_s_sendmsghalt: {
    unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ?
      AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
    // s_sendmsg reads its payload from m0; glue the m0 write to the send.
    Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
    SDValue Glue = Chain.getValue(1);
    return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
                       Op.getOperand(2), Glue);
  }
  case Intrinsic::amdgcn_init_exec: {
    return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
                       Op.getOperand(2));
  }
  case Intrinsic::amdgcn_init_exec_from_input: {
    return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
                       Op.getOperand(2), Op.getOperand(3));
  }
  case AMDGPUIntrinsic::SI_tbuffer_store: {
    SDValue Ops[] = {
      Chain,
      Op.getOperand(2),
      Op.getOperand(3),
      Op.getOperand(4),
      Op.getOperand(5),
      Op.getOperand(6),
      Op.getOperand(7),
      Op.getOperand(8),
      Op.getOperand(9),
      Op.getOperand(10),
      Op.getOperand(11),
      Op.getOperand(12),
      Op.getOperand(13),
      Op.getOperand(14)
    };

    EVT VT = Op.getOperand(3).getValueType();

    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOStore,
      VT.getStoreSize(), 4);
    return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }
  case AMDGPUIntrinsic::AMDGPU_kill: {
    SDValue Src = Op.getOperand(2);
    // A known-constant condition either folds the kill away entirely
    // (non-negative operand) or becomes an unconditional kill.
    if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) {
      if (!K->isNegative())
        return Chain;

      SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32);
      return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne);
    }

    SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src);
    return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast);
  }
  case Intrinsic::amdgcn_s_barrier: {
    // If the whole workgroup fits in one wave, the barrier is a no-op at
    // runtime; keep only a scheduling barrier when optimizing.
    if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
      const MachineFunction &MF = DAG.getMachineFunction();
      const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
      unsigned WGSize = ST.getFlatWorkGroupSizes(*MF.getFunction()).second;
      if (WGSize <= ST.getWavefrontSize())
        return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
                                          Op.getOperand(0)), 0);
    }
    return SDValue();
  };
  default:
    return Op;
  }
}

/// Custom lowering for loads: widens sub-32-bit scalar loads, expands
/// unaligned vector loads, and splits vector loads that exceed what the
/// target address space supports.
SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, RealMemVT, MMO);

    SDValue Ops[] = {
      DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
      NewLD.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }

  if (!MemVT.isVector())
    return SDValue();

  assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
         "Custom lowering for non-i32 vectors hasn't been implemented.");

  unsigned AS = Load->getAddressSpace();
  if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
                          AS, Load->getAlignment())) {
    SDValue Ops[2];
    std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
    return DAG.getMergeValues(Ops, DL);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // If there is a possibility that flat instruction access scratch memory
  // then we need to use the same legalization rules we use for private.
3567 if (AS == AMDGPUASI.FLAT_ADDRESS) 3568 AS = MFI->hasFlatScratchInit() ? 3569 AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS; 3570 3571 unsigned NumElements = MemVT.getVectorNumElements(); 3572 if (AS == AMDGPUASI.CONSTANT_ADDRESS) { 3573 if (isMemOpUniform(Load)) 3574 return SDValue(); 3575 // Non-uniform loads will be selected to MUBUF instructions, so they 3576 // have the same legalization requirements as global and private 3577 // loads. 3578 // 3579 } 3580 if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS) { 3581 if (Subtarget->getScalarizeGlobalBehavior() && isMemOpUniform(Load) && 3582 !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load)) 3583 return SDValue(); 3584 // Non-uniform loads will be selected to MUBUF instructions, so they 3585 // have the same legalization requirements as global and private 3586 // loads. 3587 // 3588 } 3589 if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS || 3590 AS == AMDGPUASI.FLAT_ADDRESS) { 3591 if (NumElements > 4) 3592 return SplitVectorLoad(Op, DAG); 3593 // v4 loads are supported for private and global memory. 3594 return SDValue(); 3595 } 3596 if (AS == AMDGPUASI.PRIVATE_ADDRESS) { 3597 // Depending on the setting of the private_element_size field in the 3598 // resource descriptor, we can only make private accesses up to a certain 3599 // size. 
3600 switch (Subtarget->getMaxPrivateElementSize()) { 3601 case 4: 3602 return scalarizeVectorLoad(Load, DAG); 3603 case 8: 3604 if (NumElements > 2) 3605 return SplitVectorLoad(Op, DAG); 3606 return SDValue(); 3607 case 16: 3608 // Same as global/flat 3609 if (NumElements > 4) 3610 return SplitVectorLoad(Op, DAG); 3611 return SDValue(); 3612 default: 3613 llvm_unreachable("unsupported private_element_size"); 3614 } 3615 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) { 3616 if (NumElements > 2) 3617 return SplitVectorLoad(Op, DAG); 3618 3619 if (NumElements == 2) 3620 return SDValue(); 3621 3622 // If properly aligned, if we split we might be able to use ds_read_b64. 3623 return SplitVectorLoad(Op, DAG); 3624 } 3625 return SDValue(); 3626 } 3627 3628 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 3629 if (Op.getValueType() != MVT::i64) 3630 return SDValue(); 3631 3632 SDLoc DL(Op); 3633 SDValue Cond = Op.getOperand(0); 3634 3635 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 3636 SDValue One = DAG.getConstant(1, DL, MVT::i32); 3637 3638 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); 3639 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); 3640 3641 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); 3642 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); 3643 3644 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); 3645 3646 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); 3647 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); 3648 3649 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); 3650 3651 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi}); 3652 return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res); 3653 } 3654 3655 // Catch division cases where we can use shortcuts with rcp and rsq 3656 // instructions. 
/// \brief Try to lower an FDIV with rcp/rsq based shortcuts.
///
/// Handles 1.0/x -> rcp(x), 1.0/sqrt(x) -> rsq(x), -1.0/x -> rcp(-x), and the
/// generic x/y -> x * rcp(y) when unsafe math (or the allow-reciprocal flag)
/// permits it. Returns an empty SDValue when no shortcut applies and the
/// caller must emit the full-precision expansion.
SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  EVT VT = Op.getValueType();
  bool Unsafe = DAG.getTarget().Options.UnsafeFPMath;

  // rcp does not honor f32 denormals, so no shortcut is usable when the
  // subtarget requires them and unsafe math is not enabled.
  if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
    return SDValue();

  if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
    if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
      if (CLHS->isExactlyValue(1.0)) {
        // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
        // the CI documentation has a worst case error of 1 ulp.
        // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
        // use it as long as we aren't trying to use denormals.
        //
        // v_rcp_f16 and v_rsq_f16 DO support denormals.

        // 1.0 / sqrt(x) -> rsq(x)

        // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
        // error seems really high at 2^29 ULP.
        if (RHS.getOpcode() == ISD::FSQRT)
          return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));

        // 1.0 / x -> rcp(x)
        return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
      }

      // Same as for 1.0, but expand the sign out of the constant.
      if (CLHS->isExactlyValue(-1.0)) {
        // -1.0 / x -> rcp (fneg x)
        SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
        return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
      }
    }
  }

  const SDNodeFlags Flags = Op->getFlags();

  if (Unsafe || Flags.hasAllowReciprocal()) {
    // Turn into multiply by the reciprocal.
    // x / y -> x * (1.0 / y)
    SDNodeFlags NewFlags;
    NewFlags.setUnsafeAlgebra(true);
    SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
    return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, NewFlags);
  }

  return SDValue();
}

/// \brief Emit a binary FP op, using the chained/glued AMDGPU variant when
/// GlueChain carries a chain and glue (i.e. has three result values).
///
/// The glued form keeps the operation ordered relative to the surrounding
/// mode-register (SETREG) changes emitted by LowerFDIV32.
static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
                          EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
  if (GlueChain->getNumValues() <= 1) {
    return DAG.getNode(Opcode, SL, VT, A, B);
  }

  assert(GlueChain->getNumValues() == 3);

  SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
  switch (Opcode) {
  default: llvm_unreachable("no chain equivalent for opcode");
  case ISD::FMUL:
    Opcode = AMDGPUISD::FMUL_W_CHAIN;
    break;
  }

  // Operand layout of the chained variant: chain, A, B, glue.
  return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
                     GlueChain.getValue(2));
}

/// \brief Ternary counterpart of getFPBinOp: emit an FMA, using the
/// chained/glued FMA_W_CHAIN variant when GlueChain has chain and glue
/// results.
static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
                           EVT VT, SDValue A, SDValue B, SDValue C,
                           SDValue GlueChain) {
  if (GlueChain->getNumValues() <= 1) {
    return DAG.getNode(Opcode, SL, VT, A, B, C);
  }

  assert(GlueChain->getNumValues() == 3);

  SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
  switch (Opcode) {
  default: llvm_unreachable("no chain equivalent for opcode");
  case ISD::FMA:
    Opcode = AMDGPUISD::FMA_W_CHAIN;
    break;
  }

  return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
                     GlueChain.getValue(2));
}

/// \brief Lower an f16 FDIV by computing the quotient in f32 and fixing up
/// the rounded result with DIV_FIXUP.
SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
  if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
    return FastLowered;

  SDLoc SL(Op);
  SDValue Src0 = Op.getOperand(0);
  SDValue Src1 = Op.getOperand(1);

  // Promote both operands to f32 and form src0 * rcp(src1).
  SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
  SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);

  SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
  SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);

  SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
  SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);

  // DIV_FIXUP operand order is (quotient, denominator, numerator).
  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
}

// Faster 2.5 ULP division that does not support denormals.
// NOTE: the operands are read from indices 1 and 2, so this expects an
// intrinsic-style node whose operand 0 is not a divide operand.
SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);

  SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);

  // 0x6f800000 = 0x1.0p+96f: threshold above which the denominator is
  // pre-scaled down to avoid overflow in rcp.
  const APFloat K0Val(BitsToFloat(0x6f800000));
  const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);

  // 0x2f800000 = 0x1.0p-32f: the pre-scale factor; multiplied back into the
  // result at the end so the final quotient is unchanged.
  const APFloat K1Val(BitsToFloat(0x2f800000));
  const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);

  EVT SetCCVT =
    getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);

  SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);

  SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);

  // TODO: Should this propagate fast-math-flags?
  r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);

  // rcp does not support denormals.
  SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);

  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);

  // Undo the conditional pre-scaling of the denominator.
  return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
}

/// \brief Full-precision f32 FDIV lowering using the DIV_SCALE / DIV_FMAS /
/// DIV_FIXUP sequence with a Newton-Raphson refinement of rcp.
///
/// When the subtarget flushes f32 denormals, the MODE register's FP_DENORM
/// field is temporarily switched on around the refinement (via glued SETREG
/// nodes) because the intermediate values must not be flushed; the glue/chain
/// threading through getFPBinOp/getFPTernOp enforces that ordering.
SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
  if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
    return FastLowered;

  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);

  SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);

  SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
                                          RHS, RHS, LHS);
  SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
                                        LHS, RHS, LHS);

  // Denominator is scaled to not be denormal, so using rcp is ok.
  SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
                                  DenominatorScaled);
  SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
                                     DenominatorScaled);

  // Hardware-register descriptor for the 1-bit FP32 denorm field of MODE
  // (offset 4, width 1), used by the SETREG nodes below.
  const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
                               (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
                               (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);

  const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);

  if (!Subtarget->hasFP32Denormals()) {
    // Enable denormals for the refinement sequence; the glue from the SETREG
    // is merged with NegDivScale0 so the FMAs below are ordered after it.
    SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
    const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
                                                      SL, MVT::i32);
    SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
                                       DAG.getEntryNode(),
                                       EnableDenormValue, BitField);
    SDValue Ops[3] = {
      NegDivScale0,
      EnableDenorm.getValue(0),
      EnableDenorm.getValue(1)
    };

    NegDivScale0 = DAG.getMergeValues(Ops, SL);
  }

  SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
                             ApproxRcp, One, NegDivScale0);

  SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
                             ApproxRcp, Fma0);

  SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
                           Fma1, Fma1);

  SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
                             NumeratorScaled, Mul);

  SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul, Fma2);

  SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
                             NumeratorScaled, Fma3);

  if (!Subtarget->hasFP32Denormals()) {
    // Restore the flush-denormals mode after the last FMA of the sequence.
    const SDValue DisableDenormValue =
        DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
    SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
                                        Fma4.getValue(1),
                                        DisableDenormValue,
                                        BitField,
                                        Fma4.getValue(2));

    SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                                      DisableDenorm, DAG.getRoot());
    DAG.setRoot(OutputChain);
  }

  SDValue Scale = NumeratorScaled.getValue(1);
  SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
                             Fma4, Fma1, Fma3, Scale);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
}

/// \brief Full-precision f64 FDIV lowering: DIV_SCALE of numerator and
/// denominator, Newton-Raphson refinement of rcp, then DIV_FMAS/DIV_FIXUP.
SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
  if (DAG.getTarget().Options.UnsafeFPMath)
    return lowerFastUnsafeFDIV(Op, DAG);

  SDLoc SL(Op);
  SDValue X = Op.getOperand(0);
  SDValue Y = Op.getOperand(1);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);

  SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);

  SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);

  SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);

  SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);

  SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);

  SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);

  SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);

  SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);

  SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);

  SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
                             NegDivScale0, Mul, DivScale1);

  SDValue Scale;

  if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
    // Workaround a hardware bug on SI where the condition output from div_scale
    // is not usable.

    const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);

    // Figure out which scale to use for div_fmas by recomputing the condition
    // from the high halves: div_scale only modified the exponent (high word)
    // of whichever operand it scaled.
    SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
    SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
    SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
    SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);

    SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
    SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);

    SDValue Scale0Hi
      = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
    SDValue Scale1Hi
      = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);

    SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
    SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
    Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
  } else {
    Scale = DivScale1.getValue(1);
  }

  SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
                             Fma4, Fma3, Mul, Scale);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
}

/// \brief Dispatch FDIV lowering by value type (f16/f32/f64).
SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  if (VT == MVT::f32)
    return LowerFDIV32(Op, DAG);

  if (VT == MVT::f64)
    return LowerFDIV64(Op, DAG);

  if (VT == MVT::f16)
    return LowerFDIV16(Op, DAG);

  llvm_unreachable("Unexpected type for fdiv");
}

/// \brief Custom STORE lowering: i1 stores become i32 truncstores; vector
/// stores are split or scalarized according to address space and the
/// subtarget's private element size, mirroring LowerLOAD.
SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT VT = Store->getMemoryVT();

  if (VT == MVT::i1) {
    // Store the boolean as an i32 truncated back down to one bit in memory.
    return DAG.getTruncStore(Store->getChain(), DL,
       DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
       Store->getBasePtr(), MVT::i1, Store->getMemOperand());
  }

  assert(VT.isVector() &&
         Store->getValue().getValueType().getScalarType() == MVT::i32);

  unsigned AS = Store->getAddressSpace();
  if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                          AS, Store->getAlignment())) {
    return expandUnalignedStore(Store, DAG);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // If there is a possibility that flat instruction access scratch memory
  // then we need to use the same legalization rules we use for private.
  if (AS == AMDGPUASI.FLAT_ADDRESS)
    AS = MFI->hasFlatScratchInit() ?
         AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;

  unsigned NumElements = VT.getVectorNumElements();
  if (AS == AMDGPUASI.GLOBAL_ADDRESS ||
      AS == AMDGPUASI.FLAT_ADDRESS) {
    if (NumElements > 4)
      return SplitVectorStore(Op, DAG);
    return SDValue();
  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    // The private_element_size resource-descriptor field limits the widest
    // legal private access.
    switch (Subtarget->getMaxPrivateElementSize()) {
    case 4:
      return scalarizeVectorStore(Store, DAG);
    case 8:
      if (NumElements > 2)
        return SplitVectorStore(Op, DAG);
      return SDValue();
    case 16:
      if (NumElements > 4)
        return SplitVectorStore(Op, DAG);
      return SDValue();
    default:
      llvm_unreachable("unsupported private_element_size");
    }
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
    if (NumElements > 2)
      return SplitVectorStore(Op, DAG);

    if (NumElements == 2)
      return Op;

    // If properly aligned, if we split we might be able to use ds_write_b64.
    return SplitVectorStore(Op, DAG);
  } else {
    llvm_unreachable("unhandled address space");
  }
}

/// \brief Lower FSIN/FCOS to the hardware SIN_HW/COS_HW nodes.
///
/// The argument is multiplied by 1/(2*pi) and reduced with FRACT first,
/// since the hardware ops take a fraction-of-a-revolution input.
SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Arg = Op.getOperand(0);
  // TODO: Should this propagate fast-math-flags?
  SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
                                  DAG.getNode(ISD::FMUL, DL, VT, Arg,
                                              DAG.getConstantFP(0.5/M_PI, DL,
                                                                VT)));

  switch (Op.getOpcode()) {
  case ISD::FCOS:
    return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart);
  case ISD::FSIN:
    return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart);
  default:
    llvm_unreachable("Wrong trig opcode");
  }
}

/// \brief Custom lowering for ATOMIC_CMP_SWAP in flat/global address spaces:
/// pack the new and compare values into a 2-element vector operand for the
/// target ATOMIC_CMP_SWAP node. Local address space needs no custom lowering.
SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
  assert(AtomicNode->isCompareAndSwap());
  unsigned AS = AtomicNode->getAddressSpace();

  // No custom lowering required for local address space
  if (!isFlatGlobalAddrSpace(AS, AMDGPUASI))
    return Op;

  // Non-local address space requires custom lowering for atomic compare
  // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2
  SDLoc DL(Op);
  SDValue ChainIn = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  SDValue Old = Op.getOperand(2);
  SDValue New = Op.getOperand(3);
  EVT VT = Op.getValueType();
  MVT SimpleVT = VT.getSimpleVT();
  MVT VecType = MVT::getVectorVT(SimpleVT, 2);

  SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
  SDValue Ops[] = { ChainIn, Addr, NewOld };

  return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
                                 Ops, VT, AtomicNode->getMemOperand());
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

/// \brief Combine (uint_to_fp x) to CVT_F32_UBYTE0 when the source is known
/// to fit in the low byte (high 24 bits provably zero).
SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
                                                     DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  EVT ScalarVT = VT.getScalarType();
  if (ScalarVT != MVT::f32)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue Src = N->getOperand(0);
  EVT SrcVT = Src.getValueType();

  // TODO: We could try to match extracting the higher bytes, which would be
  // easier if i8 vectors weren't promoted to i32 vectors, particularly after
  // types are legalized. v4i8 -> v4f32 is probably the only case to worry
  // about in practice.
  if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) {
    if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
      SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
      DCI.AddToWorklist(Cvt.getNode());
      return Cvt;
    }
  }

  return SDValue();
}

/// \brief Return true if the given offset Size in bytes can be folded into
/// the immediate offsets of a memory instruction for the given address space.
static bool canFoldOffset(unsigned OffsetSize, unsigned AS,
                          const SISubtarget &STI) {
  auto AMDGPUASI = STI.getAMDGPUAS();
  if (AS == AMDGPUASI.GLOBAL_ADDRESS) {
    // MUBUF instructions have a 12-bit offset in bytes.
    return isUInt<12>(OffsetSize);
  }
  if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
    // SMRD instructions have an 8-bit offset in dwords on SI and
    // a 20-bit offset in bytes on VI.
    if (STI.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return isUInt<20>(OffsetSize);
    else
      return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
  }
  if (AS == AMDGPUASI.LOCAL_ADDRESS ||
      AS == AMDGPUASI.REGION_ADDRESS) {
    // The single offset versions have a 16-bit offset in bytes.
    return isUInt<16>(OffsetSize);
  }
  // Indirect register addressing does not use any offsets.
  return false;
}

// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)

// This is a variant of
// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
//
// The normal DAG combiner will do this, but only if the add has one use since
// that would increase the number of instructions.
//
// This prevents us from seeing a constant offset that can be folded into a
// memory instruction's addressing mode. If we know the resulting add offset of
// a pointer can be folded into an addressing offset, we can replace the pointer
// operand with the add of new constant offset. This eliminates one of the uses,
// and may allow the remaining use to also be simplified.
//
SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
                                               unsigned AddrSpace,
                                               DAGCombinerInfo &DCI) const {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  if (N0.getOpcode() != ISD::ADD)
    return SDValue();

  const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
  if (!CN1)
    return SDValue();

  const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  if (!CAdd)
    return SDValue();

  // If the resulting offset is too large, we can't fold it into the addressing
  // mode offset.
  APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
  if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *getSubtarget()))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
  SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);

  return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset);
}

/// \brief Apply performSHLPtrCombine to the base pointer of a memory node and
/// splice the simplified pointer back into the node's operand list.
SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SDValue Ptr = N->getBasePtr();
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  // TODO: We could also do this for multiplies.
  unsigned AS = N->getAddressSpace();
  if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUASI.PRIVATE_ADDRESS) {
    SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI);
    if (NewPtr) {
      SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());

      // The pointer is operand 2 of a store node and operand 1 of a load.
      NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
      return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
    }
  }

  return SDValue();
}

/// \brief Return true if a 32-bit bit operation with this constant is a
/// no-op or constant-folds away (identity/absorbing element).
static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
  return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
         (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
         (Opc == ISD::XOR && Val == 0);
}

// Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This
// will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit
// integer combine opportunities since most 64-bit operations are decomposed
// this way. TODO: We won't want this for SALU especially if it is an inline
// immediate.
/// \brief Split a 64-bit and/or/xor with a constant RHS into two 32-bit ops
/// when either half reduces (see bitOpWithConstantIsReducible) or when the
/// constant would otherwise need a non-inline 64-bit materialization.
SDValue SITargetLowering::splitBinaryBitConstantOp(
  DAGCombinerInfo &DCI,
  const SDLoc &SL,
  unsigned Opc, SDValue LHS,
  const ConstantSDNode *CRHS) const {
  uint64_t Val = CRHS->getZExtValue();
  uint32_t ValLo = Lo_32(Val);
  uint32_t ValHi = Hi_32(Val);
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
       bitOpWithConstantIsReducible(Opc, ValHi)) ||
      (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
    // If we need to materialize a 64-bit immediate, it will be split up later
    // anyway. Avoid creating the harder to understand 64-bit immediate
    // materialization.
    return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
  }

  return SDValue();
}

/// \brief ISD::AND combines: split i64 and-with-constant, turn masked shifts
/// into BFE on SDWA-capable subtargets, and fold ord/une compares into
/// FP_CLASS.
SDValue SITargetLowering::performAndCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  if (DCI.isBeforeLegalize())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);


  const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
  if (VT == MVT::i64 && CRHS) {
    if (SDValue Split
          = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
      return Split;
  }

  if (CRHS && VT == MVT::i32) {
    // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
    // nb = number of trailing zeroes in mask
    // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
    // given that we are selecting 8 or 16 bit fields starting at byte boundary.
    uint64_t Mask = CRHS->getZExtValue();
    unsigned Bits = countPopulation(Mask);
    if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
        (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
      if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
        unsigned Shift = CShift->getZExtValue();
        unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
        unsigned Offset = NB + Shift;
        if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
          SDLoc SL(N);
          SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
                                    LHS->getOperand(0),
                                    DAG.getConstant(Offset, SL, MVT::i32),
                                    DAG.getConstant(Bits, SL, MVT::i32));
          EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
          SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
                                    DAG.getValueType(NarrowVT));
          SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
                                    DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
          return Shl;
        }
      }
    }
  }

  // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
  // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
  if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
    ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
    ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();

    SDValue X = LHS.getOperand(0);
    SDValue Y = RHS.getOperand(0);
    if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
      return SDValue();

    if (LCC == ISD::SETO) {
      if (X != LHS.getOperand(1))
        return SDValue();

      if (RCC == ISD::SETUNE) {
        const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
        if (!C1 || !C1->isInfinity() || C1->isNegative())
          return SDValue();

        const uint32_t Mask = SIInstrFlags::N_NORMAL |
                              SIInstrFlags::N_SUBNORMAL |
                              SIInstrFlags::N_ZERO |
                              SIInstrFlags::P_ZERO |
                              SIInstrFlags::P_SUBNORMAL |
                              SIInstrFlags::P_NORMAL;

        static_assert(((~(SIInstrFlags::S_NAN |
                          SIInstrFlags::Q_NAN |
                          SIInstrFlags::N_INFINITY |
                          SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
                      "mask not equal");

        SDLoc DL(N);
        return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
                           X, DAG.getConstant(Mask, DL, MVT::i32));
      }
    }
  }

  return SDValue();
}

/// \brief ISD::OR combines: merge FP_CLASS masks for i1, push an OR of a
/// zero-extended i32 into the low half of an i64, and split i64
/// or-with-constant.
SDValue SITargetLowering::performOrCombine(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  EVT VT = N->getValueType(0);
  if (VT == MVT::i1) {
    // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
    if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
        RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
      SDValue Src = LHS.getOperand(0);
      if (Src != RHS.getOperand(0))
        return SDValue();

      const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
      const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
      if (!CLHS || !CRHS)
        return SDValue();

      // Only 10 bits are used.
      static const uint32_t MaxMask = 0x3ff;

      uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
      SDLoc DL(N);
      return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
                         Src, DAG.getConstant(NewMask, DL, MVT::i32));
    }

    return SDValue();
  }

  if (VT != MVT::i64)
    return SDValue();

  // TODO: This could be a generic combine with a predicate for extracting the
  // high half of an integer being free.

  // (or i64:x, (zero_extend i32:y)) ->
  //   i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
  if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
      RHS.getOpcode() != ISD::ZERO_EXTEND)
    std::swap(LHS, RHS);

  if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
    SDValue ExtSrc = RHS.getOperand(0);
    EVT SrcVT = ExtSrc.getValueType();
    if (SrcVT == MVT::i32) {
      SDLoc SL(N);
      SDValue LowLHS, HiBits;
      std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
      SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);

      DCI.AddToWorklist(LowOr.getNode());
      DCI.AddToWorklist(HiBits.getNode());

      SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                LowOr, HiBits);
      return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
    }
  }

  const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (CRHS) {
    if (SDValue Split
          = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
      return Split;
  }

  return SDValue();
}

/// \brief ISD::XOR combine: split i64 xor-with-constant into 32-bit halves.
SDValue SITargetLowering::performXorCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  if (VT != MVT::i64)
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
  if (CRHS) {
    if (SDValue Split
          = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
      return Split;
  }

  return SDValue();
}

// Instructions that will be lowered with a final instruction that zeros the
// high result bits.
// XXX - probably only need to list legal operations.
/// \returns true if \p Opc, when producing an f16 value, is lowered to an
/// instruction that leaves the high 16 bits of the 32-bit register zeroed.
static bool fp16SrcZerosHighBits(unsigned Opc) {
  switch (Opc) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::FMA:
  case ISD::FMAD:
  case ISD::FCANONICALIZE:
  case ISD::FP_ROUND:
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
  case ISD::FABS:
    // Fabs is lowered to a bit operation, but it's an and which will clear the
    // high bits anyway.
  case ISD::FSQRT:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FPOWI:
  case ISD::FPOW:
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FFLOOR:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case AMDGPUISD::FRACT:
  case AMDGPUISD::CLAMP:
  case AMDGPUISD::COS_HW:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMIN3:
  case AMDGPUISD::FMAX3:
  case AMDGPUISD::FMED3:
  case AMDGPUISD::FMAD_FTZ:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::LDEXP:
    return true;
  default:
    // fcopysign, select and others may be lowered to 32-bit bit operations
    // which don't zero the high bits.
    return false;
  }
}

/// Combine (zero_extend i16 -> i32): when the source is a bitcast of an f16
/// result whose lowering already zeroes the high bits, fold to FP16_ZEXT and
/// avoid an explicit mask.
SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  // Requires 16-bit instructions and runs only after DAG legalization.
  if (!Subtarget->has16BitInsts() ||
      DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32)
    return SDValue();

  SDValue Src = N->getOperand(0);
  if (Src.getValueType() != MVT::i16)
    return SDValue();

  // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
  // FIXME: It is not universally true that the high bits are zeroed on gfx9.
  if (Src.getOpcode() == ISD::BITCAST) {
    SDValue BCSrc = Src.getOperand(0);
    if (BCSrc.getValueType() == MVT::f16 &&
        fp16SrcZerosHighBits(BCSrc.getOpcode()))
      return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
  }

  return SDValue();
}

/// Combine AMDGPUISD::FP_CLASS: a zero mask always yields false, and an undef
/// input yields an undef result.
SDValue SITargetLowering::performClassCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Mask = N->getOperand(1);

  // fp_class x, 0 -> false
  if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
    if (CMask->isNullValue())
      return DAG.getConstant(0, SDLoc(N), MVT::i1);
  }

  if (N->getOperand(0).isUndef())
    return DAG.getUNDEF(MVT::i1);

  return SDValue();
}

// Constant fold canonicalize.
SDValue SITargetLowering::performFCanonicalizeCombine(
  SDNode *N,
  DAGCombinerInfo &DCI) const {
  // Only constants (or constant splats) are folded here.
  ConstantFPSDNode *CFP = isConstOrConstSplatFP(N->getOperand(0));
  if (!CFP)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  const APFloat &C = CFP->getValueAPF();

  // Flush denormals to 0 if not enabled.
  if (C.isDenormal()) {
    EVT VT = N->getValueType(0);
    EVT SVT = VT.getScalarType();
    if (SVT == MVT::f32 && !Subtarget->hasFP32Denormals())
      return DAG.getConstantFP(0.0, SDLoc(N), VT);

    if (SVT == MVT::f64 && !Subtarget->hasFP64Denormals())
      return DAG.getConstantFP(0.0, SDLoc(N), VT);

    if (SVT == MVT::f16 && !Subtarget->hasFP16Denormals())
      return DAG.getConstantFP(0.0, SDLoc(N), VT);
  }

  if (C.isNaN()) {
    EVT VT = N->getValueType(0);
    APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
    if (C.isSignaling()) {
      // Quiet a signaling NaN.
      return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
    }

    // Make sure it is the canonical NaN bitpattern.
    //
    // TODO: Can we use -1 as the canonical NaN value since it's an inline
    // immediate?
    if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
      return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
  }

  // Already canonical: the canonicalize is a no-op, return the input.
  return N->getOperand(0);
}

/// Map a binary min/max ISD opcode to the corresponding 3-operand
/// AMDGPU min3/max3 opcode.
static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
  switch (Opc) {
  case ISD::FMAXNUM:
    return AMDGPUISD::FMAX3;
  case ISD::SMAX:
    return AMDGPUISD::SMAX3;
  case ISD::UMAX:
    return AMDGPUISD::UMAX3;
  case ISD::FMINNUM:
    return AMDGPUISD::FMIN3;
  case ISD::SMIN:
    return AMDGPUISD::SMIN3;
  case ISD::UMIN:
    return AMDGPUISD::UMIN3;
  default:
    llvm_unreachable("Not a min/max opcode");
  }
}

/// Fold min(max(x, K0), K1) with constant K0 < K1 into a single med3 node.
/// \p Op0 is the inner max (its operand 1 must be the K0 constant), \p Op1 is
/// the outer constant K1, \p Signed selects signed vs. unsigned comparison.
SDValue SITargetLowering::performIntMed3ImmCombine(
  SelectionDAG &DAG, const SDLoc &SL,
  SDValue Op0, SDValue Op1, bool Signed) const {
  ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
  if (!K1)
    return SDValue();

  ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
  if (!K0)
    return SDValue();

  // med3 only matches min/max when K0 < K1; otherwise give up.
  if (Signed) {
    if (K0->getAPIntValue().sge(K1->getAPIntValue()))
      return SDValue();
  } else {
    if (K0->getAPIntValue().uge(K1->getAPIntValue()))
      return SDValue();
  }

  EVT VT = K0->getValueType(0);
  unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
  if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
    return DAG.getNode(Med3Opc, SL, VT,
                       Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
  }

  // If there isn't a 16-bit med3 operation, convert to 32-bit.
  MVT NVT = MVT::i32;
  unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;

  SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
  SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
  SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);

  SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
  return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
}

/// \returns true if \p Op is known not to be a signaling NaN. When FP
/// exceptions are disabled, signaling NaNs don't matter, so this is trivially
/// true.
static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
  if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions())
    return true;

  return DAG.isKnownNeverNaN(Op);
}

/// Fold fmin(fmax(x, K0), K1) with constant K0 <= K1 into fmed3 (or CLAMP for
/// the [0.0, 1.0] range with dx10-clamp enabled).
SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
                                                  const SDLoc &SL,
                                                  SDValue Op0,
                                                  SDValue Op1) const {
  ConstantFPSDNode *K1 = dyn_cast<ConstantFPSDNode>(Op1);
  if (!K1)
    return SDValue();

  ConstantFPSDNode *K0 = dyn_cast<ConstantFPSDNode>(Op0.getOperand(1));
  if (!K0)
    return SDValue();

  // Ordered >= (although NaN inputs should have folded away by now).
  APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
  if (Cmp == APFloat::cmpGreaterThan)
    return SDValue();

  // TODO: Check IEEE bit enabled?
  EVT VT = K0->getValueType(0);
  if (Subtarget->enableDX10Clamp()) {
    // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
    // hardware fmed3 behavior converting to a min.
    // FIXME: Should this be allowing -0.0?
    if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
      return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
  }

  // med3 for f16 is only available on gfx9+.
  if (VT == MVT::f64 || (VT == MVT::f16 && !Subtarget->hasMed3_16()))
    return SDValue();

  // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
  // signaling NaN gives a quiet NaN. The quiet NaN input to the min would then
  // give the other result, which is different from med3 with a NaN input.
  SDValue Var = Op0.getOperand(0);
  if (!isKnownNeverSNan(DAG, Var))
    return SDValue();

  return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
                     Var, SDValue(K0, 0), SDValue(K1, 0));
}

/// Combine min/max nodes: form 3-operand min3/max3 from nested min/min or
/// max/max, and form med3/clamp from min-of-max-with-constants patterns.
SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  EVT VT = N->getValueType(0);
  unsigned Opc = N->getOpcode();
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // Only do this if the inner op has one use since this will just increase
  // register pressure for no benefit.


  // min3/max3 exists for f32/i32, and for 16-bit types only on subtargets
  // with min3/max3 16-bit instructions; never for f64 or the legacy ops.
  if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
      VT != MVT::f64 &&
      ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) {
    // max(max(a, b), c) -> max3(a, b, c)
    // min(min(a, b), c) -> min3(a, b, c)
    if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
      SDLoc DL(N);
      return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
                         DL,
                         N->getValueType(0),
                         Op0.getOperand(0),
                         Op0.getOperand(1),
                         Op1);
    }

    // Try commuted.
    // max(a, max(b, c)) -> max3(a, b, c)
    // min(a, min(b, c)) -> min3(a, b, c)
    if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
      SDLoc DL(N);
      return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
                         DL,
                         N->getValueType(0),
                         Op0,
                         Op1.getOperand(0),
                         Op1.getOperand(1));
    }
  }

  // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
  if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
    if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
      return Med3;
  }

  if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
    if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
      return Med3;
  }

  // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
  if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
       (Opc == AMDGPUISD::FMIN_LEGACY &&
        Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
      (VT == MVT::f32 || VT == MVT::f64 ||
       (VT == MVT::f16 && Subtarget->has16BitInsts())) &&
      Op0.hasOneUse()) {
    if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
      return Res;
  }

  return SDValue();
}

/// \returns true if \p A and \p B are the FP constants {0.0, 1.0} in either
/// order, i.e. the bounds of a clamp to [0.0, 1.0].
static bool isClampZeroToOne(SDValue A, SDValue B) {
  if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
    if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
      // FIXME: Should this be allowing -0.0?
      return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
             (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
    }
  }

  return false;
}

// FIXME: Should only worry about snans for version with chain.
SDValue SITargetLowering::performFMed3Combine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
  // NaNs.
  // With a NaN input, the order of the operands may change the result.

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  SDValue Src0 = N->getOperand(0);
  SDValue Src1 = N->getOperand(1);
  SDValue Src2 = N->getOperand(2);

  if (isClampZeroToOne(Src0, Src1)) {
    // const_a, const_b, x -> clamp is safe in all cases including signaling
    // nans.
    // FIXME: Should this be allowing -0.0?
    return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
  }

  // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
  // handling no dx10-clamp?
  if (Subtarget->enableDX10Clamp()) {
    // If NaNs is clamped to 0, we are free to reorder the inputs.

    // Bubble constants toward Src1/Src2 so a {0.0, 1.0} pair can be matched.
    if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
      std::swap(Src0, Src1);

    if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
      std::swap(Src1, Src2);

    if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
      std::swap(Src0, Src1);

    if (isClampZeroToOne(Src1, Src2))
      return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
  }

  return SDValue();
}

/// Combine CVT_PKRTZ_F16_F32: if both inputs are undef, the packed result is
/// undef.
SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  SDValue Src0 = N->getOperand(0);
  SDValue Src1 = N->getOperand(1);
  if (Src0.isUndef() && Src1.isUndef())
    return DCI.DAG.getUNDEF(N->getValueType(0));
  return SDValue();
}

/// Combine extract_vector_elt (fneg v): pull the fneg out of the vector so it
/// can fold into source modifiers of the scalar users.
SDValue SITargetLowering::performExtractVectorEltCombine(
  SDNode *N, DAGCombinerInfo &DCI) const {
  SDValue Vec = N->getOperand(0);

  SelectionDAG &DAG = DCI.DAG;
  // Only worthwhile if every use can absorb the fneg as a source modifier.
  if (Vec.getOpcode() == ISD::FNEG && allUsesHaveSourceMods(N)) {
    SDLoc SL(N);
    EVT EltVT = N->getValueType(0);
    SDValue Idx = N->getOperand(1);
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Vec.getOperand(0), Idx);
    return DAG.getNode(ISD::FNEG, SL, EltVT, Elt);
  }

  return SDValue();
}


/// Pick the fused multiply-add opcode to use when combining \p N0 and \p N1:
/// FMAD when denormals don't need to be honored, FMA when fast-math allows it
/// and it is profitable, or 0 when no fusion should happen.
unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
                                          const SDNode *N0,
                                          const SDNode *N1) const {
  EVT VT = N0->getValueType(0);

  // Only do this if we are not trying to support denormals. v_mad_f32 does not
  // support denormals ever.
  if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
      (VT == MVT::f16 && !Subtarget->hasFP16Denormals()))
    return ISD::FMAD;

  const TargetOptions &Options = DAG.getTarget().Options;
  if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
       (N0->getFlags().hasUnsafeAlgebra() &&
        N1->getFlags().hasUnsafeAlgebra())) &&
      isFMAFasterThanFMulAndFAdd(VT)) {
    return ISD::FMA;
  }

  return 0;
}

/// Combine (fadd): turn (a + a) + b patterns into mad/fma with a 2.0
/// multiplicand. Runs only after DAG legalization.
SDValue SITargetLowering::performFAddCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

  SDLoc SL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // These should really be instruction patterns, but writing patterns with
  // source modifiers is a pain.
  // fadd (fadd (a, a), b) -> mad 2.0, a, b
  if (LHS.getOpcode() == ISD::FADD) {
    SDValue A = LHS.getOperand(0);
    if (A == LHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
      }
    }
  }

  // fadd (b, fadd (a, a)) -> mad 2.0, a, b
  if (RHS.getOpcode() == ISD::FADD) {
    SDValue A = RHS.getOperand(0);
    if (A == RHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
      }
    }
  }

  return SDValue();
}

/// Combine (fsub): turn (a + a) - c and c - (a + a) patterns into mad/fma
/// with +/-2.0 multiplicands. Runs only after DAG legalization.
SDValue SITargetLowering::performFSubCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  EVT VT = N->getValueType(0);
  assert(!VT.isVector());

  // Try to get the fneg to fold into the source modifier. This undoes generic
  // DAG combines and folds them into the mad.
  //
  // Only do this if we are not trying to support denormals. v_mad_f32 does
  // not support denormals ever.
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  if (LHS.getOpcode() == ISD::FADD) {
    // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
    SDValue A = LHS.getOperand(0);
    if (A == LHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
      if (FusedOp != 0){
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);

        return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
      }
    }
  }

  if (RHS.getOpcode() == ISD::FADD) {
    // (fsub c, (fadd a, a)) -> mad -2.0, a, c

    SDValue A = RHS.getOperand(0);
    if (A == RHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
      if (FusedOp != 0){
        const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
      }
    }
  }

  return SDValue();
}

/// Combine (setcc): match the isinf idiom (fcmp oeq (fabs x), +inf) and
/// lower it to a v_cmp_class test of the infinity classes.
SDValue SITargetLowering::performSetCCCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  EVT VT = LHS.getValueType();

  // NOTE(review): this guard only rejects non-f16 types when 16-bit insts are
  // present; with has16BitInsts() false, a non-f32/f64 type (including f16)
  // falls through. Verify the intent is (!has16BitInsts() || VT != f16).
  if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
                                           VT != MVT::f16))
    return SDValue();

  // Match isinf pattern
  // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) {
    const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
    if (!CRHS)
      return SDValue();

    const APFloat &APF = CRHS->getValueAPF();
    if (APF.isInfinity() && !APF.isNegative()) {
      unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
      return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
                         DAG.getConstant(Mask, SL, MVT::i32));
    }
  }

  return SDValue();
}

/// Combine CVT_F32_UBYTE[0-3]: look through zero_extend and fold a constant
/// srl into the byte index, then simplify the source using demanded bits.
SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
                                                     DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  // Which byte of the source this opcode converts (0-3).
  unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;

  SDValue Src = N->getOperand(0);
  SDValue Srl = N->getOperand(0);
  if (Srl.getOpcode() == ISD::ZERO_EXTEND)
    Srl = Srl.getOperand(0);

  // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
  if (Srl.getOpcode() == ISD::SRL) {
    // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
    // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
    // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x

    if (const ConstantSDNode *C =
          dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
      Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
                               EVT(MVT::i32));

      unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
      // Only byte-aligned shifts within the 32-bit source can be folded.
      if (SrcOffset < 32 && SrcOffset % 8 == 0) {
        return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
                           MVT::f32, Srl);
      }
    }
  }

  // Only the selected byte of the source is demanded.
  APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);

  KnownBits Known;
  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                        !DCI.isBeforeLegalizeOps());
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.ShrinkDemandedConstant(Src, Demanded, TLO) ||
      TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
    DCI.CommitTargetLoweringOpt(TLO);
  }

  return SDValue();
}

/// Main DAG-combine dispatch for the SI target; falls back to the common
/// AMDGPU combines for anything not handled here.
SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
  case ISD::FADD:
    return performFAddCombine(N, DCI);
  case ISD::FSUB:
    return performFSubCombine(N, DCI);
  case ISD::SETCC:
    return performSetCCCombine(N, DCI);
  case ISD::FMAXNUM:
  case ISD::FMINNUM:
  case ISD::SMAX:
  case ISD::SMIN:
  case ISD::UMAX:
  case ISD::UMIN:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY: {
    // min/max combines are only legal/profitable post-legalization and when
    // optimizing.
    if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
        getTargetMachine().getOptLevel() > CodeGenOpt::None)
      return performMinMaxCombine(N, DCI);
    break;
  }
  case ISD::LOAD:
  case ISD::STORE:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case AMDGPUISD::ATOMIC_INC:
  case AMDGPUISD::ATOMIC_DEC: // TODO: Target mem intrinsics.
    if (DCI.isBeforeLegalize())
      break;
    return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
  case ISD::AND:
    return performAndCombine(N, DCI);
  case ISD::OR:
    return performOrCombine(N, DCI);
  case ISD::XOR:
    return performXorCombine(N, DCI);
  case ISD::ZERO_EXTEND:
    return performZeroExtendCombine(N, DCI);
  case AMDGPUISD::FP_CLASS:
    return performClassCombine(N, DCI);
  case ISD::FCANONICALIZE:
    return performFCanonicalizeCombine(N, DCI);
  case AMDGPUISD::FRACT:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RSQ_LEGACY:
  case AMDGPUISD::RSQ_CLAMP:
  case AMDGPUISD::LDEXP: {
    // These unary ops propagate undef.
    SDValue Src = N->getOperand(0);
    if (Src.isUndef())
      return Src;
    break;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return performUCharToFloatCombine(N, DCI);
  case AMDGPUISD::CVT_F32_UBYTE0:
  case AMDGPUISD::CVT_F32_UBYTE1:
  case AMDGPUISD::CVT_F32_UBYTE2:
  case AMDGPUISD::CVT_F32_UBYTE3:
    return performCvtF32UByteNCombine(N, DCI);
  case AMDGPUISD::FMED3:
    return performFMed3Combine(N, DCI);
  case AMDGPUISD::CVT_PKRTZ_F16_F32:
    return performCvtPkRTZCombine(N, DCI);
  case ISD::SCALAR_TO_VECTOR: {
    SelectionDAG &DAG = DCI.DAG;
    EVT VT = N->getValueType(0);

    // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
    if (VT == MVT::v2i16 || VT == MVT::v2f16) {
      SDLoc SL(N);
      SDValue Src = N->getOperand(0);
      EVT EltVT = Src.getValueType();
      if (EltVT == MVT::f16)
        Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);

      SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
      return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
    }

    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    return performExtractVectorEltCombine(N, DCI);
  }
  return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}

/// \brief Helper function for adjustWritemask
static unsigned SubIdx2Lane(unsigned Idx) {
  switch (Idx) {
  default: return 0;
  case AMDGPU::sub0: return 0;
  case AMDGPU::sub1: return 1;
  case AMDGPU::sub2: return 2;
  case AMDGPU::sub3: return 3;
  }
}

/// \brief Adjust the writemask of MIMG instructions
void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
                                       SelectionDAG &DAG) const {
  // Users[i] is the single EXTRACT_SUBREG user of packed lane i, if any.
  SDNode *Users[4] = { };
  unsigned Lane = 0;
  unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 2 : 3;
  unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
  unsigned NewDmask = 0;

  // Try to figure out the used register components
  for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
       I != E; ++I) {

    // Don't look at users of the chain.
    if (I.getUse().getResNo() != 0)
      continue;

    // Abort if we can't understand the usage
    if (!I->isMachineOpcode() ||
        I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
      return;

    // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used.
    // Note that subregs are packed, i.e. Lane==0 is the first bit set
    // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
    // set, etc.
    Lane = SubIdx2Lane(I->getConstantOperandVal(1));

    // Set which texture component corresponds to the lane.
    // The loop runs at least once (i <= Lane with Lane >= 0), so Comp is
    // always assigned before its uses below.
    unsigned Comp;
    for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
      assert(Dmask);
      Comp = countTrailingZeros(Dmask);
      Dmask &= ~(1 << Comp);
    }

    // Abort if we have more than one user per component
    if (Users[Lane])
      return;

    Users[Lane] = *I;
    NewDmask |= 1 << Comp;
  }

  // Abort if there's no change
  if (NewDmask == OldDmask)
    return;

  // Adjust the writemask in the node
  std::vector<SDValue> Ops;
  Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
  Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
  Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
  Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops);

  // If we only got one lane, replace it with a copy
  // (if NewDmask has only one bit set...)
  if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
    SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, SDLoc(),
                                       MVT::i32);
    SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                      SDLoc(), Users[Lane]->getValueType(0),
                                      SDValue(Node, 0), RC);
    DAG.ReplaceAllUsesWith(Users[Lane], Copy);
    return;
  }

  // Update the users of the node with the new indices
  for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
    SDNode *User = Users[i];
    if (!User)
      continue;

    SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
    DAG.UpdateNodeOperands(User, User->getOperand(0), Op);

    // Advance to the next subregister index only for lanes that had a user.
    switch (Idx) {
    default: break;
    case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
    case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
    case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
    }
  }
}

/// \returns true if \p Op is a frame index, looking through an AssertZext.
static bool isFrameIndexOp(SDValue Op) {
  if (Op.getOpcode() == ISD::AssertZext)
    Op = Op.getOperand(0);

  return isa<FrameIndexSDNode>(Op);
}

/// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
/// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers.
SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
                                                        SelectionDAG &DAG) const {
  if (Node->getOpcode() == ISD::CopyToReg) {
    RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
    SDValue SrcVal = Node->getOperand(2);

    // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
    // to try understanding copies to physical registers.
    if (SrcVal.getValueType() == MVT::i1 &&
        TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
      SDLoc SL(Node);
      MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
      SDValue VReg = DAG.getRegister(
        MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);

      SDNode *Glued = Node->getGluedNode();
      SDValue ToVReg
        = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
                           SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
      SDValue ToResultReg
        = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
                           VReg, ToVReg.getValue(1));
      DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
      DAG.RemoveDeadNode(Node);
      return ToResultReg.getNode();
    }
  }

  // Materialize any frame index operands into registers with s_mov_b32.
  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
    if (!isFrameIndexOp(Node->getOperand(i))) {
      Ops.push_back(Node->getOperand(i));
      continue;
    }

    SDLoc DL(Node);
    Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
                                             Node->getOperand(i).getValueType(),
                                             Node->getOperand(i)), 0));
  }

  DAG.UpdateNodeOperands(Node, Ops);
  return Node;
}

/// \brief Fold the instructions after selecting them.
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
                                          SelectionDAG &DAG) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  unsigned Opcode = Node->getMachineOpcode();

  // Shrink the dmask of MIMG loads (stores and gather4 keep all components).
  if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
      !TII->isGather4(Opcode))
    adjustWritemask(Node, DAG);

  if (Opcode == AMDGPU::INSERT_SUBREG ||
      Opcode == AMDGPU::REG_SEQUENCE) {
    legalizeTargetIndependentNode(Node, DAG);
    return Node;
  }
  return Node;
}

/// \brief Assign the register class depending on the number of
/// bits set in the writemask
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                     SDNode *Node) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  if (TII->isVOP3(MI.getOpcode())) {
    // Make sure constant bus requirements are respected.
    TII->legalizeOperandsVOP3(MRI, MI);
    return;
  }

  if (TII->isMIMG(MI)) {
    unsigned VReg = MI.getOperand(0).getReg();
    const TargetRegisterClass *RC = MRI.getRegClass(VReg);
    // TODO: Need mapping tables to handle other cases (register classes).
    if (RC != &AMDGPU::VReg_128RegClass)
      return;

    unsigned DmaskIdx = MI.getNumOperands() == 12 ? 3 : 4;
    unsigned Writemask = MI.getOperand(DmaskIdx).getImm();
    // Count the enabled components to pick the matching result class.
    unsigned BitsSet = 0;
    for (unsigned i = 0; i < 4; ++i)
      BitsSet += Writemask & (1 << i) ? 1 : 0;
    switch (BitsSet) {
    default: return;
    case 1: RC = &AMDGPU::VGPR_32RegClass; break;
    case 2: RC = &AMDGPU::VReg_64RegClass; break;
    case 3: RC = &AMDGPU::VReg_96RegClass; break;
    }

    unsigned NewOpcode = TII->getMaskedMIMGOp(MI.getOpcode(), BitsSet);
    MI.setDesc(TII->get(NewOpcode));
    MRI.setRegClass(VReg, RC);
    return;
  }

  // Replace unused atomics with the no return version.
  int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
  if (NoRetAtomicOp != -1) {
    if (!Node->hasAnyUseOfValue(0)) {
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);
      return;
    }

    // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
    // instruction, because the return type of these instructions is a vec2 of
    // the memory type, so it can be tied to the input operand.
    // This means these instructions always have a use, so we need to add a
    // special case to check if the atomic has only one extract_subreg use,
    // which itself has no uses.
    if ((Node->hasNUsesOfValue(1, 0) &&
         Node->use_begin()->isMachineOpcode() &&
         Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
         !Node->use_begin()->hasAnyUseOfValue(0))) {
      unsigned Def = MI.getOperand(0).getReg();

      // Change this into a noret atomic.
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);

      // If we only remove the def operand from the atomic instruction, the
      // extract_subreg will be left with a use of a vreg without a def.
      // So we need to insert an implicit_def to avoid machine verifier
      // errors.
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
              TII->get(AMDGPU::IMPLICIT_DEF), Def);
    }
    return;
  }
}

/// Materialize a 32-bit immediate into an SGPR with s_mov_b32.
static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
                              uint64_t Val) {
  SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
  return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
}

/// Wrap a 64-bit pointer \p Ptr into a 128-bit addr64 buffer resource
/// descriptor using the default data format in the high dwords.
MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
                                                const SDLoc &DL,
                                                SDValue Ptr) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  // Build the half of the subregister with the constants before building the
  // full 128-bit register. If we are building multiple resource descriptors,
  // this will allow CSEing of the 2-component register.
  const SDValue Ops0[] = {
    DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
    buildSMovImm32(DAG, DL, 0),
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
  };

  SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                                MVT::v2i32, Ops0), 0);

  // Combine the constants and the pointer.
  const SDValue Ops1[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    Ptr,
    DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
    SubRegHi,
    DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
}

/// \brief Return a resource descriptor with the 'Add TID' bit enabled
///        The TID (Thread ID) is multiplied by the stride value (bits [61:48]
///        of the resource descriptor) to create an offset, which is added to
///        the resource pointer.
MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
                                           SDValue Ptr, uint32_t RsrcDword1,
                                           uint64_t RsrcDword2And3) const {
  // Split the 64-bit pointer into its two 32-bit halves so each dword of the
  // descriptor can be assembled independently.
  SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
  SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
  if (RsrcDword1) {
    // Merge extra descriptor bits (e.g. stride / 'Add TID') into the high
    // pointer dword.
    PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
                                       DAG.getConstant(RsrcDword1, DL, MVT::i32)),
                    0);
  }

  // Dwords 2 and 3 of the descriptor come from the caller-provided constant.
  SDValue DataLo = buildSMovImm32(DAG, DL,
                                  RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
  SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);

  // Assemble the four dwords into a 128-bit SGPR tuple; operand order is
  // (regclass, value, subreg-index)* as required by REG_SEQUENCE.
  const SDValue Ops[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    PtrLo,
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    PtrHi,
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
    DataLo,
    DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
    DataHi,
    DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
}

/// \brief Register \p Reg as a live-in value and return a copy from that
/// virtual register anchored at the DAG entry node.
SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                               const TargetRegisterClass *RC,
                                               unsigned Reg, EVT VT) const {
  SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT);

  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()),
                            cast<RegisterSDNode>(VReg)->getReg(), VT);
}

//===----------------------------------------------------------------------===//
// SI Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass *>
SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                               StringRef Constraint,
                                               MVT VT) const {
  if (!isTypeLegal(VT))
    return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    // 's' (scalar) and 'r' both select an SGPR class sized for VT.
    case 's':
    case 'r':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        return std::make_pair(0U, &AMDGPU::SReg_32_XM0RegClass);
      case 64:
        return std::make_pair(0U, &AMDGPU::SGPR_64RegClass);
      case 128:
        return std::make_pair(0U, &AMDGPU::SReg_128RegClass);
      case 256:
        return std::make_pair(0U, &AMDGPU::SReg_256RegClass);
      case 512:
        return std::make_pair(0U, &AMDGPU::SReg_512RegClass);
      }

    // 'v' selects a VGPR class sized for VT.
    case 'v':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        return std::make_pair(0U, &AMDGPU::VGPR_32RegClass);
      case 64:
        return std::make_pair(0U, &AMDGPU::VReg_64RegClass);
      case 96:
        return std::make_pair(0U, &AMDGPU::VReg_96RegClass);
      case 128:
        return std::make_pair(0U, &AMDGPU::VReg_128RegClass);
      case 256:
        return std::make_pair(0U, &AMDGPU::VReg_256RegClass);
      case 512:
        return std::make_pair(0U, &AMDGPU::VReg_512RegClass);
      }
    }
  }

  // Multi-character constraints: presumably explicit register references of
  // the form "{vN}" / "{sN}" (Constraint[0] == '{'), selecting a specific
  // register by index -- TODO confirm against callers.
  if (Constraint.size() > 1) {
    const TargetRegisterClass *RC = nullptr;
    if (Constraint[1] == 'v') {
      RC = &AMDGPU::VGPR_32RegClass;
    } else if (Constraint[1] == 's') {
      RC = &AMDGPU::SGPR_32RegClass;
    }

    if (RC) {
      uint32_t Idx;
      // NOTE(review): for a brace-wrapped constraint like "{v0}", substr(2)
      // is "0}" and StringRef::getAsInteger rejects the trailing '}', so this
      // parse fails and we fall through to the generic handler below --
      // verify that fallback is the intended path.
      bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
      if (!Failed && Idx < RC->getNumRegs())
        return std::make_pair(RC->getRegister(Idx), RC);
    }
  }
  // Anything unrecognized is delegated to the target-independent handling.
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

/// \brief Classify single-character inline-asm constraints: 's' and 'v' name
/// register classes; everything else defers to the generic implementation.
SITargetLowering::ConstraintType
SITargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 's':
    case 'v':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}