//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#ifdef _MSC_VER
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

// Command-line switch selecting GPR indexing mode over movrel for dynamic
// vector indexing (off by default).
static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

/// Scan SGPR0..SGPR(N-1) and return the first register not yet allocated by
/// \p CCInfo. Aborts if every SGPR is taken.
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

// Constructor: registers the legal register classes for each MVT, then
// declares, type by type, which ISD operations are Legal / Custom / Promote /
// Expand for the SI target, and finally installs the target DAG combines.
// The order of the setOperationAction calls is not semantically significant,
// but the grouping mirrors the hardware feature that motivates each entry.
SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const SISubtarget &STI)
    : AMDGPUTargetLowering(TM, STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
  }

  if (Subtarget->hasVOP3PInsts()) {
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
  }

  computeRegisterProperties(STI.getRegisterInfo());

  // We need to custom lower vector stores from local memory
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
        MVT::v2i64, MVT::v2f64}) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
  // and output demarshalling
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value,
  // let LLVM add the comparison
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (getSubtarget()->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // On SI this is s_memtime and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);

  if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);
  }

  if (Subtarget->hasVOP3PInsts()) {
    for (MVT VT : {MVT::v2i16, MVT::v2f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Legal);

    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns)
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
  } else {
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);

  setSchedulingPreference(Sched::RegPressure);
}

/// Convenience accessor returning the subtarget as its SI-specific subclass.
const SISubtarget *SITargetLowering::getSubtarget() const {
  return static_cast<const SISubtarget *>(Subtarget);
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &,
                                          EVT) const {
  // SI has some legal vector
  // shuffles are legal in order to prefer scalarizing some vector operations.
  // (No shuffle mask is ever reported legal.)
  return false;
}

// Describe the memory behavior of target intrinsics so the SelectionDAG can
// build a MemIntrinsicSDNode with the right MachineMemOperand.
bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          unsigned IntrID) const {
  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0; // ABI alignment of memVT.

    // Operand 4 is the volatile flag; treat a missing/non-zero flag as
    // volatile (conservative).
    const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
    Info.vol = !Vol || !Vol->isNullValue();
    Info.readMem = true;
    Info.writeMem = true;
    return true;
  }
  default:
    return false;
  }
}

// Report the pointer operand of addressable intrinsics so LSR and friends can
// reason about their addressing.
bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and is treated as a 12-bit unsigned offset.

  // Just r + i
  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so slightly
  // different than the normal addr64.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r
    // Or  2 * r + i is allowed as r + r + i.
    return true;
  default: // Don't allow n * r
    return false;
  }
}

// Decide, per address space, whether the given base/offset/scale combination
// can be folded into the addressing of a memory instruction.
bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  if (AS == AMDGPUASI.GLOBAL_ADDRESS) {
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // Assume the we will use FLAT for all global memory accesses
      // on VI.
      // FIXME: This assumption is currently wrong.  On VI we still use
      // MUBUF instructions for the r + i addressing mode.  As currently
      // implemented, the MUBUF instructions only work on buffer < 4GB.
      // It may be possible to support > 4GB buffers with MUBUF instructions,
      // by setting the stride value in the resource descriptor which would
      // increase the size limit to (stride * 4GB).  However, this is risky,
      // because it has never been validated.
      return isLegalFlatAddressingMode(AM);
    }

    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (DL.getTypeStoreSize(Ty) < 4)
      return isLegalMUBUFAddressingMode(AM);

    if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8-bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is 20-bit in bytes.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;

  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS ||
             AS == AMDGPUASI.REGION_ADDRESS) {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUASI.FLAT_ADDRESS ||
             AS == AMDGPUASI.UNKNOWN_ADDRESS_SPACE) {
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);
  } else {
    llvm_unreachable("unhandled address space");
  }
}

// Cap the size of merged stores per address space (in bits).
bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT) const {
  if (AS == AMDGPUASI.GLOBAL_ADDRESS || AS == AMDGPUASI.FLAT_ADDRESS) {
    return (MemVT.getSizeInBits() <= 4 * 32);
  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
    return (MemVT.getSizeInBits() <= MaxPrivateBits);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
    return (MemVT.getSizeInBits() <= 2 * 32);
  }
  return true;
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  // NOTE(review): the second operand of the || re-tests VT != MVT::Other,
  // which is redundant after short-circuiting; behavior is unchanged.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUASI.LOCAL_ADDRESS ||
      AddrSpace == AMDGPUASI.REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch.  If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUASI.PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUASI.FLAT_ADDRESS)) {
    return false;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have an uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS) ?
        (Align % 4 == 0) : true;
    }

    return true;
  }

  // Smaller than dword value must be aligned.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

// Pick a wide vector type for lowering memcpy/memset when the size and
// destination alignment permit, to get 64/128-bit accesses.
EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                          unsigned SrcAlign, bool IsMemset,
                                          bool ZeroMemset,
                                          bool MemcpyStrSrc,
                                          MachineFunction &MF) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type to
Make sure we switch these to 64-bit accesses. 798 799 if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global 800 return MVT::v4i32; 801 802 if (Size >= 8 && DstAlign >= 4) 803 return MVT::v2i32; 804 805 // Use the default. 806 return MVT::Other; 807 } 808 809 static bool isFlatGlobalAddrSpace(unsigned AS, AMDGPUAS AMDGPUASI) { 810 return AS == AMDGPUASI.GLOBAL_ADDRESS || 811 AS == AMDGPUASI.FLAT_ADDRESS || 812 AS == AMDGPUASI.CONSTANT_ADDRESS; 813 } 814 815 bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS, 816 unsigned DestAS) const { 817 return isFlatGlobalAddrSpace(SrcAS, AMDGPUASI) && 818 isFlatGlobalAddrSpace(DestAS, AMDGPUASI); 819 } 820 821 bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const { 822 const MemSDNode *MemNode = cast<MemSDNode>(N); 823 const Value *Ptr = MemNode->getMemOperand()->getValue(); 824 const Instruction *I = dyn_cast<Instruction>(Ptr); 825 return I && I->getMetadata("amdgpu.noclobber"); 826 } 827 828 bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS, 829 unsigned DestAS) const { 830 // Flat -> private/local is a simple truncate. 831 // Flat -> global is no-op 832 if (SrcAS == AMDGPUASI.FLAT_ADDRESS) 833 return true; 834 835 return isNoopAddrSpaceCast(SrcAS, DestAS); 836 } 837 838 bool SITargetLowering::isMemOpUniform(const SDNode *N) const { 839 const MemSDNode *MemNode = cast<MemSDNode>(N); 840 841 return AMDGPU::isUniformMMO(MemNode->getMemOperand()); 842 } 843 844 TargetLoweringBase::LegalizeTypeAction 845 SITargetLowering::getPreferredVectorAction(EVT VT) const { 846 if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16)) 847 return TypeSplitVector; 848 849 return TargetLoweringBase::getPreferredVectorAction(VT); 850 } 851 852 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 853 Type *Ty) const { 854 // FIXME: Could be smarter if called for vector constants. 
// (trailing line of shouldConvertConstantLoadToIntImm, begun above)
  return true;
}

bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
  // With 16-bit instructions available, only allow i16 for the ops that
  // really have 16-bit forms (or are done in 32 bits anyway); everything
  // else should be promoted to i32.
  if (Subtarget->has16BitInsts() && VT == MVT::i16) {
    switch (Op) {
    case ISD::LOAD:
    case ISD::STORE:

    // These operations are done with 32-bit instructions anyway.
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SELECT:
      // TODO: Extensions?
      return true;
    default:
      return false;
    }
  }

  // SimplifySetCC uses this function to determine whether or not it should
  // create setcc with i1 operands. We don't have instructions for i1 setcc.
  if (VT == MVT::i1 && Op == ISD::SETCC)
    return false;

  return TargetLowering::isTypeDesirableForOp(Op, VT);
}

// Compute a pointer to the kernarg segment at \p Offset: copy the preloaded
// kernarg segment base pointer out of its live-in SGPR and add the offset.
SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
                                                   const SDLoc &SL,
                                                   SDValue Chain,
                                                   uint64_t Offset) const {
  const DataLayout &DL = DAG.getDataLayout();
  MachineFunction &MF = DAG.getMachineFunction();
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
  unsigned InputPtrReg = TRI->getPreloadedValue(MF,
                                 SIRegisterInfo::KERNARG_SEGMENT_PTR);

  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  // Kernarg pointers live in the constant address space.
  MVT PtrVT = getPointerTy(DL, AMDGPUASI.CONSTANT_ADDRESS);
  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
                                       MRI.getLiveInVirtReg(InputPtrReg), PtrVT);
  return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                     DAG.getConstant(Offset, SL, PtrVT));
}

// Convert a loaded argument value of type MemVT to the expected type VT,
// applying sext/zext asserts and the appropriate extend/truncate.
SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                         const SDLoc &SL, SDValue Val,
                                         bool Signed,
                                         const ISD::InputArg *Arg) const {
  if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
      VT.bitsLT(MemVT)) {
    unsigned Opc = Arg->Flags.isZExt() ?
// (continuation of the conditional begun above: pick the assert node kind)
      ISD::AssertZext : ISD::AssertSext;
    Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
  }

  if (MemVT.isFloatingPoint())
    Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
  else if (Signed)
    Val = DAG.getSExtOrTrunc(Val, SL, VT);
  else
    Val = DAG.getZExtOrTrunc(Val, SL, VT);

  return Val;
}

// Load a kernel argument from the kernarg segment at \p Offset and convert it
// to \p VT. Returns the merged { value, load chain }.
SDValue SITargetLowering::lowerKernargMemParameter(
  SelectionDAG &DAG, EVT VT, EVT MemVT,
  const SDLoc &SL, SDValue Chain,
  uint64_t Offset, bool Signed,
  const ISD::InputArg *Arg) const {
  const DataLayout &DL = DAG.getDataLayout();
  Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  unsigned Align = DL.getABITypeAlignment(Ty);

  SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
  // Kernarg loads never alias stores and can be freely rescheduled.
  SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
                             MachineMemOperand::MONonTemporal |
                             MachineMemOperand::MODereferenceable |
                             MachineMemOperand::MOInvariant);

  SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
  return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
}

// Lower an incoming stack-passed argument: byval arguments become a frame
// index; everything else becomes a (possibly extending) load from a fixed
// stack object.
SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
                                              const SDLoc &SL, SDValue Chain,
                                              const ISD::InputArg &Arg) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (Arg.Flags.isByVal()) {
    unsigned Size = Arg.Flags.getByValSize();
    int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
    return DAG.getFrameIndex(FrameIdx, MVT::i32);
  }

  unsigned ArgOffset = VA.getLocMemOffset();
  unsigned ArgSize = VA.getValVT().getStoreSize();

  int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);

  // Create load nodes to retrieve arguments from the stack.
// (continuation of lowerStackParameter, begun above)
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  SDValue ArgValue;

  // For NON_EXTLOAD, generic code in getLoad assert(ValVT == MemVT)
  ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
  MVT MemVT = VA.getValVT();

  // Map the CC location info onto the matching extending-load flavor.
  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::BCvt:
    MemVT = VA.getLocVT();
    break;
  case CCValAssign::SExt:
    ExtType = ISD::SEXTLOAD;
    break;
  case CCValAssign::ZExt:
    ExtType = ISD::ZEXTLOAD;
    break;
  case CCValAssign::AExt:
    ExtType = ISD::EXTLOAD;
    break;
  }

  ArgValue = DAG.getExtLoad(
    ExtType, SL, VA.getLocVT(), Chain, FIN,
    MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
    MemVT);
  return ArgValue;
}

// Pre-process shader input arguments: record which PS inputs are
// allocated/enabled (marking unused ones as skippable) and split vector
// arguments into scalar pieces for register assignment.
static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
                                   CallingConv::ID CallConv,
                                   ArrayRef<ISD::InputArg> Ins,
                                   BitVector &Skipped,
                                   FunctionType *FType,
                                   SIMachineFunctionInfo *Info) {
  for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
    const ISD::InputArg &Arg = Ins[I];

    // First check if it's a PS input addr.
    if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
        !Arg.Flags.isByVal() && PSInputNum <= 15) {

      if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
        // We can safely skip PS inputs.
        Skipped.set(I);
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (Arg.Used)
        Info->markPSInputEnabled(PSInputNum);

      ++PSInputNum;
    }

    // Second split vertices into their elements.
    if (Arg.VT.isVector()) {
      ISD::InputArg NewArg = Arg;
      NewArg.Flags.setSplit();
      NewArg.VT = Arg.VT.getVectorElementType();

      // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
      // three or five element vertex only needs three or five registers,
      // NOT four or eight.
// (continuation of processShaderInputArgs: split this vector argument into
// one InputArg per ORIGINAL IR element)
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      for (unsigned J = 0; J != NumElements; ++J) {
        Splits.push_back(NewArg);
        NewArg.PartOffset += NewArg.VT.getStoreSize();
      }
    } else {
      Splits.push_back(Arg);
    }
  }
}

// Allocate special inputs passed in VGPRs.
static void allocateSpecialInputVGPRs(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) {
  // Each used workitem ID (X/Y/Z) occupies one preloaded VGPR, which is
  // marked live-in and removed from the CC allocation pool.
  if (Info.hasWorkItemIDX()) {
    unsigned Reg = TRI.getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkItemIDY()) {
    unsigned Reg = TRI.getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkItemIDZ()) {
    unsigned Reg = TRI.getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  if (Info.hasImplicitBufferPtr()) {
    unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
// (continuation of allocateHSAUserSGPRs: each enabled user SGPR input is
// registered with the function info, marked live-in, and reserved in CCInfo)
  if (Info.hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    // The scratch resource descriptor is 128 bits wide (4 SGPRs).
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    unsigned QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
    MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    unsigned DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.
}

// Allocate special input registers that are initialized per-wave.
static void allocateSystemSGPRs(CCState &CCInfo,
                                MachineFunction &MF,
                                SIMachineFunctionInfo &Info,
                                CallingConv::ID CallConv,
                                bool IsShader) {
  // Workgroup IDs and workgroup info arrive in fixed system SGPRs; mark each
  // used one live-in and reserve it so the CC assigner skips it.
  if (Info.hasWorkGroupIDX()) {
    unsigned Reg = Info.addWorkGroupIDX();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDY()) {
    unsigned Reg = Info.addWorkGroupIDY();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDZ()) {
    unsigned Reg = Info.addWorkGroupIDZ();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupInfo()) {
    unsigned Reg = Info.addWorkGroupInfo();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg;

    if (IsShader) {
      PrivateSegmentWaveByteOffsetReg =
        Info.getPrivateSegmentWaveByteOffsetSystemSGPR();

      // This is true if the scratch wave byte offset doesn't have a fixed
      // location.
      if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
        PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
        Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
      }
    } else
      PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }
}

// Decide which physical SGPRs will hold the scratch resource descriptor,
// scratch wave offset, and (optionally) the stack pointer for this function.
static void reservePrivateMemoryRegs(const TargetMachine &TM,
                                     MachineFunction &MF,
                                     const SIRegisterInfo &TRI,
                                     SIMachineFunctionInfo &Info,
                                     bool NeedSP) {
  // Now that we've figured out where the scratch register inputs are, see if
  // should reserve the arguments and use them directly.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool HasStackObjects = MFI.hasStackObjects();

  // Record that we know we have non-spill stack objects so we don't need to
  // check all stack objects later.
  if (HasStackObjects)
    Info.setHasNonSpillStackObjects(true);

  // Everything live out of a block is spilled with fast regalloc, so it's
  // almost certain that spilling will be required.
  if (TM.getOptLevel() == CodeGenOpt::None)
    HasStackObjects = true;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  if (ST.isAmdCodeObjectV2(MF)) {
    if (HasStackObjects) {
      // If we have stack objects, we unquestionably need the private buffer
      // resource. For the Code Object V2 ABI, this will be the first 4 user
      // SGPR inputs. We can reserve those and use them directly.
// (continuation of reservePrivateMemoryRegs: Code Object V2 path with stack
// objects — use the preloaded argument registers directly)
      unsigned PrivateSegmentBufferReg = TRI.getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
      Info.setScratchRSrcReg(PrivateSegmentBufferReg);

      unsigned PrivateSegmentWaveByteOffsetReg = TRI.getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
    } else {
      unsigned ReservedBufferReg
        = TRI.reservedPrivateSegmentBufferReg(MF);
      unsigned ReservedOffsetReg
        = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);

      // We tentatively reserve the last registers (skipping the last two
      // which may contain VCC). After register allocation, we'll replace
      // these with the ones immediately after those which were really
      // allocated. In the prologue copies will be inserted from the argument
      // to these reserved registers.
      Info.setScratchRSrcReg(ReservedBufferReg);
      Info.setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  } else {
    unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);

    // Without HSA, relocations are used for the scratch pointer and the
    // buffer resource setup is always inserted in the prologue. Scratch wave
    // offset is still in an input SGPR.
    Info.setScratchRSrcReg(ReservedBufferReg);

    if (HasStackObjects) {
      unsigned ScratchWaveOffsetReg = TRI.getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg);
    } else {
      unsigned ReservedOffsetReg
        = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
      Info.setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  }

  if (NeedSP) {
    unsigned ReservedStackPtrOffsetReg = TRI.reservedStackPtrOffsetReg(MF);
    Info.setStackPtrOffsetReg(ReservedStackPtrOffsetReg);

    // The SP must be distinct from the FP and must not alias part of the
    // scratch resource descriptor.
    assert(Info.getStackPtrOffsetReg() != Info.getFrameOffsetReg());
    assert(!TRI.isSubRegister(Info.getScratchRSrcReg(),
                              Info.getStackPtrOffsetReg()));
  }
}

// Lower the incoming formal arguments for kernels, shaders, and callable
// functions, allocating the special input registers along the way.
SDValue SITargetLowering::LowerFormalArguments(
  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  MachineFunction &MF = DAG.getMachineFunction();
  FunctionType *FType = MF.getFunction()->getFunctionType();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  // Graphics shaders are not supported under the HSA OS; diagnose and bail.
  if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
    const Function *Fn = MF.getFunction();
    DiagnosticInfoUnsupported NoGraphicsHSA(
      *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
    DAG.getContext()->diagnose(NoGraphicsHSA);
    return DAG.getEntryNode();
  }

  // Create stack objects that are used for emitting debugger prologue if
  // "amdgpu-debugger-emit-prologue" attribute was specified.
// (continuation of LowerFormalArguments, begun above)
  if (ST.debuggerEmitPrologue())
    createDebuggerPrologueStackObjects(MF);

  SmallVector<ISD::InputArg, 16> Splits;
  SmallVector<CCValAssign, 16> ArgLocs;
  BitVector Skipped(Ins.size());
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  bool IsShader = AMDGPU::isShader(CallConv);
  bool IsKernel = AMDGPU::isKernel(CallConv);
  bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);

  if (IsShader) {
    processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);

    // At least one interpolation mode must be enabled or else the GPU will
    // hang.
    //
    // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
    // set PSInputAddr, the user wants to enable some bits after the compilation
    // based on run-time states. Since we can't know what the final PSInputEna
    // will look like, so we shouldn't do anything here and the user should take
    // responsibility for the correct programming.
    //
    // Otherwise, the following restrictions apply:
    // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
    // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
    //   enabled too.
    if (CallConv == CallingConv::AMDGPU_PS &&
        ((Info->getPSInputAddr() & 0x7F) == 0 ||
         ((Info->getPSInputAddr() & 0xF) == 0 &&
          Info->isPSInputAllocated(11)))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    // Shaders must not request any of the kernel-only special inputs.
    assert(!Info->hasDispatchPtr() &&
           !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
           !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
           !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
           !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
           !Info->hasWorkItemIDZ());
  } else if (IsKernel) {
    assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
  } else {
    Splits.append(Ins.begin(), Ins.end());
  }

  if (IsEntryFunc) {
    allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
    allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
  }

  if (IsKernel) {
    analyzeFormalArgumentsCompute(CCInfo, Ins);
  } else {
    CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
    CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
  }

  SmallVector<SDValue, 16> Chains;

  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];
    // PS inputs marked skippable earlier just become undef values.
    if (Skipped[i]) {
      InVals.push_back(DAG.getUNDEF(Arg.VT));
      continue;
    }

    CCValAssign &VA = ArgLocs[ArgIdx++];
    MVT VT = VA.getLocVT();

    if (IsEntryFunc && VA.isMemLoc()) {
      // Kernel argument: load it from the kernarg segment.
      VT = Ins[i].VT;
      EVT MemVT = VA.getLocVT();

      const uint64_t Offset = Subtarget->getExplicitKernelArgOffset(MF) +
        VA.getLocMemOffset();
      Info->setABIArgOffset(Offset + MemVT.getStoreSize());

      // The first 36 bytes of the input buffer contains information about
      // thread group and global sizes.
      SDValue Arg = lowerKernargMemParameter(
        DAG, VT, MemVT, DL, Chain, Offset, Ins[i].Flags.isSExt(), &Ins[i]);
      Chains.push_back(Arg.getValue(1));

      auto *ParamTy =
        dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
      if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
          ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
        // On SI local pointers are just offsets into LDS, so they are always
        // less than 16-bits. On CI and newer they could potentially be
        // real pointers, so we can't guarantee their size.
        Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
                          DAG.getValueType(MVT::i16));
      }

      InVals.push_back(Arg);
      continue;
    } else if (!IsEntryFunc && VA.isMemLoc()) {
      // Callable-function argument passed on the stack.
      SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
      InVals.push_back(Val);
      if (!Arg.Flags.isByVal())
        Chains.push_back(Val.getValue(1));
      continue;
    }

    assert(VA.isRegLoc() && "Parameter must be in a register!");

    unsigned Reg = VA.getLocReg();
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);

    Reg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);

    if (IsShader && Arg.VT.isVector()) {
      // Build a vector from the registers
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      SmallVector<SDValue, 4> Regs;
      Regs.push_back(Val);
      for (unsigned j = 1; j != NumElements; ++j) {
        Reg = ArgLocs[ArgIdx++].getLocReg();
        Reg = MF.addLiveIn(Reg, RC);

        SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
        Regs.push_back(Copy);
      }

      // Fill up the missing vector elements
      NumElements = Arg.VT.getVectorNumElements() - NumElements;
      Regs.append(NumElements, DAG.getUNDEF(VT));

      InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
      continue;
    }

    InVals.push_back(Val);
  }

  const MachineFrameInfo &FrameInfo = MF.getFrameInfo();

  // TODO: Could maybe omit SP if only tail calls?
  bool NeedSP = FrameInfo.hasCalls() || FrameInfo.hasVarSizedObjects();

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
    reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info, NeedSP);
  } else {
    // Non-entry functions inherit their scratch setup; just keep the CC
    // assigner away from those registers.
    CCInfo.AllocateReg(Info->getScratchRSrcReg());
    CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
    CCInfo.AllocateReg(Info->getFrameOffsetReg());

    if (NeedSP) {
      unsigned StackPtrReg = findFirstFreeSGPR(CCInfo);
      CCInfo.AllocateReg(StackPtrReg);
      Info->setStackPtrOffsetReg(StackPtrReg);
    }
  }

  return Chains.empty() ? Chain :
    DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}

// TODO: If return values can't fit in registers, we should return as many as
// possible in registers before passing on stack.
bool SITargetLowering::CanLowerReturn(
  CallingConv::ID CallConv,
  MachineFunction &MF, bool IsVarArg,
  const SmallVectorImpl<ISD::OutputArg> &Outs,
  LLVMContext &Context) const {
  // Replacing returns with sret/stack usage doesn't make sense for shaders.
  // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
  // for shaders. Vector types should be explicitly handled by CC.
// (continuation of CanLowerReturn, begun above)
  if (AMDGPU::isEntryFunctionCC(CallConv))
    return true;

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
}

// Lower return values: kernels defer to the AMDGPU base lowering, shaders
// split vectors and may end the wave, callable functions also restore the
// return address.
SDValue
SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                              bool isVarArg,
                              const SmallVectorImpl<ISD::OutputArg> &Outs,
                              const SmallVectorImpl<SDValue> &OutVals,
                              const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  if (AMDGPU::isKernel(CallConv)) {
    return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
                                             OutVals, DL, DAG);
  }

  bool IsShader = AMDGPU::isShader(CallConv);

  Info->setIfReturnsVoid(Outs.size() == 0);
  bool IsWaveEnd = Info->returnsVoid() && IsShader;

  SmallVector<ISD::OutputArg, 48> Splits;
  SmallVector<SDValue, 48> SplitVals;

  // Split vectors into their elements.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    const ISD::OutputArg &Out = Outs[i];

    if (IsShader && Out.VT.isVector()) {
      MVT VT = Out.VT.getVectorElementType();
      ISD::OutputArg NewOut = Out;
      NewOut.Flags.setSplit();
      NewOut.VT = VT;

      // We want the original number of vector elements here, e.g.
      // three or five, not four or eight.
      unsigned NumElements = Out.ArgVT.getVectorNumElements();

      for (unsigned j = 0; j != NumElements; ++j) {
        SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i],
                                   DAG.getConstant(j, DL, MVT::i32));
        SplitVals.push_back(Elem);
        Splits.push_back(NewOut);
        NewOut.PartOffset += NewOut.VT.getStoreSize();
      }
    } else {
      SplitVals.push_back(OutVals[i]);
      Splits.push_back(Out);
    }
  }

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 48> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Splits, CCAssignFnForReturn(CallConv, isVarArg));

  SDValue Flag;
  SmallVector<SDValue, 48> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)

  // Add return address for callable functions.
  if (!Info->isEntryFunction()) {
    const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
    SDValue ReturnAddrReg = CreateLiveInRegister(
      DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);

    // FIXME: Should be able to use a vreg here, but need a way to prevent it
    // from being allocated to a CSR.

    SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
                                                MVT::i64);

    Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
    Flag = Chain.getValue(1);

    RetOps.push_back(PhysReturnAddrReg);
  }

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    // TODO: Partially return in registers if return values don't fit.

    SDValue Arg = SplitVals[realRVLocIdx];

    // Copied from other backends.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // FIXME: Does sret work properly?

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Flag.getNode())
    RetOps.push_back(Flag);

  // Void shader returns end the wave; otherwise pick the return pseudo.
  unsigned Opc = AMDGPUISD::ENDPGM;
  if (!IsWaveEnd)
    Opc = IsShader ?
// (continuation of LowerReturn's opcode selection, begun above)
      AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}

// Resolve a named physical register (for read/write_register intrinsics),
// validating both the name and the requested value type.
unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                             SelectionDAG &DAG) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
    .Case("m0", AMDGPU::M0)
    .Case("exec", AMDGPU::EXEC)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Default(AMDGPU::NoRegister);

  if (Reg == AMDGPU::NoRegister) {
    report_fatal_error(Twine("invalid register name \""
                             + StringRef(RegName) + "\"."));

  }

  // SI does not have the flat_scratch registers.
  if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
      Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
    report_fatal_error(Twine("invalid register \""
                             + StringRef(RegName) + "\" for subtarget."));
  }

  // Check that the requested type width matches the register width.
  switch (Reg) {
  case AMDGPU::M0:
  case AMDGPU::EXEC_LO:
  case AMDGPU::EXEC_HI:
  case AMDGPU::FLAT_SCR_LO:
  case AMDGPU::FLAT_SCR_HI:
    if (VT.getSizeInBits() == 32)
      return Reg;
    break;
  case AMDGPU::EXEC:
  case AMDGPU::FLAT_SCR:
    if (VT.getSizeInBits() == 64)
      return Reg;
    break;
  default:
    llvm_unreachable("missing register type checking");
  }

  report_fatal_error(Twine("invalid type for register \""
                           + StringRef(RegName) + "\"."));
}

// If kill is not the last instruction, split the block so kill is always a
// proper terminator.
MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
                                                    MachineBasicBlock *BB) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  MachineBasicBlock::iterator SplitPoint(&MI);
  ++SplitPoint;

  if (SplitPoint == BB->end()) {
    // Don't bother with a new block.
// (continuation of splitKillBlock: the kill already ends its block, so just
// retarget the instruction to the terminator form)
    MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR));
    return BB;
  }

  MachineFunction *MF = BB->getParent();
  MachineBasicBlock *SplitBB
    = MF->CreateMachineBasicBlock(BB->getBasicBlock());

  // Move everything after the kill into a new fallthrough block.
  MF->insert(++MachineFunction::iterator(BB), SplitBB);
  SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());

  SplitBB->transferSuccessorsAndUpdatePHIs(BB);
  BB->addSuccessor(SplitBB);

  MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR));
  return SplitBB;
}

// Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
// wavefront. If the value is uniform and just happens to be in a VGPR, this
// will only do one iteration. In the worst case, this will loop 64 times.
//
// TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
  const SIInstrInfo *TII,
  MachineRegisterInfo &MRI,
  MachineBasicBlock &OrigBB,
  MachineBasicBlock &LoopBB,
  const DebugLoc &DL,
  const MachineOperand &IdxReg,
  unsigned InitReg,
  unsigned ResultReg,
  unsigned PhiReg,
  unsigned InitSaveExecReg,
  int Offset,
  bool UseGPRIdxMode) {
  MachineBasicBlock::iterator I = LoopBB.begin();

  unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  // Loop-carried PHIs for the result value and the accumulated exec mask.
  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
    .addReg(InitReg)
    .addMBB(&OrigBB)
    .addReg(ResultReg)
    .addMBB(&LoopBB);

  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
    .addReg(InitSaveExecReg)
    .addMBB(&OrigBB)
    .addReg(NewExec)
    .addMBB(&LoopBB);

  // Read the next variant <- also loop target.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
    .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));

  // Compare the just read M0 value to all possible Idx values.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
    .addReg(CurrentIdxReg)
    .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());

  if (UseGPRIdxMode) {
    unsigned IdxReg;
    if (Offset == 0) {
      IdxReg = CurrentIdxReg;
    } else {
      IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
        .addReg(CurrentIdxReg, RegState::Kill)
        .addImm(Offset);
    }

    MachineInstr *SetIdx =
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_IDX))
      .addReg(IdxReg, RegState::Kill);
    SetIdx->getOperand(2).setIsUndef();
  } else {
    // Move index from VCC into M0
    if (Offset == 0) {
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(CurrentIdxReg, RegState::Kill);
    } else {
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(CurrentIdxReg, RegState::Kill)
        .addImm(Offset);
    }
  }

  // Update EXEC, save the original EXEC value to VCC.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
    .addReg(CondReg, RegState::Kill);

  MRI.setSimpleHint(NewExec, CondReg);

  // Update EXEC, switch all done bits to 0 and all todo bits to 1.
  MachineInstr *InsertPt =
    BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(NewExec);

  // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
  // s_cbranch_scc0?

  // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
// (continuation of emitLoadM0FromVGPRLoop: branch back while any lanes remain)
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&LoopBB);

  return InsertPt->getIterator();
}

// This has slightly sub-optimal regalloc when the source vector is killed by
// the read. The register allocator does not understand that the kill is
// per-workitem, so is kept alive for the whole loop so we end up not re-using a
// subregister from it, using 1 more VGPR than necessary. This was saved when
// this was expanded after register allocation.
static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
                                                  MachineBasicBlock &MBB,
                                                  MachineInstr &MI,
                                                  unsigned InitResultReg,
                                                  unsigned PhiReg,
                                                  int Offset,
                                                  bool UseGPRIdxMode) {
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);

  // Save the EXEC mask
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
    .addReg(AMDGPU::EXEC);

  // To insert the loop we need to split the block. Move everything after this
  // point to a new block, and insert a new empty block between the two.
  MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
  MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF->insert(MBBI, LoopBB);
  MF->insert(MBBI, RemainderBB);

  // The loop both falls through to the remainder and branches to itself.
  LoopBB->addSuccessor(LoopBB);
  LoopBB->addSuccessor(RemainderBB);

  // Move the rest of the block into a new block.
1795 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 1796 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); 1797 1798 MBB.addSuccessor(LoopBB); 1799 1800 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 1801 1802 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx, 1803 InitResultReg, DstReg, PhiReg, TmpExec, 1804 Offset, UseGPRIdxMode); 1805 1806 MachineBasicBlock::iterator First = RemainderBB->begin(); 1807 BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) 1808 .addReg(SaveExec); 1809 1810 return InsPt; 1811 } 1812 1813 // Returns subreg index, offset 1814 static std::pair<unsigned, int> 1815 computeIndirectRegAndOffset(const SIRegisterInfo &TRI, 1816 const TargetRegisterClass *SuperRC, 1817 unsigned VecReg, 1818 int Offset) { 1819 int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32; 1820 1821 // Skip out of bounds offsets, or else we would end up using an undefined 1822 // register. 1823 if (Offset >= NumElts || Offset < 0) 1824 return std::make_pair(AMDGPU::sub0, Offset); 1825 1826 return std::make_pair(AMDGPU::sub0 + Offset, 0); 1827 } 1828 1829 // Return true if the index is an SGPR and was set. 1830 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII, 1831 MachineRegisterInfo &MRI, 1832 MachineInstr &MI, 1833 int Offset, 1834 bool UseGPRIdxMode, 1835 bool IsIndirectSrc) { 1836 MachineBasicBlock *MBB = MI.getParent(); 1837 const DebugLoc &DL = MI.getDebugLoc(); 1838 MachineBasicBlock::iterator I(&MI); 1839 1840 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 1841 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); 1842 1843 assert(Idx->getReg() != AMDGPU::NoRegister); 1844 1845 if (!TII->getRegisterInfo().isSGPRClass(IdxRC)) 1846 return false; 1847 1848 if (UseGPRIdxMode) { 1849 unsigned IdxMode = IsIndirectSrc ? 
1850 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE; 1851 if (Offset == 0) { 1852 MachineInstr *SetOn = 1853 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 1854 .add(*Idx) 1855 .addImm(IdxMode); 1856 1857 SetOn->getOperand(3).setIsUndef(); 1858 } else { 1859 unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 1860 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) 1861 .add(*Idx) 1862 .addImm(Offset); 1863 MachineInstr *SetOn = 1864 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 1865 .addReg(Tmp, RegState::Kill) 1866 .addImm(IdxMode); 1867 1868 SetOn->getOperand(3).setIsUndef(); 1869 } 1870 1871 return true; 1872 } 1873 1874 if (Offset == 0) { 1875 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 1876 .add(*Idx); 1877 } else { 1878 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 1879 .add(*Idx) 1880 .addImm(Offset); 1881 } 1882 1883 return true; 1884 } 1885 1886 // Control flow needs to be inserted if indexing with a VGPR. 
// Lower SI_INDIRECT_SRC_* : read one 32-bit element of a vector register at a
// dynamic index. Uniform (SGPR) indices lower to straight-line code; VGPR
// indices require a waterfall loop built by loadM0FromVGPR.
static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
                                          MachineBasicBlock &MBB,
                                          const SISubtarget &ST) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();

  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);

  // Fold a constant in-bounds offset into the subregister index.
  unsigned SubReg;
  std::tie(SubReg, Offset)
    = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);

  bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);

  // Uniform index: no loop required.
  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    if (UseGPRIdxMode) {
      // TODO: Look at the uses to avoid the copy. This may require rescheduling
      // to avoid interfering with other uses, so probably requires a new
      // optimization pass.
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
        .addReg(SrcReg, RegState::Undef, SubReg)
        .addReg(SrcReg, RegState::Implicit)
        .addReg(AMDGPU::M0, RegState::Implicit);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
    } else {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
        .addReg(SrcReg, RegState::Undef, SubReg)
        .addReg(SrcReg, RegState::Implicit);
    }

    MI.eraseFromParent();

    return &MBB;
  }

  // Divergent index: build the waterfall loop.
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);

  if (UseGPRIdxMode) {
    MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
                          .addImm(0) // Reset inside loop.
                          .addImm(VGPRIndexMode::SRC0_ENABLE);
    SetOn->getOperand(3).setIsUndef();

    // Disable again after the loop.
    BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
  }

  auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, UseGPRIdxMode);
  MachineBasicBlock *LoopBB = InsPt->getParent();

  // The indexed read itself is emitted inside the loop body.
  if (UseGPRIdxMode) {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
      .addReg(SrcReg, RegState::Undef, SubReg)
      .addReg(SrcReg, RegState::Implicit)
      .addReg(AMDGPU::M0, RegState::Implicit);
  } else {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
      .addReg(SrcReg, RegState::Undef, SubReg)
      .addReg(SrcReg, RegState::Implicit);
  }

  MI.eraseFromParent();

  return LoopBB;
}

// Map the byte size of the vector register class to the matching
// V_MOVRELD_B32 pseudo.
static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
                                 const TargetRegisterClass *VecRC) {
  switch (TRI.getRegSizeInBits(*VecRC)) {
  case 32: // 4 bytes
    return AMDGPU::V_MOVRELD_B32_V1;
  case 64: // 8 bytes
    return AMDGPU::V_MOVRELD_B32_V2;
  case 128: // 16 bytes
    return AMDGPU::V_MOVRELD_B32_V4;
  case 256: // 32 bytes
    return AMDGPU::V_MOVRELD_B32_V8;
  case 512: // 64 bytes
    return AMDGPU::V_MOVRELD_B32_V16;
  default:
    llvm_unreachable("unsupported size for MOVRELD pseudos");
  }
}

// Lower SI_INDIRECT_DST_* : write one 32-bit element of a vector register at
// a dynamic index. Mirrors emitIndirectSrc: straight-line code for SGPR
// indices, waterfall loop for VGPR indices.
static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
                                          MachineBasicBlock &MBB,
                                          const SISubtarget &ST) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Dst = MI.getOperand(0).getReg();
  const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());

  // This can be an immediate, but will be folded later.
  assert(Val->getReg());

  unsigned SubReg;
  std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
                                                         SrcVec->getReg(),
                                                         Offset);
  bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);

  // No index register at all: the element is statically known.
  if (Idx->getReg() == AMDGPU::NoRegister) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    assert(Offset == 0);

    BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
        .add(*SrcVec)
        .add(*Val)
        .addImm(SubReg);

    MI.eraseFromParent();
    return &MBB;
  }

  // Uniform index: no loop required.
  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    if (UseGPRIdxMode) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
          .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
          .add(*Val)
          .addReg(Dst, RegState::ImplicitDefine)
          .addReg(SrcVec->getReg(), RegState::Implicit)
          .addReg(AMDGPU::M0, RegState::Implicit);

      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
    } else {
      const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));

      BuildMI(MBB, I, DL, MovRelDesc)
          .addReg(Dst, RegState::Define)
          .addReg(SrcVec->getReg())
          .add(*Val)
          .addImm(SubReg - AMDGPU::sub0);
    }

    MI.eraseFromParent();
    return &MBB;
  }

  // The value is re-read on every loop iteration, so it must stay live.
  if (Val->isReg())
    MRI.clearKillFlags(Val->getReg());

  const DebugLoc &DL = MI.getDebugLoc();

  if (UseGPRIdxMode) {
    MachineBasicBlock::iterator I(&MI);

    MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
                          .addImm(0) // Reset inside loop.
                          .addImm(VGPRIndexMode::DST_ENABLE);
    SetOn->getOperand(3).setIsUndef();

    // Disable again after the loop.
    BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
  }

  unsigned PhiReg = MRI.createVirtualRegister(VecRC);

  auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
                              Offset, UseGPRIdxMode);
  MachineBasicBlock *LoopBB = InsPt->getParent();

  // The indexed write itself is emitted inside the loop body.
  if (UseGPRIdxMode) {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
        .addReg(PhiReg, RegState::Undef, SubReg) // vdst
        .add(*Val) // src0
        .addReg(Dst, RegState::ImplicitDefine)
        .addReg(PhiReg, RegState::Implicit)
        .addReg(AMDGPU::M0, RegState::Implicit);
  } else {
    const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));

    BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
        .addReg(Dst, RegState::Define)
        .addReg(PhiReg)
        .add(*Val)
        .addImm(SubReg - AMDGPU::sub0);
  }

  MI.eraseFromParent();

  return LoopBB;
}

MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
  MachineInstr &MI, MachineBasicBlock *BB) const {

  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  MachineFunction *MF = BB->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();

  if (TII->isMIMG(MI)) {
    if (!MI.memoperands_empty())
      return BB;
    // Add a memoperand for mimg instructions so that they aren't assumed to
    // be ordered memory instructions.
    MachinePointerInfo PtrInfo(MFI->getImagePSV());
    MachineMemOperand::Flags Flags = MachineMemOperand::MODereferenceable;
    if (MI.mayStore())
      Flags |= MachineMemOperand::MOStore;

    if (MI.mayLoad())
      Flags |= MachineMemOperand::MOLoad;

    auto MMO = MF->getMachineMemOperand(PtrInfo, Flags, 0, 0);
    MI.addMemOperand(*MF, MMO);
    return BB;
  }

  switch (MI.getOpcode()) {
  case AMDGPU::SI_INIT_M0:
    // Expand to a plain scalar move into M0.
    BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
            TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .add(MI.getOperand(0));
    MI.eraseFromParent();
    return BB;

  case AMDGPU::SI_INIT_EXEC:
    // This should be before all vector instructions.
    BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
            AMDGPU::EXEC)
        .addImm(MI.getOperand(0).getImm());
    MI.eraseFromParent();
    return BB;

  case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
    // Extract the thread count from an SGPR input and set EXEC accordingly.
    // Since BFM can't shift by 64, handle that case with CMP + CMOV.
    //
    // S_BFE_U32 count, input, {shift, 7}
    // S_BFM_B64 exec, count, 0
    // S_CMP_EQ_U32 count, 64
    // S_CMOV_B64 exec, -1
    MachineInstr *FirstMI = &*BB->begin();
    MachineRegisterInfo &MRI = MF->getRegInfo();
    unsigned InputReg = MI.getOperand(0).getReg();
    unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    bool Found = false;

    // Move the COPY of the input reg to the beginning, so that we can use it.
    for (auto I = BB->begin(); I != &MI; I++) {
      if (I->getOpcode() != TargetOpcode::COPY ||
          I->getOperand(0).getReg() != InputReg)
        continue;

      if (I == FirstMI) {
        // Already first; just insert after it instead of moving it.
        FirstMI = &*++BB->begin();
      } else {
        I->removeFromParent();
        BB->insert(FirstMI, &*I);
      }
      Found = true;
      break;
    }
    assert(Found);
    (void)Found;

    // This should be before all vector instructions.
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
        .addReg(InputReg)
        .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
            AMDGPU::EXEC)
        .addReg(CountReg)
        .addImm(0);
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
        .addReg(CountReg, RegState::Kill)
        .addImm(64);
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
            AMDGPU::EXEC)
        .addImm(-1);
    MI.eraseFromParent();
    return BB;
  }

  case AMDGPU::GET_GROUPSTATICSIZE: {
    // LDS size is a compile-time constant per function; materialize it.
    DebugLoc DL = MI.getDebugLoc();
    BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
        .add(MI.getOperand(0))
        .addImm(MFI->getLDSSize());
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::SI_INDIRECT_SRC_V1:
  case AMDGPU::SI_INDIRECT_SRC_V2:
  case AMDGPU::SI_INDIRECT_SRC_V4:
  case AMDGPU::SI_INDIRECT_SRC_V8:
  case AMDGPU::SI_INDIRECT_SRC_V16:
    return emitIndirectSrc(MI, *BB, *getSubtarget());
  case AMDGPU::SI_INDIRECT_DST_V1:
  case AMDGPU::SI_INDIRECT_DST_V2:
  case AMDGPU::SI_INDIRECT_DST_V4:
  case AMDGPU::SI_INDIRECT_DST_V8:
  case AMDGPU::SI_INDIRECT_DST_V16:
    return emitIndirectDst(MI, *BB, *getSubtarget());
  case AMDGPU::SI_KILL:
    return splitKillBlock(MI, BB);
  case AMDGPU::V_CNDMASK_B64_PSEUDO: {
    // Expand the 64-bit select into two 32-bit V_CNDMASKs on the sub0/sub1
    // halves, then recombine with REG_SEQUENCE.
    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

    unsigned Dst = MI.getOperand(0).getReg();
    unsigned Src0 = MI.getOperand(1).getReg();
    unsigned Src1 = MI.getOperand(2).getReg();
    const DebugLoc &DL = MI.getDebugLoc();
    unsigned SrcCond = MI.getOperand(3).getReg();

    unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
      .addReg(Src0, 0, AMDGPU::sub0)
      .addReg(Src1, 0, AMDGPU::sub0)
      .addReg(SrcCond);
    BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
      .addReg(Src0, 0, AMDGPU::sub1)
      .addReg(Src1, 0, AMDGPU::sub1)
      .addReg(SrcCond);

    BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
      .addReg(DstLo)
      .addImm(AMDGPU::sub0)
      .addReg(DstHi)
      .addImm(AMDGPU::sub1);
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::SI_BR_UNDEF: {
    const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
    const DebugLoc &DL = MI.getDebugLoc();
    MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
                           .add(MI.getOperand(0));
    Br->getOperand(1).setIsUndef(true); // read undef SCC
    MI.eraseFromParent();
    return BB;
  }
  default:
    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  }
}

bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  // This currently forces unfolding various combinations of fsub into fma with
  // free fneg'd operands. As long as we have fast FMA (controlled by
  // isFMAFasterThanFMulAndFAdd), we should perform these.

  // When fma is quarter rate, for f64 where add / sub are at best half rate,
  // most of these combines appear to be cycle neutral but save on instruction
  // count / code size.
  return true;
}

// i1 for scalars, <N x i1> for vectors.
EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
                                         EVT VT) const {
  if (!VT.isVector()) {
    return MVT::i1;
  }
  return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
}

MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
  // TODO: Should i16 be used always if legal? For now it would force VALU
  // shifts.
  return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
}

// Answering this is somewhat tricky and depends on the specific device which
// have different rates for fma or all f64 operations.
//
// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
// regardless of which device (although the number of cycles differs between
// devices), so it is always profitable for f64.
//
// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
// only on full rate devices. Normally, we should prefer selecting v_mad_f32
// which we can always do even without fused FP ops since it returns the same
// result as the separate operations and since it is always full
// rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
// however does not support denormals, so we do report fma as faster if we have
// a fast fma device and require denormals.
//
bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
    // This is as fast on some subtargets. However, we always have full rate f32
    // mad available which returns the same result as the separate operations
    // which we should prefer over fma. We can't use this if we want to support
    // denormals, so only report this in these cases.
    return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32();
  case MVT::f64:
    return true;
  case MVT::f16:
    return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
  default:
    break;
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//

SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::LOAD: {
    SDValue Result = LowerLOAD(Op, DAG);
    assert((!Result.getNode() ||
            Result.getNode()->getNumValues() == 2) &&
           "Load should return a value and a chain");
    return Result;
  }

  case ISD::FSIN:
  case ISD::FCOS:
    return LowerTrig(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::FDIV: return LowerFDIV(Op, DAG);
  case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::GlobalAddress: {
    MachineFunction &MF = DAG.getMachineFunction();
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    return LowerGlobalAddress(MFI, Op, DAG);
  }
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::FP_ROUND:
    return lowerFP_ROUND(Op, DAG);

  case ISD::TRAP:
  case
ISD::DEBUGTRAP: 2356 return lowerTRAP(Op, DAG); 2357 } 2358 return SDValue(); 2359 } 2360 2361 void SITargetLowering::ReplaceNodeResults(SDNode *N, 2362 SmallVectorImpl<SDValue> &Results, 2363 SelectionDAG &DAG) const { 2364 switch (N->getOpcode()) { 2365 case ISD::INSERT_VECTOR_ELT: { 2366 if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG)) 2367 Results.push_back(Res); 2368 return; 2369 } 2370 case ISD::EXTRACT_VECTOR_ELT: { 2371 if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG)) 2372 Results.push_back(Res); 2373 return; 2374 } 2375 case ISD::INTRINSIC_WO_CHAIN: { 2376 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 2377 switch (IID) { 2378 case Intrinsic::amdgcn_cvt_pkrtz: { 2379 SDValue Src0 = N->getOperand(1); 2380 SDValue Src1 = N->getOperand(2); 2381 SDLoc SL(N); 2382 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32, 2383 Src0, Src1); 2384 2385 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt)); 2386 return; 2387 } 2388 default: 2389 break; 2390 } 2391 } 2392 case ISD::SELECT: { 2393 SDLoc SL(N); 2394 EVT VT = N->getValueType(0); 2395 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); 2396 SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1)); 2397 SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2)); 2398 2399 EVT SelectVT = NewVT; 2400 if (NewVT.bitsLT(MVT::i32)) { 2401 LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS); 2402 RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS); 2403 SelectVT = MVT::i32; 2404 } 2405 2406 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT, 2407 N->getOperand(0), LHS, RHS); 2408 2409 if (NewVT != SelectVT) 2410 NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect); 2411 Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect)); 2412 return; 2413 } 2414 default: 2415 break; 2416 } 2417 } 2418 2419 /// \brief Helper function for LowerBRCOND 2420 static SDNode *findUser(SDValue Value, 
                        unsigned Opcode) {
  // Scan all uses of any result of Value's node, keeping only uses of this
  // particular result value.
  SDNode *Parent = Value.getNode();
  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
       I != E; ++I) {

    if (I.getUse().get() != Value)
      continue;

    if (I->getOpcode() == Opcode)
      return *I;
  }
  return nullptr;
}

// If Intr is one of the structured control-flow intrinsics, return the
// corresponding AMDGPUISD opcode; return 0 for anything else (a uniform
// branch that needs no legalization).
unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
  if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
    case Intrinsic::amdgcn_if:
      return AMDGPUISD::IF;
    case Intrinsic::amdgcn_else:
      return AMDGPUISD::ELSE;
    case Intrinsic::amdgcn_loop:
      return AMDGPUISD::LOOP;
    case Intrinsic::amdgcn_end_cf:
      // end_cf is consumed by the control-flow lowering, never by BRCOND.
      llvm_unreachable("should not occur");
    default:
      return 0;
    }
  }

  // break, if_break, else_break are all only used as inputs to loop, not
  // directly as branch conditions.
  return 0;
}

void SITargetLowering::createDebuggerPrologueStackObjects(
    MachineFunction &MF) const {
  // Create stack objects that are used for emitting debugger prologue.
  //
  // Debugger prologue writes work group IDs and work item IDs to scratch memory
  // at fixed location in the following format:
  //   offset 0:  work group ID x
  //   offset 4:  work group ID y
  //   offset 8:  work group ID z
  //   offset 16: work item ID x
  //   offset 20: work item ID y
  //   offset 24: work item ID z
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  int ObjectIdx = 0;

  // For each dimension:
  for (unsigned i = 0; i < 3; ++i) {
    // Create fixed stack object for work group ID.
    ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true);
    Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx);
    // Create fixed stack object for work item ID.
2477 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true); 2478 Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx); 2479 } 2480 } 2481 2482 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const { 2483 const Triple &TT = getTargetMachine().getTargetTriple(); 2484 return GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS && 2485 AMDGPU::shouldEmitConstantsToTextSection(TT); 2486 } 2487 2488 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const { 2489 return (GV->getType()->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS || 2490 GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) && 2491 !shouldEmitFixup(GV) && 2492 !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); 2493 } 2494 2495 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const { 2496 return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV); 2497 } 2498 2499 /// This transforms the control flow intrinsics to get the branch destination as 2500 /// last parameter, also switches branch target with BR if the need arise 2501 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, 2502 SelectionDAG &DAG) const { 2503 SDLoc DL(BRCOND); 2504 2505 SDNode *Intr = BRCOND.getOperand(1).getNode(); 2506 SDValue Target = BRCOND.getOperand(2); 2507 SDNode *BR = nullptr; 2508 SDNode *SetCC = nullptr; 2509 2510 if (Intr->getOpcode() == ISD::SETCC) { 2511 // As long as we negate the condition everything is fine 2512 SetCC = Intr; 2513 Intr = SetCC->getOperand(0).getNode(); 2514 2515 } else { 2516 // Get the target from BR if we don't negate the condition 2517 BR = findUser(BRCOND, ISD::BR); 2518 Target = BR->getOperand(1); 2519 } 2520 2521 // FIXME: This changes the types of the intrinsics instead of introducing new 2522 // nodes with the correct types. 2523 // e.g. 
llvm.amdgcn.loop 2524 2525 // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3 2526 // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088> 2527 2528 unsigned CFNode = isCFIntrinsic(Intr); 2529 if (CFNode == 0) { 2530 // This is a uniform branch so we don't need to legalize. 2531 return BRCOND; 2532 } 2533 2534 bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID || 2535 Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN; 2536 2537 assert(!SetCC || 2538 (SetCC->getConstantOperandVal(1) == 1 && 2539 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() == 2540 ISD::SETNE)); 2541 2542 // operands of the new intrinsic call 2543 SmallVector<SDValue, 4> Ops; 2544 if (HaveChain) 2545 Ops.push_back(BRCOND.getOperand(0)); 2546 2547 Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end()); 2548 Ops.push_back(Target); 2549 2550 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end()); 2551 2552 // build the new intrinsic call 2553 SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode(); 2554 2555 if (!HaveChain) { 2556 SDValue Ops[] = { 2557 SDValue(Result, 0), 2558 BRCOND.getOperand(0) 2559 }; 2560 2561 Result = DAG.getMergeValues(Ops, DL).getNode(); 2562 } 2563 2564 if (BR) { 2565 // Give the branch instruction our target 2566 SDValue Ops[] = { 2567 BR->getOperand(0), 2568 BRCOND.getOperand(2) 2569 }; 2570 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops); 2571 DAG.ReplaceAllUsesWith(BR, NewBR.getNode()); 2572 BR = NewBR.getNode(); 2573 } 2574 2575 SDValue Chain = SDValue(Result, Result->getNumValues() - 1); 2576 2577 // Copy the intrinsic results to registers 2578 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) { 2579 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg); 2580 if (!CopyToReg) 2581 continue; 2582 2583 Chain = DAG.getCopyToReg( 2584 Chain, DL, 2585 CopyToReg->getOperand(1), 2586 SDValue(Result, i - 1), 2587 SDValue()); 2588 
2589 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0)); 2590 } 2591 2592 // Remove the old intrinsic from the chain 2593 DAG.ReplaceAllUsesOfValueWith( 2594 SDValue(Intr, Intr->getNumValues() - 1), 2595 Intr->getOperand(0)); 2596 2597 return Chain; 2598 } 2599 2600 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG, 2601 SDValue Op, 2602 const SDLoc &DL, 2603 EVT VT) const { 2604 return Op.getValueType().bitsLE(VT) ? 2605 DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) : 2606 DAG.getNode(ISD::FTRUNC, DL, VT, Op); 2607 } 2608 2609 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { 2610 assert(Op.getValueType() == MVT::f16 && 2611 "Do not know how to custom lower FP_ROUND for non-f16 type"); 2612 2613 SDValue Src = Op.getOperand(0); 2614 EVT SrcVT = Src.getValueType(); 2615 if (SrcVT != MVT::f64) 2616 return Op; 2617 2618 SDLoc DL(Op); 2619 2620 SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src); 2621 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16); 2622 return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc); 2623 } 2624 2625 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const { 2626 SDLoc SL(Op); 2627 MachineFunction &MF = DAG.getMachineFunction(); 2628 SDValue Chain = Op.getOperand(0); 2629 2630 unsigned TrapID = Op.getOpcode() == ISD::DEBUGTRAP ? 
      SISubtarget::TrapIDLLVMDebugTrap : SISubtarget::TrapIDLLVMTrap;

  if (Subtarget->getTrapHandlerAbi() == SISubtarget::TrapHandlerAbiHsa &&
      Subtarget->isTrapHandlerEnabled()) {
    // HSA trap ABI: pass the queue pointer in SGPR0/SGPR1 and emit s_trap.
    SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
    unsigned UserSGPR = Info->getQueuePtrUserSGPR();
    assert(UserSGPR != AMDGPU::NoRegister);

    SDValue QueuePtr = CreateLiveInRegister(
      DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);

    SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);

    SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
                                     QueuePtr, SDValue());

    SDValue Ops[] = {
      ToReg,
      DAG.getTargetConstant(TrapID, SL, MVT::i16),
      SGPR01,
      ToReg.getValue(1)
    };

    return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
  }

  // No trap handler: llvm.trap ends the program; llvm.debugtrap only warns.
  switch (TrapID) {
  case SISubtarget::TrapIDLLVMTrap:
    return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
  case SISubtarget::TrapIDLLVMDebugTrap: {
    DiagnosticInfoUnsupported NoTrap(*MF.getFunction(),
                                     "debugtrap handler not supported",
                                     Op.getDebugLoc(),
                                     DS_Warning);
    LLVMContext &Ctx = MF.getFunction()->getContext();
    Ctx.diagnose(NoTrap);
    return Chain;
  }
  default:
    llvm_unreachable("unsupported trap handler type!");
  }

  return Chain;
}

// Return the high 32 bits of the flat address aperture for address space
// \p AS (LOCAL or PRIVATE), either from the hardware aperture registers or
// loaded from the HSA queue descriptor.
SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
                                             SelectionDAG &DAG) const {
  // FIXME: Use inline constants (src_{shared, private}_base) instead.
  if (Subtarget->hasApertureRegs()) {
    unsigned Offset = AS == AMDGPUASI.LOCAL_ADDRESS ?
        AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
        AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
    unsigned WidthM1 = AS == AMDGPUASI.LOCAL_ADDRESS ?
        AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
        AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
    // Pack the s_getreg_b32 immediate: hwreg id | bit offset | width-1.
    unsigned Encoding =
        AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
        Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
        WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;

    SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
    SDValue ApertureReg = SDValue(
        DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
    SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
    return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
  assert(UserSGPR != AMDGPU::NoRegister);

  SDValue QueuePtr = CreateLiveInRegister(
    DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);

  // Offset into amd_queue_t for group_segment_aperture_base_hi /
  // private_segment_aperture_base_hi.
  uint32_t StructOffset = (AS == AMDGPUASI.LOCAL_ADDRESS) ? 0x40 : 0x44;

  SDValue Ptr = DAG.getNode(ISD::ADD, DL, MVT::i64, QueuePtr,
                            DAG.getConstant(StructOffset, DL, MVT::i64));

  // TODO: Use custom target PseudoSourceValue.
  // TODO: We should use the value from the IR intrinsic call, but it might not
  // be available and how do we get it?
  Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
                                              AMDGPUASI.CONSTANT_ADDRESS));

  MachinePointerInfo PtrInfo(V, StructOffset);
  return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
                     MinAlign(64, StructOffset),
                     MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant);
}

// Lower addrspacecast between flat and local/private address spaces,
// preserving null-pointer semantics across the differing null encodings.
SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc SL(Op);
  const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);

  SDValue Src = ASC->getOperand(0);
  SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);

  const AMDGPUTargetMachine &TM =
    static_cast<const AMDGPUTargetMachine &>(getTargetMachine());

  // flat -> local/private
  if (ASC->getSrcAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
    unsigned DestAS = ASC->getDestAddressSpace();

    if (DestAS == AMDGPUASI.LOCAL_ADDRESS ||
        DestAS == AMDGPUASI.PRIVATE_ADDRESS) {
      unsigned NullVal = TM.getNullPointerValue(DestAS);
      SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
      SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
      // Truncating drops the aperture half; a flat null maps to the
      // segment's null value.
      SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);

      return DAG.getNode(ISD::SELECT, SL, MVT::i32,
                         NonNull, Ptr, SegmentNullPtr);
    }
  }

  // local/private -> flat
  if (ASC->getDestAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
    unsigned SrcAS = ASC->getSrcAddressSpace();

    if (SrcAS == AMDGPUASI.LOCAL_ADDRESS ||
        SrcAS == AMDGPUASI.PRIVATE_ADDRESS) {
      unsigned NullVal = TM.getNullPointerValue(SrcAS);
      SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);

      SDValue NonNull
        = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);

      // Combine the 32-bit segment offset with the aperture high half.
      SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
      SDValue CvtPtr
        = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);

      return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
                         DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
                         FlatNullPtr);
    }
  }

  // global <-> flat are no-ops and never emitted.

  const MachineFunction &MF = DAG.getMachineFunction();
  DiagnosticInfoUnsupported InvalidAddrSpaceCast(
    *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
  DAG.getContext()->diagnose(InvalidAddrSpaceCast);

  return DAG.getUNDEF(ASC->getValueType(0));
}

SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                 SelectionDAG &DAG) const {
  // Constant indices are handled by the generic lowering.
  SDValue Idx = Op.getOperand(2);
  if (isa<ConstantSDNode>(Idx))
    return SDValue();

  // Avoid stack access for dynamic indexing.
  SDLoc SL(Op);
  SDValue Vec = Op.getOperand(0);
  SDValue Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Op.getOperand(1));

  // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
  SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Val);

  // Convert vector index to bit-index.
  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx,
                                  DAG.getConstant(16, SL, MVT::i32));

  SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);

  // BFM: a 16-bit-wide mask positioned at the selected element.
  SDValue BFM = DAG.getNode(ISD::SHL, SL, MVT::i32,
                            DAG.getConstant(0xffff, SL, MVT::i32),
                            ScaledIdx);

  // Insert the new element under the mask, keep the rest of the vector.
  SDValue LHS = DAG.getNode(ISD::AND, SL, MVT::i32, BFM, ExtVal);
  SDValue RHS = DAG.getNode(ISD::AND, SL, MVT::i32,
                            DAG.getNOT(SL, BFM, MVT::i32), BCVec);

  SDValue BFI = DAG.getNode(ISD::OR, SL, MVT::i32, LHS, RHS);
  return DAG.getNode(ISD::BITCAST, SL, Op.getValueType(), BFI);
}

/// Lower EXTRACT_VECTOR_ELT on a vector of 16-bit elements as a shift of the
/// 32-bit bitcast of the vector (element 0 is the low half, element 1 the
/// high half); dynamic indices become a variable shift by index * 16.
SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc SL(Op);

  EVT ResultVT = Op.getValueType();
  SDValue Vec = Op.getOperand(0);
  SDValue Idx = Op.getOperand(1);

  DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);

  // Make sure we do any optimizations that will make it easier to fold
  // source modifiers before obscuring it with bit operations.

  // XXX - Why doesn't this get called when vector_shuffle is expanded?
  if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
    return Combined;

  if (const ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
    SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);

    if (CIdx->getZExtValue() == 1) {
      // Element 1 lives in the high 16 bits.
      Result = DAG.getNode(ISD::SRL, SL, MVT::i32, Result,
                           DAG.getConstant(16, SL, MVT::i32));
    } else {
      assert(CIdx->getZExtValue() == 0);
    }

    if (ResultVT.bitsLT(MVT::i32))
      Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);
    return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
  }

  SDValue Sixteen = DAG.getConstant(16, SL, MVT::i32);

  // Convert vector index to bit-index.
  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, Sixteen);

  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
  SDValue Elt = DAG.getNode(ISD::SRL, SL, MVT::i32, BC, ScaledIdx);

  SDValue Result = Elt;
  if (ResultVT.bitsLT(MVT::i32))
    Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);

  return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
}

bool
SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // We can fold offsets for anything that doesn't require a GOT relocation.
  return (GA->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
          GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) &&
         !shouldEmitGOTReloc(GA->getGlobal());
}

static SDValue
buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
                        const SDLoc &DL, unsigned Offset, EVT PtrVT,
                        unsigned GAFlags = SIInstrInfo::MO_NONE) {
  // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
  // lowered to the following code sequence:
  //
  // For constant address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol
  //   s_addc_u32 s1, s1, 0
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   a fixup or relocation is emitted to replace $symbol with a literal
  //   constant, which is a pc-relative offset from the encoding of the $symbol
  //   operand to the global variable.
  //
  // For global address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
  //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   fixups or relocations are emitted to replace $symbol@*@lo and
  //   $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
  //   which is a 64-bit pc-relative offset from the encoding of the $symbol
  //   operand to the global variable.
  //
  // What we want here is an offset from the value returned by s_getpc
  // (which is the address of the s_add_u32 instruction) to the global
  // variable, but since the encoding of $symbol starts 4 bytes after the start
  // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
  // small. This requires us to add 4 to the global variable offset in order to
  // compute the correct address.
  SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
                                             GAFlags);
  // The @hi flag is defined to immediately follow the corresponding @lo flag;
  // MO_NONE has no hi/lo pair so it is passed through unchanged.
  SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
                                             GAFlags == SIInstrInfo::MO_NONE ?
                                             GAFlags : GAFlags + 1);
  return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
}

/// Lower a global address in the global/constant address spaces either as a
/// direct pc-relative fixup, a REL32 relocation, or a load from the GOT,
/// depending on what relocation model the global requires.
SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                             SDValue Op,
                                             SelectionDAG &DAG) const {
  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);

  if (GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
      GSD->getAddressSpace() != AMDGPUASI.GLOBAL_ADDRESS)
    return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);

  SDLoc DL(GSD);
  const GlobalValue *GV = GSD->getGlobal();
  EVT PtrVT = Op.getValueType();

  if (shouldEmitFixup(GV))
    return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
  else if (shouldEmitPCReloc(GV))
    return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
                                   SIInstrInfo::MO_REL32);

  // Otherwise the address must come from the GOT: compute the GOT slot
  // pc-relatively, then load the pointer from it.
  SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
                                            SIInstrInfo::MO_GOTPCREL32);

  Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
  const DataLayout &DataLayout = DAG.getDataLayout();
  unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
  // FIXME: Use a PseudoSourceValue once those can be assigned an address space.
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
                     MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant);
}

/// Emit an initialization of the m0 register with value \p V, returning a
/// chain (with a glue result) that orders subsequent m0 users after it.
SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
                                   const SDLoc &DL, SDValue V) const {
  // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
  // the destination register.
  //
  // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
  // so we will end up with redundant moves to m0.
  //
  // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.

  // A Null SDValue creates a glue result.
  SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
                                  V, Chain);
  return SDValue(M0, 0);
}

/// Load a 32-bit kernel argument at \p Offset and assert that everything
/// above VT's width is zero, so later combines can drop redundant masks.
SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
                                                 SDValue Op,
                                                 MVT VT,
                                                 unsigned Offset) const {
  SDLoc SL(Op);
  SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
                                           DAG.getEntryNode(), Offset, false);
  // The local size values will have the hi 16-bits as zero.
  return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
                     DAG.getValueType(VT));
}

/// Diagnose use of a non-HSA intrinsic on an HSA target and return undef.
static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
                                        EVT VT) {
  DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
                                      "non-hsa intrinsic with hsa target",
                                      DL.getDebugLoc());
  DAG.getContext()->diagnose(BadIntrin);
  return DAG.getUNDEF(VT);
}

/// Diagnose use of an intrinsic removed on this subtarget and return undef.
static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
                                         EVT VT) {
  DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
                                      "intrinsic not supported on subtarget",
                                      DL.getDebugLoc());
  DAG.getContext()->diagnose(BadIntrin);
  return DAG.getUNDEF(VT);
}

/// Custom lowering for chain-less target intrinsics: preloaded-register
/// queries, kernel-argument reads, and direct mappings onto AMDGPU ISD nodes.
SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto MFI = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // TODO: Should this propagate fast-math-flags?

  switch (IntrinsicID) {
  case Intrinsic::amdgcn_implicit_buffer_ptr: {
    if (getSubtarget()->isAmdCodeObjectV2(MF))
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    unsigned Reg = TRI->getPreloadedValue(MF,
      SIRegisterInfo::IMPLICIT_BUFFER_PTR);
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
  }
  case Intrinsic::amdgcn_dispatch_ptr:
  case Intrinsic::amdgcn_queue_ptr: {
    if (!Subtarget->isAmdCodeObjectV2(MF)) {
      DiagnosticInfoUnsupported BadIntrin(
          *MF.getFunction(), "unsupported hsa intrinsic without hsa target",
          DL.getDebugLoc());
      DAG.getContext()->diagnose(BadIntrin);
      return DAG.getUNDEF(VT);
    }

    auto Reg = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
      SIRegisterInfo::DISPATCH_PTR : SIRegisterInfo::QUEUE_PTR;
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass,
                                TRI->getPreloadedValue(MF, Reg), VT);
  }
  case Intrinsic::amdgcn_implicitarg_ptr: {
    unsigned offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
    return lowerKernArgParameterPtr(DAG, DL, DAG.getEntryNode(), offset);
  }
  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    unsigned Reg
      = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
  }
  case Intrinsic::amdgcn_dispatch_id: {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::DISPATCH_ID);
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
  }
  case Intrinsic::amdgcn_rcp:
    return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq:
    return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq_legacy:
    // The legacy instructions were removed on VI.
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);

    return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rcp_legacy:
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);
    return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq_clamp: {
    if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
      return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));

    // VI has no rsq_clamp instruction; emulate the clamp with min/max against
    // the largest finite values of the type.
    Type *Type = VT.getTypeForEVT(*DAG.getContext());
    APFloat Max = APFloat::getLargest(Type->getFltSemantics());
    APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);

    SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
    SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
                              DAG.getConstantFP(Max, DL, VT));
    return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
                       DAG.getConstantFP(Min, DL, VT));
  }
  case Intrinsic::r600_read_ngroups_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_X, false);
  case Intrinsic::r600_read_ngroups_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_Y, false);
  case Intrinsic::r600_read_ngroups_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_Z, false);
  case Intrinsic::r600_read_global_size_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_X, false);
  case Intrinsic::r600_read_global_size_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_Y, false);
  case Intrinsic::r600_read_global_size_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_Z, false);
  case Intrinsic::r600_read_local_size_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_X);
  case Intrinsic::r600_read_local_size_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Y);
  case Intrinsic::r600_read_local_size_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Z);
  case Intrinsic::amdgcn_workgroup_id_x:
  case Intrinsic::r600_read_tgid_x:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_X), VT);
  case Intrinsic::amdgcn_workgroup_id_y:
  case Intrinsic::r600_read_tgid_y:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Y), VT);
  case Intrinsic::amdgcn_workgroup_id_z:
  case Intrinsic::r600_read_tgid_z:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Z), VT);
  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::r600_read_tidig_x:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X), VT);
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::r600_read_tidig_y:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y), VT);
  case Intrinsic::amdgcn_workitem_id_z:
  case Intrinsic::r600_read_tidig_z:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z), VT);
  case AMDGPUIntrinsic::SI_load_const: {
    SDValue Ops[] = {
      Op.getOperand(1),
      Op.getOperand(2)
    };

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo(),
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        VT.getStoreSize(), 4);
    return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }
  case Intrinsic::amdgcn_fdiv_fast:
    return lowerFDIV_FAST(Op, DAG);
  case Intrinsic::amdgcn_interp_mov: {
    // The last operand is the m0 value (interpolation parameters live in LDS
    // addressed through m0); glue the interp node to the m0 write.
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
    SDValue Glue = M0.getValue(1);
    return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Glue);
  }
  case Intrinsic::amdgcn_interp_p1: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
    SDValue Glue = M0.getValue(1);
    return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Glue);
  }
  case Intrinsic::amdgcn_interp_p2: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
    SDValue Glue = SDValue(M0.getNode(), 1);
    return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
                       Glue);
  }
  case Intrinsic::amdgcn_sin:
    return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_cos:
    return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_log_clamp: {
    if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
      return SDValue();

    DiagnosticInfoUnsupported BadIntrin(
      *MF.getFunction(), "intrinsic not supported on subtarget",
      DL.getDebugLoc());
    DAG.getContext()->diagnose(BadIntrin);
    return DAG.getUNDEF(VT);
  }
  case Intrinsic::amdgcn_ldexp:
    return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));

  case Intrinsic::amdgcn_fract:
    return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_class:
    return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_div_fmas:
    return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(4));

  case Intrinsic::amdgcn_div_fixup:
    return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::amdgcn_trig_preop:
    return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_div_scale: {
    // 3rd parameter required to be a constant.
    const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!Param)
      return DAG.getUNDEF(VT);

    // Translate to the operands expected by the machine instruction. The
    // first parameter must be the same as the first instruction.
    SDValue Numerator = Op.getOperand(1);
    SDValue Denominator = Op.getOperand(2);

    // Note this order is opposite of the machine instruction's operations,
    // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
    // intrinsic has the numerator as the first operand to match a normal
    // division operation.

    SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;

    return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
                       Denominator, Numerator);
  }
  case Intrinsic::amdgcn_icmp: {
    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!CD)
      return DAG.getUNDEF(VT);

    // Out-of-range predicate values produce undef rather than a diagnostic.
    int CondCode = CD->getSExtValue();
    if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
        CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
      return DAG.getUNDEF(VT);

    ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
    ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
    return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
                       Op.getOperand(2), DAG.getCondCode(CCOpcode));
  }
  case Intrinsic::amdgcn_fcmp: {
    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!CD)
      return DAG.getUNDEF(VT);

    int CondCode = CD->getSExtValue();
    if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
        CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE)
      return DAG.getUNDEF(VT);

    FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
    ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
    return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
                       Op.getOperand(2), DAG.getCondCode(CCOpcode));
  }
  case Intrinsic::amdgcn_fmed3:
    return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_fmul_legacy:
    return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_sffbh:
    return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_sbfe:
    return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_ubfe:
    return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_cvt_pkrtz: {
    // FIXME: Stop adding cast if v2f16 legal.
    EVT VT = Op.getValueType();
    SDValue Node = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, DL, MVT::i32,
                               Op.getOperand(1), Op.getOperand(2));
    return DAG.getNode(ISD::BITCAST, DL, VT, Node);
  }
  default:
    return Op;
  }
}

/// Custom lowering for target intrinsics that carry a chain: memory
/// intrinsics become target memory nodes with an explicit MachineMemOperand;
/// image sample/getlod intrinsics with an all-zero dmask fold to undef.
SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                                                 SelectionDAG &DAG) const {
  unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  SDLoc DL(Op);
  MachineFunction &MF = DAG.getMachineFunction();

  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    MemSDNode *M = cast<MemSDNode>(Op);
    unsigned Opc = (IntrID == Intrinsic::amdgcn_atomic_inc) ?
      AMDGPUISD::ATOMIC_INC : AMDGPUISD::ATOMIC_DEC;
    SDValue Ops[] = {
      M->getOperand(0), // Chain
      M->getOperand(2), // Ptr
      M->getOperand(3) // Value
    };

    return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }
  case Intrinsic::amdgcn_buffer_load:
  case Intrinsic::amdgcn_buffer_load_format: {
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // rsrc
      Op.getOperand(3), // vindex
      Op.getOperand(4), // offset
      Op.getOperand(5), // glc
      Op.getOperand(6) // slc
    };
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
      AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
    EVT VT = Op.getValueType();
    EVT IntVT = VT.changeTypeToInteger();

    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(MFI->getBufferPSV()),
      MachineMemOperand::MOLoad,
      VT.getStoreSize(), VT.getStoreSize());

    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, MMO);
  }
  case Intrinsic::amdgcn_tbuffer_load: {
    SDValue Ops[] = {
      Op.getOperand(0),  // Chain
      Op.getOperand(2),  // rsrc
      Op.getOperand(3),  // vindex
      Op.getOperand(4),  // voffset
      Op.getOperand(5),  // soffset
      Op.getOperand(6),  // offset
      Op.getOperand(7),  // dfmt
      Op.getOperand(8),  // nfmt
      Op.getOperand(9),  // glc
      Op.getOperand(10)   // slc
    };

    // NOTE(review): operand 2 is the rsrc, so this MMO is sized by the
    // resource descriptor's type rather than the loaded value's type
    // (Op.getValueType()) — verify this is intentional.
    EVT VT = Op.getOperand(2).getValueType();

    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad,
      VT.getStoreSize(), VT.getStoreSize());
    return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }
  // Basic sample.
  case Intrinsic::amdgcn_image_sample:
  case Intrinsic::amdgcn_image_sample_cl:
  case Intrinsic::amdgcn_image_sample_d:
  case Intrinsic::amdgcn_image_sample_d_cl:
  case Intrinsic::amdgcn_image_sample_l:
  case Intrinsic::amdgcn_image_sample_b:
  case Intrinsic::amdgcn_image_sample_b_cl:
  case Intrinsic::amdgcn_image_sample_lz:
  case Intrinsic::amdgcn_image_sample_cd:
  case Intrinsic::amdgcn_image_sample_cd_cl:

  // Sample with comparison.
  case Intrinsic::amdgcn_image_sample_c:
  case Intrinsic::amdgcn_image_sample_c_cl:
  case Intrinsic::amdgcn_image_sample_c_d:
  case Intrinsic::amdgcn_image_sample_c_d_cl:
  case Intrinsic::amdgcn_image_sample_c_l:
  case Intrinsic::amdgcn_image_sample_c_b:
  case Intrinsic::amdgcn_image_sample_c_b_cl:
  case Intrinsic::amdgcn_image_sample_c_lz:
  case Intrinsic::amdgcn_image_sample_c_cd:
  case Intrinsic::amdgcn_image_sample_c_cd_cl:

  // Sample with offsets.
  case Intrinsic::amdgcn_image_sample_o:
  case Intrinsic::amdgcn_image_sample_cl_o:
  case Intrinsic::amdgcn_image_sample_d_o:
  case Intrinsic::amdgcn_image_sample_d_cl_o:
  case Intrinsic::amdgcn_image_sample_l_o:
  case Intrinsic::amdgcn_image_sample_b_o:
  case Intrinsic::amdgcn_image_sample_b_cl_o:
  case Intrinsic::amdgcn_image_sample_lz_o:
  case Intrinsic::amdgcn_image_sample_cd_o:
  case Intrinsic::amdgcn_image_sample_cd_cl_o:

  // Sample with comparison and offsets.
  case Intrinsic::amdgcn_image_sample_c_o:
  case Intrinsic::amdgcn_image_sample_c_cl_o:
  case Intrinsic::amdgcn_image_sample_c_d_o:
  case Intrinsic::amdgcn_image_sample_c_d_cl_o:
  case Intrinsic::amdgcn_image_sample_c_l_o:
  case Intrinsic::amdgcn_image_sample_c_b_o:
  case Intrinsic::amdgcn_image_sample_c_b_cl_o:
  case Intrinsic::amdgcn_image_sample_c_lz_o:
  case Intrinsic::amdgcn_image_sample_c_cd_o:
  case Intrinsic::amdgcn_image_sample_c_cd_cl_o:

  case Intrinsic::amdgcn_image_getlod: {
    // Replace dmask with everything disabled with undef.
    const ConstantSDNode *DMask = dyn_cast<ConstantSDNode>(Op.getOperand(5));
    if (!DMask || DMask->isNullValue()) {
      SDValue Undef = DAG.getUNDEF(Op.getValueType());
      return DAG.getMergeValues({ Undef, Op.getOperand(0) }, SDLoc(Op));
    }

    return SDValue();
  }
  default:
    return SDValue();
  }
}

/// Custom lowering for void target intrinsics: exports, message sends, exec
/// initialization, kill, barriers, and tbuffer stores.
SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  MachineFunction &MF = DAG.getMachineFunction();

  switch (IntrinsicID) {
  case Intrinsic::amdgcn_exp: {
    const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
    const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
    const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
    const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));

    const SDValue Ops[] = {
      Chain,
      DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
      DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
      Op.getOperand(4), // src0
      Op.getOperand(5), // src1
      Op.getOperand(6), // src2
      Op.getOperand(7), // src3
      DAG.getTargetConstant(0, DL, MVT::i1), // compr
      DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
    };

    unsigned Opc = Done->isNullValue() ?
      AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
    return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
  }
  case Intrinsic::amdgcn_exp_compr: {
    const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
    const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
    SDValue Src0 = Op.getOperand(4);
    SDValue Src1 = Op.getOperand(5);
    const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
    const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));

    SDValue Undef = DAG.getUNDEF(MVT::f32);
    const SDValue Ops[] = {
      Chain,
      DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
      DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
      DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
      DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
      Undef, // src2
      Undef, // src3
      DAG.getTargetConstant(1, DL, MVT::i1), // compr
      DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
    };

    unsigned Opc = Done->isNullValue() ?
      AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
    return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
  }
  case Intrinsic::amdgcn_s_sendmsg:
  case Intrinsic::amdgcn_s_sendmsghalt: {
    unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ?
      AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
    // s_sendmsg reads its payload from m0; glue the send to the m0 write.
    Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
    SDValue Glue = Chain.getValue(1);
    return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
                       Op.getOperand(2), Glue);
  }
  case Intrinsic::amdgcn_init_exec: {
    return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
                       Op.getOperand(2));
  }
  case Intrinsic::amdgcn_init_exec_from_input: {
    return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
                       Op.getOperand(2), Op.getOperand(3));
  }
  case AMDGPUIntrinsic::AMDGPU_kill: {
    SDValue Src = Op.getOperand(2);
    if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) {
      // A known non-negative operand never kills; otherwise emit an
      // unconditional kill (operand -1.0 always compares as killing).
      if (!K->isNegative())
        return Chain;

      SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32);
      return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne);
    }

    SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src);
    return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast);
  }
  case Intrinsic::amdgcn_s_barrier: {
    // If the whole workgroup fits in one wave, the barrier is a no-op at
    // runtime; keep only a scheduling barrier.
    if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
      const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
      unsigned WGSize = ST.getFlatWorkGroupSizes(*MF.getFunction()).second;
      if (WGSize <= ST.getWavefrontSize())
        return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
                                          Op.getOperand(0)), 0);
    }
    return SDValue();
  };
  case AMDGPUIntrinsic::SI_tbuffer_store: {

    // Extract vindex and voffset from vaddr as appropriate
    const ConstantSDNode *OffEn = cast<ConstantSDNode>(Op.getOperand(10));
    const ConstantSDNode *IdxEn = cast<ConstantSDNode>(Op.getOperand(11));
    SDValue VAddr = Op.getOperand(5);

    SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);

    assert(!(OffEn->isOne() && IdxEn->isOne()) &&
           "Legacy intrinsic doesn't support both offset and index - use new version");

    SDValue VIndex = IdxEn->isOne() ? VAddr : Zero;
    SDValue VOffset = OffEn->isOne() ? VAddr : Zero;

    // Deal with the vec-3 case
    const ConstantSDNode *NumChannels = cast<ConstantSDNode>(Op.getOperand(4));
    auto Opcode = NumChannels->getZExtValue() == 3 ?
      AMDGPUISD::TBUFFER_STORE_FORMAT_X3 : AMDGPUISD::TBUFFER_STORE_FORMAT;

    SDValue Ops[] = {
     Chain,
     Op.getOperand(3),  // vdata
     Op.getOperand(2),  // rsrc
     VIndex,
     VOffset,
     Op.getOperand(6),  // soffset
     Op.getOperand(7),  // inst_offset
     Op.getOperand(8),  // dfmt
     Op.getOperand(9),  // nfmt
     Op.getOperand(12), // glc
     Op.getOperand(13), // slc
    };

    assert((cast<ConstantSDNode>(Op.getOperand(14)))->getZExtValue() == 0 &&
           "Value of tfe other than zero is unsupported");

    EVT VT = Op.getOperand(3).getValueType();
    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOStore,
      VT.getStoreSize(), 4);
    return DAG.getMemIntrinsicNode(Opcode, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }

  case Intrinsic::amdgcn_tbuffer_store: {
    SDValue Ops[] = {
      Chain,
      Op.getOperand(2),  // vdata
      Op.getOperand(3),  // rsrc
      Op.getOperand(4),  // vindex
      Op.getOperand(5),  // voffset
      Op.getOperand(6),  // soffset
      Op.getOperand(7),  // offset
      Op.getOperand(8),  // dfmt
      Op.getOperand(9),  // nfmt
      Op.getOperand(10), // glc
      Op.getOperand(11)  // slc
    };
    // NOTE(review): here operand 2 is vdata and operand 3 is the rsrc, so
    // this takes the resource descriptor's type, unlike the legacy
    // SI_tbuffer_store case above where operand 3 *is* vdata — verify the
    // operand index is intentional.
    EVT VT = Op.getOperand(3).getValueType();
    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOStore,
      VT.getStoreSize(), 4);
    return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }

  default:
    return Op;
  }
}

/// Custom lowering for loads: sub-32-bit scalar loads are widened through a
/// 32-bit extload; unaligned or flat-scratch vector loads get re-legalized.
SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, RealMemVT, MMO);

    SDValue Ops[] = {
      DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
      NewLD.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }

  if (!MemVT.isVector())
    return SDValue();

  assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
         "Custom lowering for non-i32 vectors hasn't been implemented.");

  unsigned AS = Load->getAddressSpace();
  if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
                          AS, Load->getAlignment())) {
    SDValue Ops[2];
    std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
    return DAG.getMergeValues(Ops, DL);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // If there is a possibility that flat instruction access scratch memory
  // then we need to use the same legalization rules we use for private.
  if (AS == AMDGPUASI.FLAT_ADDRESS)
    AS = MFI->hasFlatScratchInit() ?
3645 AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS; 3646 3647 unsigned NumElements = MemVT.getVectorNumElements(); 3648 if (AS == AMDGPUASI.CONSTANT_ADDRESS) { 3649 if (isMemOpUniform(Load)) 3650 return SDValue(); 3651 // Non-uniform loads will be selected to MUBUF instructions, so they 3652 // have the same legalization requirements as global and private 3653 // loads. 3654 // 3655 } 3656 if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS) { 3657 if (Subtarget->getScalarizeGlobalBehavior() && isMemOpUniform(Load) && 3658 !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load)) 3659 return SDValue(); 3660 // Non-uniform loads will be selected to MUBUF instructions, so they 3661 // have the same legalization requirements as global and private 3662 // loads. 3663 // 3664 } 3665 if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS || 3666 AS == AMDGPUASI.FLAT_ADDRESS) { 3667 if (NumElements > 4) 3668 return SplitVectorLoad(Op, DAG); 3669 // v4 loads are supported for private and global memory. 3670 return SDValue(); 3671 } 3672 if (AS == AMDGPUASI.PRIVATE_ADDRESS) { 3673 // Depending on the setting of the private_element_size field in the 3674 // resource descriptor, we can only make private accesses up to a certain 3675 // size. 3676 switch (Subtarget->getMaxPrivateElementSize()) { 3677 case 4: 3678 return scalarizeVectorLoad(Load, DAG); 3679 case 8: 3680 if (NumElements > 2) 3681 return SplitVectorLoad(Op, DAG); 3682 return SDValue(); 3683 case 16: 3684 // Same as global/flat 3685 if (NumElements > 4) 3686 return SplitVectorLoad(Op, DAG); 3687 return SDValue(); 3688 default: 3689 llvm_unreachable("unsupported private_element_size"); 3690 } 3691 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) { 3692 if (NumElements > 2) 3693 return SplitVectorLoad(Op, DAG); 3694 3695 if (NumElements == 2) 3696 return SDValue(); 3697 3698 // If properly aligned, if we split we might be able to use ds_read_b64. 
3699 return SplitVectorLoad(Op, DAG); 3700 } 3701 return SDValue(); 3702 } 3703 3704 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 3705 if (Op.getValueType() != MVT::i64) 3706 return SDValue(); 3707 3708 SDLoc DL(Op); 3709 SDValue Cond = Op.getOperand(0); 3710 3711 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 3712 SDValue One = DAG.getConstant(1, DL, MVT::i32); 3713 3714 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); 3715 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); 3716 3717 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); 3718 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); 3719 3720 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); 3721 3722 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); 3723 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); 3724 3725 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); 3726 3727 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi}); 3728 return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res); 3729 } 3730 3731 // Catch division cases where we can use shortcuts with rcp and rsq 3732 // instructions. 3733 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op, 3734 SelectionDAG &DAG) const { 3735 SDLoc SL(Op); 3736 SDValue LHS = Op.getOperand(0); 3737 SDValue RHS = Op.getOperand(1); 3738 EVT VT = Op.getValueType(); 3739 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath; 3740 3741 if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals()) 3742 return SDValue(); 3743 3744 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { 3745 if (Unsafe || VT == MVT::f32 || VT == MVT::f16) { 3746 if (CLHS->isExactlyValue(1.0)) { 3747 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to 3748 // the CI documentation has a worst case error of 1 ulp. 
3749 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to 3750 // use it as long as we aren't trying to use denormals. 3751 // 3752 // v_rcp_f16 and v_rsq_f16 DO support denormals. 3753 3754 // 1.0 / sqrt(x) -> rsq(x) 3755 3756 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP 3757 // error seems really high at 2^29 ULP. 3758 if (RHS.getOpcode() == ISD::FSQRT) 3759 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); 3760 3761 // 1.0 / x -> rcp(x) 3762 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 3763 } 3764 3765 // Same as for 1.0, but expand the sign out of the constant. 3766 if (CLHS->isExactlyValue(-1.0)) { 3767 // -1.0 / x -> rcp (fneg x) 3768 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3769 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS); 3770 } 3771 } 3772 } 3773 3774 const SDNodeFlags Flags = Op->getFlags(); 3775 3776 if (Unsafe || Flags.hasAllowReciprocal()) { 3777 // Turn into multiply by the reciprocal. 3778 // x / y -> x * (1.0 / y) 3779 SDNodeFlags NewFlags; 3780 NewFlags.setUnsafeAlgebra(true); 3781 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 3782 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, NewFlags); 3783 } 3784 3785 return SDValue(); 3786 } 3787 3788 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, 3789 EVT VT, SDValue A, SDValue B, SDValue GlueChain) { 3790 if (GlueChain->getNumValues() <= 1) { 3791 return DAG.getNode(Opcode, SL, VT, A, B); 3792 } 3793 3794 assert(GlueChain->getNumValues() == 3); 3795 3796 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); 3797 switch (Opcode) { 3798 default: llvm_unreachable("no chain equivalent for opcode"); 3799 case ISD::FMUL: 3800 Opcode = AMDGPUISD::FMUL_W_CHAIN; 3801 break; 3802 } 3803 3804 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, 3805 GlueChain.getValue(2)); 3806 } 3807 3808 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, 
// (continued parameter list of getFPTernOp; the signature opens on the
// previous line)
                           EVT VT, SDValue A, SDValue B, SDValue C,
                           SDValue GlueChain) {
  // No chain/glue attached: emit a plain three-operand FP node.
  if (GlueChain->getNumValues() <= 1) {
    return DAG.getNode(Opcode, SL, VT, A, B, C);
  }

  // Otherwise the incoming node must produce (value, chain, glue).
  assert(GlueChain->getNumValues() == 3);

  SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
  switch (Opcode) {
  default: llvm_unreachable("no chain equivalent for opcode");
  case ISD::FMA:
    // Switch to the chained AMDGPU variant so the op stays ordered relative
    // to the chain/glue it is threaded onto.
    Opcode = AMDGPUISD::FMA_W_CHAIN;
    break;
  }

  return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
                     GlueChain.getValue(2));
}

/// Lower f16 fdiv: extend both operands to f32, approximate the quotient
/// with v_rcp_f32 plus a multiply, round the result back to f16, and let
/// DIV_FIXUP patch up special cases (infinities, NaNs, zero denominators).
SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
  // Try the cheap rcp-based shortcuts first (applicable under unsafe math).
  if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
    return FastLowered;

  SDLoc SL(Op);
  SDValue Src0 = Op.getOperand(0); // numerator
  SDValue Src1 = Op.getOperand(1); // denominator

  SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
  SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);

  SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
  SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);

  // Flag 0: the FP_ROUND result is not known to be exact.
  SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
  SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);

  // DIV_FIXUP operand order is (quotient, denominator, numerator), matching
  // the f32/f64 lowerings in this file.
  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
}

// Faster 2.5 ULP division that does not support denormals.
3850 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const { 3851 SDLoc SL(Op); 3852 SDValue LHS = Op.getOperand(1); 3853 SDValue RHS = Op.getOperand(2); 3854 3855 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); 3856 3857 const APFloat K0Val(BitsToFloat(0x6f800000)); 3858 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32); 3859 3860 const APFloat K1Val(BitsToFloat(0x2f800000)); 3861 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32); 3862 3863 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 3864 3865 EVT SetCCVT = 3866 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); 3867 3868 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); 3869 3870 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); 3871 3872 // TODO: Should this propagate fast-math-flags? 3873 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); 3874 3875 // rcp does not support denormals. 3876 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); 3877 3878 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); 3879 3880 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); 3881 } 3882 3883 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { 3884 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 3885 return FastLowered; 3886 3887 SDLoc SL(Op); 3888 SDValue LHS = Op.getOperand(0); 3889 SDValue RHS = Op.getOperand(1); 3890 3891 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 3892 3893 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1); 3894 3895 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, 3896 RHS, RHS, LHS); 3897 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, 3898 LHS, RHS, LHS); 3899 3900 // Denominator is scaled to not be denormal, so using rcp is ok. 
3901 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, 3902 DenominatorScaled); 3903 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, 3904 DenominatorScaled); 3905 3906 const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE | 3907 (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) | 3908 (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_); 3909 3910 const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16); 3911 3912 if (!Subtarget->hasFP32Denormals()) { 3913 SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); 3914 const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE, 3915 SL, MVT::i32); 3916 SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs, 3917 DAG.getEntryNode(), 3918 EnableDenormValue, BitField); 3919 SDValue Ops[3] = { 3920 NegDivScale0, 3921 EnableDenorm.getValue(0), 3922 EnableDenorm.getValue(1) 3923 }; 3924 3925 NegDivScale0 = DAG.getMergeValues(Ops, SL); 3926 } 3927 3928 SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, 3929 ApproxRcp, One, NegDivScale0); 3930 3931 SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, 3932 ApproxRcp, Fma0); 3933 3934 SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled, 3935 Fma1, Fma1); 3936 3937 SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, 3938 NumeratorScaled, Mul); 3939 3940 SDValue Fma3 = getFPTernOp(DAG, ISD::FMA,SL, MVT::f32, Fma2, Fma1, Mul, Fma2); 3941 3942 SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, 3943 NumeratorScaled, Fma3); 3944 3945 if (!Subtarget->hasFP32Denormals()) { 3946 const SDValue DisableDenormValue = 3947 DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32); 3948 SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other, 3949 Fma4.getValue(1), 3950 DisableDenormValue, 3951 BitField, 3952 Fma4.getValue(2)); 3953 3954 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, 3955 DisableDenorm, 
DAG.getRoot()); 3956 DAG.setRoot(OutputChain); 3957 } 3958 3959 SDValue Scale = NumeratorScaled.getValue(1); 3960 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32, 3961 Fma4, Fma1, Fma3, Scale); 3962 3963 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS); 3964 } 3965 3966 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { 3967 if (DAG.getTarget().Options.UnsafeFPMath) 3968 return lowerFastUnsafeFDIV(Op, DAG); 3969 3970 SDLoc SL(Op); 3971 SDValue X = Op.getOperand(0); 3972 SDValue Y = Op.getOperand(1); 3973 3974 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); 3975 3976 SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1); 3977 3978 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X); 3979 3980 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0); 3981 3982 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0); 3983 3984 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One); 3985 3986 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp); 3987 3988 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One); 3989 3990 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X); 3991 3992 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1); 3993 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3); 3994 3995 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64, 3996 NegDivScale0, Mul, DivScale1); 3997 3998 SDValue Scale; 3999 4000 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) { 4001 // Workaround a hardware bug on SI where the condition output from div_scale 4002 // is not usable. 4003 4004 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32); 4005 4006 // Figure out if the scale to use for div_fmas. 
// (inside LowerFDIV64: SI div_scale hardware-bug workaround, continued)
    // Recompute which div_scale result was scaled by comparing the high
    // dwords of the inputs against the high dwords of the scaled values,
    // instead of using the (unusable on SI) condition output.
    SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
    SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
    SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
    SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);

    SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
    SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);

    SDValue Scale0Hi
      = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
    SDValue Scale1Hi
      = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);

    // Scale = (DenHi == Scale0Hi) ^ (NumHi == Scale1Hi).
    SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
    SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
    Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
  } else {
    // Post-SI: the second div_scale's condition output is usable directly.
    Scale = DivScale1.getValue(1);
  }

  SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
                             Fma4, Fma3, Mul, Scale);

  // Final fixup handles infinities, NaNs and zero denominators.
  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
}

/// Dispatch fdiv lowering by type to the f32/f64/f16 implementations.
SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  if (VT == MVT::f32)
    return LowerFDIV32(Op, DAG);

  if (VT == MVT::f64)
    return LowerFDIV64(Op, DAG);

  if (VT == MVT::f16)
    return LowerFDIV16(Op, DAG);

  llvm_unreachable("Unexpected type for fdiv");
}

/// Custom lowering for stores: widen i1 stores to i32 truncating stores, and
/// split/scalarize vector stores that exceed what the target address space
/// supports.
SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT VT = Store->getMemoryVT();

  if (VT == MVT::i1) {
    // Store i1 values as an i32 truncated down to one bit of memory.
    return DAG.getTruncStore(Store->getChain(), DL,
       DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
       Store->getBasePtr(), MVT::i1, Store->getMemOperand());
  }

  // Only i32-element vectors reach the custom path beyond this point.
  assert(VT.isVector() &&
         Store->getValue().getValueType().getScalarType() ==
// (completes the assert opened on the previous line)
         MVT::i32);

  unsigned AS = Store->getAddressSpace();
  // Misaligned accesses the hardware cannot do directly get expanded.
  if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                          AS, Store->getAlignment())) {
    return expandUnalignedStore(Store, DAG);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // If there is a possibility that flat instruction access scratch memory
  // then we need to use the same legalization rules we use for private.
  if (AS == AMDGPUASI.FLAT_ADDRESS)
    AS = MFI->hasFlatScratchInit() ?
         AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;

  unsigned NumElements = VT.getVectorNumElements();
  if (AS == AMDGPUASI.GLOBAL_ADDRESS ||
      AS == AMDGPUASI.FLAT_ADDRESS) {
    // Up to 4 dwords per store for global/flat.
    if (NumElements > 4)
      return SplitVectorStore(Op, DAG);
    return SDValue();
  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    // The private_element_size resource-descriptor setting caps how wide a
    // single private access may be.
    switch (Subtarget->getMaxPrivateElementSize()) {
    case 4:
      return scalarizeVectorStore(Store, DAG);
    case 8:
      if (NumElements > 2)
        return SplitVectorStore(Op, DAG);
      return SDValue();
    case 16:
      if (NumElements > 4)
        return SplitVectorStore(Op, DAG);
      return SDValue();
    default:
      llvm_unreachable("unsupported private_element_size");
    }
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
    if (NumElements > 2)
      return SplitVectorStore(Op, DAG);

    if (NumElements == 2)
      return Op;

    // If properly aligned, if we split we might be able to use ds_write_b64.
    return SplitVectorStore(Op, DAG);
  } else {
    llvm_unreachable("unhandled address space");
  }
}

/// Lower FSIN/FCOS to the hardware sin/cos ops. The argument is scaled by
/// 0.5/pi and passed through FRACT first — the HW ops appear to take a
/// normalized [0,1) revolution count rather than radians.
SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Arg = Op.getOperand(0);
  // TODO: Should this propagate fast-math-flags?
  SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
                                  DAG.getNode(ISD::FMUL, DL, VT, Arg,
                                              DAG.getConstantFP(0.5/M_PI, DL,
                                                                VT)));

  switch (Op.getOpcode()) {
  case ISD::FCOS:
    return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart);
  case ISD::FSIN:
    return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart);
  default:
    llvm_unreachable("Wrong trig opcode");
  }
}

/// Lower cmpxchg for flat/global address spaces by packing the new and old
/// values into a two-element vector operand of the target node.
SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
  assert(AtomicNode->isCompareAndSwap());
  unsigned AS = AtomicNode->getAddressSpace();

  // No custom lowering required for local address space
  if (!isFlatGlobalAddrSpace(AS, AMDGPUASI))
    return Op;

  // Non-local address space requires custom lowering for atomic compare
  // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2
  SDLoc DL(Op);
  SDValue ChainIn = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  SDValue Old = Op.getOperand(2);  // value compared against memory
  SDValue New = Op.getOperand(3);  // value written on success
  EVT VT = Op.getValueType();
  MVT SimpleVT = VT.getSimpleVT();
  MVT VecType = MVT::getVectorVT(SimpleVT, 2);

  // Element order: swap value first, compare value second.
  SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
  SDValue Ops[] = { ChainIn, Addr, NewOld };

  return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
                                 Ops, VT, AtomicNode->getMemOperand());
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

/// Combine (uint_to_fp x) to CVT_F32_UBYTE0 when only the low byte of x can
/// be nonzero.
SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
                                                     DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  EVT ScalarVT = VT.getScalarType();
  if (ScalarVT != MVT::f32)
    return SDValue();

  SelectionDAG
// (completes the SelectionDAG reference declaration opened on the previous
// line)
    &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue Src = N->getOperand(0);
  EVT SrcVT = Src.getValueType();

  // TODO: We could try to match extracting the higher bytes, which would be
  // easier if i8 vectors weren't promoted to i32 vectors, particularly after
  // types are legalized. v4i8 -> v4f32 is probably the only case to worry
  // about in practice.
  if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) {
    // Only the low byte can be set: the conversion is exactly cvt_f32_ubyte0.
    if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
      SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
      DCI.AddToWorklist(Cvt.getNode());
      return Cvt;
    }
  }

  return SDValue();
}

/// \brief Return true if the given offset Size in bytes can be folded into
/// the immediate offsets of a memory instruction for the given address space.
static bool canFoldOffset(unsigned OffsetSize, unsigned AS,
                          const SISubtarget &STI) {
  auto AMDGPUASI = STI.getAMDGPUAS();
  if (AS == AMDGPUASI.GLOBAL_ADDRESS) {
    // MUBUF instructions have a 12-bit offset in bytes.
    return isUInt<12>(OffsetSize);
  }
  if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
    // SMRD instructions have an 8-bit offset in dwords on SI and
    // a 20-bit offset in bytes on VI.
    if (STI.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return isUInt<20>(OffsetSize);
    else
      return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
  }
  if (AS == AMDGPUASI.LOCAL_ADDRESS ||
      AS == AMDGPUASI.REGION_ADDRESS) {
    // The single offset versions have a 16-bit offset in bytes.
    return isUInt<16>(OffsetSize);
  }
  // Indirect register addressing does not use any offsets.
  return false;
}

// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)

// This is a variant of
// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
//
// The normal DAG combiner will do this, but only if the add has one use since
// that would increase the number of instructions.
//
// This prevents us from seeing a constant offset that can be folded into a
// memory instruction's addressing mode. If we know the resulting add offset of
// a pointer can be folded into an addressing offset, we can replace the pointer
// operand with the add of new constant offset. This eliminates one of the uses,
// and may allow the remaining use to also be simplified.
//
SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
                                               unsigned AddrSpace,
                                               DAGCombinerInfo &DCI) const {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Only (shl (add x, c1), c2) with both constants is interesting.
  if (N0.getOpcode() != ISD::ADD)
    return SDValue();

  const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
  if (!CN1)
    return SDValue();

  const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  if (!CAdd)
    return SDValue();

  // If the resulting offset is too large, we can't fold it into the addressing
  // mode offset.
// (inside performSHLPtrCombine, continued)
  // Shifted-out constant: the byte offset the combined add would contribute.
  APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
  if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *getSubtarget()))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  // Rewrite (shl (add x, c1), c2) as (add (shl x, c2), c1 << c2).
  SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
  // NOTE(review): the constant is created as i32 while ShlX has type VT —
  // verify this combine cannot fire for 64-bit pointer arithmetic.
  SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);

  return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset);
}

/// For loads/stores whose pointer is a shl, try to expose a foldable constant
/// offset via performSHLPtrCombine and patch the node's pointer operand.
SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SDValue Ptr = N->getBasePtr();
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  // TODO: We could also do this for multiplies.
  unsigned AS = N->getAddressSpace();
  if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUASI.PRIVATE_ADDRESS) {
    SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI);
    if (NewPtr) {
      SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());

      // The pointer operand index differs: stores carry (chain, value, ptr,
      // ...) so it is 2, loads carry (chain, ptr, ...) so it is 1.
      NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
      return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
    }
  }

  return SDValue();
}

/// True when applying the 32-bit bitwise op with this constant half is a
/// no-op or folds to a constant (identity/absorbing element), so splitting a
/// 64-bit op at this constant is profitable.
static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
  return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
         (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
         (Opc == ISD::XOR && Val == 0);
}

// Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This
// will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit
// integer combine opportunities since most 64-bit operations are decomposed
// this way.  TODO: We won't want this for SALU especially if it is an inline
// immediate.
SDValue SITargetLowering::splitBinaryBitConstantOp(
  DAGCombinerInfo &DCI,
  const SDLoc &SL,
  unsigned Opc, SDValue LHS,
  const ConstantSDNode *CRHS) const {
  uint64_t Val = CRHS->getZExtValue();
  uint32_t ValLo = Lo_32(Val);  // low half of the 64-bit constant
  uint32_t ValHi = Hi_32(Val);  // high half
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  // Split when either 32-bit half trivially folds away, or when the constant
  // would have to be materialized anyway (single use, not inlinable).
  if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
      bitOpWithConstantIsReducible(Opc, ValHi)) ||
      (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
    // If we need to materialize a 64-bit immediate, it will be split up later
    // anyway. Avoid creating the harder to understand 64-bit immediate
    // materialization.
    return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
  }

  return SDValue();
}

/// Target combines for ISD::AND: split 64-bit and-with-constant, turn
/// (and (srl x, c), mask) into a BFE, and merge ordered/une fcmp pairs into
/// a single fp_class test.
SDValue SITargetLowering::performAndCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  if (DCI.isBeforeLegalize())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);


  const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
  if (VT == MVT::i64 && CRHS) {
    if (SDValue Split
        = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
      return Split;
  }

  if (CRHS && VT == MVT::i32) {
    // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
    // nb = number of trailing zeroes in mask
    // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
    // given that we are selecting 8 or 16 bit fields starting at byte boundary.
4340 uint64_t Mask = CRHS->getZExtValue(); 4341 unsigned Bits = countPopulation(Mask); 4342 if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL && 4343 (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) { 4344 if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) { 4345 unsigned Shift = CShift->getZExtValue(); 4346 unsigned NB = CRHS->getAPIntValue().countTrailingZeros(); 4347 unsigned Offset = NB + Shift; 4348 if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary. 4349 SDLoc SL(N); 4350 SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32, 4351 LHS->getOperand(0), 4352 DAG.getConstant(Offset, SL, MVT::i32), 4353 DAG.getConstant(Bits, SL, MVT::i32)); 4354 EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits); 4355 SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE, 4356 DAG.getValueType(NarrowVT)); 4357 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext, 4358 DAG.getConstant(NB, SDLoc(CRHS), MVT::i32)); 4359 return Shl; 4360 } 4361 } 4362 } 4363 } 4364 4365 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> 4366 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) 4367 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) { 4368 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 4369 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); 4370 4371 SDValue X = LHS.getOperand(0); 4372 SDValue Y = RHS.getOperand(0); 4373 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) 4374 return SDValue(); 4375 4376 if (LCC == ISD::SETO) { 4377 if (X != LHS.getOperand(1)) 4378 return SDValue(); 4379 4380 if (RCC == ISD::SETUNE) { 4381 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); 4382 if (!C1 || !C1->isInfinity() || C1->isNegative()) 4383 return SDValue(); 4384 4385 const uint32_t Mask = SIInstrFlags::N_NORMAL | 4386 SIInstrFlags::N_SUBNORMAL | 4387 SIInstrFlags::N_ZERO | 4388 SIInstrFlags::P_ZERO | 4389 
SIInstrFlags::P_SUBNORMAL | 4390 SIInstrFlags::P_NORMAL; 4391 4392 static_assert(((~(SIInstrFlags::S_NAN | 4393 SIInstrFlags::Q_NAN | 4394 SIInstrFlags::N_INFINITY | 4395 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, 4396 "mask not equal"); 4397 4398 SDLoc DL(N); 4399 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 4400 X, DAG.getConstant(Mask, DL, MVT::i32)); 4401 } 4402 } 4403 } 4404 4405 return SDValue(); 4406 } 4407 4408 SDValue SITargetLowering::performOrCombine(SDNode *N, 4409 DAGCombinerInfo &DCI) const { 4410 SelectionDAG &DAG = DCI.DAG; 4411 SDValue LHS = N->getOperand(0); 4412 SDValue RHS = N->getOperand(1); 4413 4414 EVT VT = N->getValueType(0); 4415 if (VT == MVT::i1) { 4416 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) 4417 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && 4418 RHS.getOpcode() == AMDGPUISD::FP_CLASS) { 4419 SDValue Src = LHS.getOperand(0); 4420 if (Src != RHS.getOperand(0)) 4421 return SDValue(); 4422 4423 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); 4424 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 4425 if (!CLHS || !CRHS) 4426 return SDValue(); 4427 4428 // Only 10 bits are used. 4429 static const uint32_t MaxMask = 0x3ff; 4430 4431 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; 4432 SDLoc DL(N); 4433 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 4434 Src, DAG.getConstant(NewMask, DL, MVT::i32)); 4435 } 4436 4437 return SDValue(); 4438 } 4439 4440 if (VT != MVT::i64) 4441 return SDValue(); 4442 4443 // TODO: This could be a generic combine with a predicate for extracting the 4444 // high half of an integer being free. 
4445 4446 // (or i64:x, (zero_extend i32:y)) -> 4447 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x))) 4448 if (LHS.getOpcode() == ISD::ZERO_EXTEND && 4449 RHS.getOpcode() != ISD::ZERO_EXTEND) 4450 std::swap(LHS, RHS); 4451 4452 if (RHS.getOpcode() == ISD::ZERO_EXTEND) { 4453 SDValue ExtSrc = RHS.getOperand(0); 4454 EVT SrcVT = ExtSrc.getValueType(); 4455 if (SrcVT == MVT::i32) { 4456 SDLoc SL(N); 4457 SDValue LowLHS, HiBits; 4458 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG); 4459 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc); 4460 4461 DCI.AddToWorklist(LowOr.getNode()); 4462 DCI.AddToWorklist(HiBits.getNode()); 4463 4464 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 4465 LowOr, HiBits); 4466 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 4467 } 4468 } 4469 4470 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 4471 if (CRHS) { 4472 if (SDValue Split 4473 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS)) 4474 return Split; 4475 } 4476 4477 return SDValue(); 4478 } 4479 4480 SDValue SITargetLowering::performXorCombine(SDNode *N, 4481 DAGCombinerInfo &DCI) const { 4482 EVT VT = N->getValueType(0); 4483 if (VT != MVT::i64) 4484 return SDValue(); 4485 4486 SDValue LHS = N->getOperand(0); 4487 SDValue RHS = N->getOperand(1); 4488 4489 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); 4490 if (CRHS) { 4491 if (SDValue Split 4492 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS)) 4493 return Split; 4494 } 4495 4496 return SDValue(); 4497 } 4498 4499 // Instructions that will be lowered with a final instruction that zeros the 4500 // high result bits. 4501 // XXX - probably only need to list legal operations. 
4502 static bool fp16SrcZerosHighBits(unsigned Opc) { 4503 switch (Opc) { 4504 case ISD::FADD: 4505 case ISD::FSUB: 4506 case ISD::FMUL: 4507 case ISD::FDIV: 4508 case ISD::FREM: 4509 case ISD::FMA: 4510 case ISD::FMAD: 4511 case ISD::FCANONICALIZE: 4512 case ISD::FP_ROUND: 4513 case ISD::UINT_TO_FP: 4514 case ISD::SINT_TO_FP: 4515 case ISD::FABS: 4516 // Fabs is lowered to a bit operation, but it's an and which will clear the 4517 // high bits anyway. 4518 case ISD::FSQRT: 4519 case ISD::FSIN: 4520 case ISD::FCOS: 4521 case ISD::FPOWI: 4522 case ISD::FPOW: 4523 case ISD::FLOG: 4524 case ISD::FLOG2: 4525 case ISD::FLOG10: 4526 case ISD::FEXP: 4527 case ISD::FEXP2: 4528 case ISD::FCEIL: 4529 case ISD::FTRUNC: 4530 case ISD::FRINT: 4531 case ISD::FNEARBYINT: 4532 case ISD::FROUND: 4533 case ISD::FFLOOR: 4534 case ISD::FMINNUM: 4535 case ISD::FMAXNUM: 4536 case AMDGPUISD::FRACT: 4537 case AMDGPUISD::CLAMP: 4538 case AMDGPUISD::COS_HW: 4539 case AMDGPUISD::SIN_HW: 4540 case AMDGPUISD::FMIN3: 4541 case AMDGPUISD::FMAX3: 4542 case AMDGPUISD::FMED3: 4543 case AMDGPUISD::FMAD_FTZ: 4544 case AMDGPUISD::RCP: 4545 case AMDGPUISD::RSQ: 4546 case AMDGPUISD::LDEXP: 4547 return true; 4548 default: 4549 // fcopysign, select and others may be lowered to 32-bit bit operations 4550 // which don't zero the high bits. 4551 return false; 4552 } 4553 } 4554 4555 SDValue SITargetLowering::performZeroExtendCombine(SDNode *N, 4556 DAGCombinerInfo &DCI) const { 4557 if (!Subtarget->has16BitInsts() || 4558 DCI.getDAGCombineLevel() < AfterLegalizeDAG) 4559 return SDValue(); 4560 4561 EVT VT = N->getValueType(0); 4562 if (VT != MVT::i32) 4563 return SDValue(); 4564 4565 SDValue Src = N->getOperand(0); 4566 if (Src.getValueType() != MVT::i16) 4567 return SDValue(); 4568 4569 // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src 4570 // FIXME: It is not universally true that the high bits are zeroed on gfx9. 
4571 if (Src.getOpcode() == ISD::BITCAST) { 4572 SDValue BCSrc = Src.getOperand(0); 4573 if (BCSrc.getValueType() == MVT::f16 && 4574 fp16SrcZerosHighBits(BCSrc.getOpcode())) 4575 return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc); 4576 } 4577 4578 return SDValue(); 4579 } 4580 4581 SDValue SITargetLowering::performClassCombine(SDNode *N, 4582 DAGCombinerInfo &DCI) const { 4583 SelectionDAG &DAG = DCI.DAG; 4584 SDValue Mask = N->getOperand(1); 4585 4586 // fp_class x, 0 -> false 4587 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { 4588 if (CMask->isNullValue()) 4589 return DAG.getConstant(0, SDLoc(N), MVT::i1); 4590 } 4591 4592 if (N->getOperand(0).isUndef()) 4593 return DAG.getUNDEF(MVT::i1); 4594 4595 return SDValue(); 4596 } 4597 4598 // Constant fold canonicalize. 4599 SDValue SITargetLowering::performFCanonicalizeCombine( 4600 SDNode *N, 4601 DAGCombinerInfo &DCI) const { 4602 ConstantFPSDNode *CFP = isConstOrConstSplatFP(N->getOperand(0)); 4603 if (!CFP) 4604 return SDValue(); 4605 4606 SelectionDAG &DAG = DCI.DAG; 4607 const APFloat &C = CFP->getValueAPF(); 4608 4609 // Flush denormals to 0 if not enabled. 4610 if (C.isDenormal()) { 4611 EVT VT = N->getValueType(0); 4612 EVT SVT = VT.getScalarType(); 4613 if (SVT == MVT::f32 && !Subtarget->hasFP32Denormals()) 4614 return DAG.getConstantFP(0.0, SDLoc(N), VT); 4615 4616 if (SVT == MVT::f64 && !Subtarget->hasFP64Denormals()) 4617 return DAG.getConstantFP(0.0, SDLoc(N), VT); 4618 4619 if (SVT == MVT::f16 && !Subtarget->hasFP16Denormals()) 4620 return DAG.getConstantFP(0.0, SDLoc(N), VT); 4621 } 4622 4623 if (C.isNaN()) { 4624 EVT VT = N->getValueType(0); 4625 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics()); 4626 if (C.isSignaling()) { 4627 // Quiet a signaling NaN. 4628 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT); 4629 } 4630 4631 // Make sure it is the canonical NaN bitpattern. 
4632 // 4633 // TODO: Can we use -1 as the canonical NaN value since it's an inline 4634 // immediate? 4635 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt()) 4636 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT); 4637 } 4638 4639 return N->getOperand(0); 4640 } 4641 4642 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { 4643 switch (Opc) { 4644 case ISD::FMAXNUM: 4645 return AMDGPUISD::FMAX3; 4646 case ISD::SMAX: 4647 return AMDGPUISD::SMAX3; 4648 case ISD::UMAX: 4649 return AMDGPUISD::UMAX3; 4650 case ISD::FMINNUM: 4651 return AMDGPUISD::FMIN3; 4652 case ISD::SMIN: 4653 return AMDGPUISD::SMIN3; 4654 case ISD::UMIN: 4655 return AMDGPUISD::UMIN3; 4656 default: 4657 llvm_unreachable("Not a min/max opcode"); 4658 } 4659 } 4660 4661 SDValue SITargetLowering::performIntMed3ImmCombine( 4662 SelectionDAG &DAG, const SDLoc &SL, 4663 SDValue Op0, SDValue Op1, bool Signed) const { 4664 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1); 4665 if (!K1) 4666 return SDValue(); 4667 4668 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); 4669 if (!K0) 4670 return SDValue(); 4671 4672 if (Signed) { 4673 if (K0->getAPIntValue().sge(K1->getAPIntValue())) 4674 return SDValue(); 4675 } else { 4676 if (K0->getAPIntValue().uge(K1->getAPIntValue())) 4677 return SDValue(); 4678 } 4679 4680 EVT VT = K0->getValueType(0); 4681 unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3; 4682 if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) { 4683 return DAG.getNode(Med3Opc, SL, VT, 4684 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0)); 4685 } 4686 4687 // If there isn't a 16-bit med3 operation, convert to 32-bit. 4688 MVT NVT = MVT::i32; 4689 unsigned ExtOp = Signed ? 
ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 4690 4691 SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0)); 4692 SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1)); 4693 SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1); 4694 4695 SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3); 4696 return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3); 4697 } 4698 4699 static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) { 4700 if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions()) 4701 return true; 4702 4703 return DAG.isKnownNeverNaN(Op); 4704 } 4705 4706 SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG, 4707 const SDLoc &SL, 4708 SDValue Op0, 4709 SDValue Op1) const { 4710 ConstantFPSDNode *K1 = dyn_cast<ConstantFPSDNode>(Op1); 4711 if (!K1) 4712 return SDValue(); 4713 4714 ConstantFPSDNode *K0 = dyn_cast<ConstantFPSDNode>(Op0.getOperand(1)); 4715 if (!K0) 4716 return SDValue(); 4717 4718 // Ordered >= (although NaN inputs should have folded away by now). 4719 APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF()); 4720 if (Cmp == APFloat::cmpGreaterThan) 4721 return SDValue(); 4722 4723 // TODO: Check IEEE bit enabled? 4724 EVT VT = K0->getValueType(0); 4725 if (Subtarget->enableDX10Clamp()) { 4726 // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the 4727 // hardware fmed3 behavior converting to a min. 4728 // FIXME: Should this be allowing -0.0? 4729 if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0)) 4730 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0)); 4731 } 4732 4733 // med3 for f16 is only available on gfx9+. 4734 if (VT == MVT::f64 || (VT == MVT::f16 && !Subtarget->hasMed3_16())) 4735 return SDValue(); 4736 4737 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a 4738 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would then 4739 // give the other result, which is different from med3 with a NaN input. 
4740 SDValue Var = Op0.getOperand(0); 4741 if (!isKnownNeverSNan(DAG, Var)) 4742 return SDValue(); 4743 4744 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0), 4745 Var, SDValue(K0, 0), SDValue(K1, 0)); 4746 } 4747 4748 SDValue SITargetLowering::performMinMaxCombine(SDNode *N, 4749 DAGCombinerInfo &DCI) const { 4750 SelectionDAG &DAG = DCI.DAG; 4751 4752 EVT VT = N->getValueType(0); 4753 unsigned Opc = N->getOpcode(); 4754 SDValue Op0 = N->getOperand(0); 4755 SDValue Op1 = N->getOperand(1); 4756 4757 // Only do this if the inner op has one use since this will just increases 4758 // register pressure for no benefit. 4759 4760 4761 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY && 4762 VT != MVT::f64 && 4763 ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) { 4764 // max(max(a, b), c) -> max3(a, b, c) 4765 // min(min(a, b), c) -> min3(a, b, c) 4766 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { 4767 SDLoc DL(N); 4768 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 4769 DL, 4770 N->getValueType(0), 4771 Op0.getOperand(0), 4772 Op0.getOperand(1), 4773 Op1); 4774 } 4775 4776 // Try commuted. 
4777 // max(a, max(b, c)) -> max3(a, b, c) 4778 // min(a, min(b, c)) -> min3(a, b, c) 4779 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { 4780 SDLoc DL(N); 4781 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 4782 DL, 4783 N->getValueType(0), 4784 Op0, 4785 Op1.getOperand(0), 4786 Op1.getOperand(1)); 4787 } 4788 } 4789 4790 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1) 4791 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) { 4792 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true)) 4793 return Med3; 4794 } 4795 4796 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) { 4797 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false)) 4798 return Med3; 4799 } 4800 4801 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1) 4802 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) || 4803 (Opc == AMDGPUISD::FMIN_LEGACY && 4804 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) && 4805 (VT == MVT::f32 || VT == MVT::f64 || 4806 (VT == MVT::f16 && Subtarget->has16BitInsts())) && 4807 Op0.hasOneUse()) { 4808 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1)) 4809 return Res; 4810 } 4811 4812 return SDValue(); 4813 } 4814 4815 static bool isClampZeroToOne(SDValue A, SDValue B) { 4816 if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) { 4817 if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) { 4818 // FIXME: Should this be allowing -0.0? 4819 return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) || 4820 (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0)); 4821 } 4822 } 4823 4824 return false; 4825 } 4826 4827 // FIXME: Should only worry about snans for version with chain. 4828 SDValue SITargetLowering::performFMed3Combine(SDNode *N, 4829 DAGCombinerInfo &DCI) const { 4830 EVT VT = N->getValueType(0); 4831 // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and 4832 // NaNs. 
With a NaN input, the order of the operands may change the result. 4833 4834 SelectionDAG &DAG = DCI.DAG; 4835 SDLoc SL(N); 4836 4837 SDValue Src0 = N->getOperand(0); 4838 SDValue Src1 = N->getOperand(1); 4839 SDValue Src2 = N->getOperand(2); 4840 4841 if (isClampZeroToOne(Src0, Src1)) { 4842 // const_a, const_b, x -> clamp is safe in all cases including signaling 4843 // nans. 4844 // FIXME: Should this be allowing -0.0? 4845 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2); 4846 } 4847 4848 // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother 4849 // handling no dx10-clamp? 4850 if (Subtarget->enableDX10Clamp()) { 4851 // If NaNs is clamped to 0, we are free to reorder the inputs. 4852 4853 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) 4854 std::swap(Src0, Src1); 4855 4856 if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2)) 4857 std::swap(Src1, Src2); 4858 4859 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) 4860 std::swap(Src0, Src1); 4861 4862 if (isClampZeroToOne(Src1, Src2)) 4863 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0); 4864 } 4865 4866 return SDValue(); 4867 } 4868 4869 SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N, 4870 DAGCombinerInfo &DCI) const { 4871 SDValue Src0 = N->getOperand(0); 4872 SDValue Src1 = N->getOperand(1); 4873 if (Src0.isUndef() && Src1.isUndef()) 4874 return DCI.DAG.getUNDEF(N->getValueType(0)); 4875 return SDValue(); 4876 } 4877 4878 SDValue SITargetLowering::performExtractVectorEltCombine( 4879 SDNode *N, DAGCombinerInfo &DCI) const { 4880 SDValue Vec = N->getOperand(0); 4881 4882 SelectionDAG &DAG= DCI.DAG; 4883 if (Vec.getOpcode() == ISD::FNEG && allUsesHaveSourceMods(N)) { 4884 SDLoc SL(N); 4885 EVT EltVT = N->getValueType(0); 4886 SDValue Idx = N->getOperand(1); 4887 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 4888 Vec.getOperand(0), Idx); 4889 return DAG.getNode(ISD::FNEG, SL, EltVT, Elt); 4890 } 4891 4892 return 
SDValue(); 4893 } 4894 4895 4896 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG, 4897 const SDNode *N0, 4898 const SDNode *N1) const { 4899 EVT VT = N0->getValueType(0); 4900 4901 // Only do this if we are not trying to support denormals. v_mad_f32 does not 4902 // support denormals ever. 4903 if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) || 4904 (VT == MVT::f16 && !Subtarget->hasFP16Denormals())) 4905 return ISD::FMAD; 4906 4907 const TargetOptions &Options = DAG.getTarget().Options; 4908 if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || 4909 (N0->getFlags().hasUnsafeAlgebra() && 4910 N1->getFlags().hasUnsafeAlgebra())) && 4911 isFMAFasterThanFMulAndFAdd(VT)) { 4912 return ISD::FMA; 4913 } 4914 4915 return 0; 4916 } 4917 4918 SDValue SITargetLowering::performAddCombine(SDNode *N, 4919 DAGCombinerInfo &DCI) const { 4920 SelectionDAG &DAG = DCI.DAG; 4921 EVT VT = N->getValueType(0); 4922 4923 if (VT != MVT::i32) 4924 return SDValue(); 4925 4926 SDLoc SL(N); 4927 SDValue LHS = N->getOperand(0); 4928 SDValue RHS = N->getOperand(1); 4929 4930 // add x, zext (setcc) => addcarry x, 0, setcc 4931 // add x, sext (setcc) => subcarry x, 0, setcc 4932 unsigned Opc = LHS.getOpcode(); 4933 if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND || 4934 Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY) 4935 std::swap(RHS, LHS); 4936 4937 Opc = RHS.getOpcode(); 4938 switch (Opc) { 4939 default: break; 4940 case ISD::ZERO_EXTEND: 4941 case ISD::SIGN_EXTEND: 4942 case ISD::ANY_EXTEND: { 4943 auto Cond = RHS.getOperand(0); 4944 if (Cond.getOpcode() != ISD::SETCC && 4945 Cond.getOpcode() != AMDGPUISD::FP_CLASS) 4946 break; 4947 SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1); 4948 SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond }; 4949 Opc = (Opc == ISD::SIGN_EXTEND) ? 
ISD::SUBCARRY : ISD::ADDCARRY; 4950 return DAG.getNode(Opc, SL, VTList, Args); 4951 } 4952 case ISD::ADDCARRY: { 4953 // add x, (addcarry y, 0, cc) => addcarry x, y, cc 4954 auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 4955 if (!C || C->getZExtValue() != 0) break; 4956 SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) }; 4957 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args); 4958 } 4959 } 4960 return SDValue(); 4961 } 4962 4963 SDValue SITargetLowering::performSubCombine(SDNode *N, 4964 DAGCombinerInfo &DCI) const { 4965 SelectionDAG &DAG = DCI.DAG; 4966 EVT VT = N->getValueType(0); 4967 4968 if (VT != MVT::i32) 4969 return SDValue(); 4970 4971 SDLoc SL(N); 4972 SDValue LHS = N->getOperand(0); 4973 SDValue RHS = N->getOperand(1); 4974 4975 unsigned Opc = LHS.getOpcode(); 4976 if (Opc != ISD::SUBCARRY) 4977 std::swap(RHS, LHS); 4978 4979 if (LHS.getOpcode() == ISD::SUBCARRY) { 4980 // sub (subcarry x, 0, cc), y => subcarry x, y, cc 4981 auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); 4982 if (!C || C->getZExtValue() != 0) 4983 return SDValue(); 4984 SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) }; 4985 return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args); 4986 } 4987 return SDValue(); 4988 } 4989 4990 SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N, 4991 DAGCombinerInfo &DCI) const { 4992 4993 if (N->getValueType(0) != MVT::i32) 4994 return SDValue(); 4995 4996 auto C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 4997 if (!C || C->getZExtValue() != 0) 4998 return SDValue(); 4999 5000 SelectionDAG &DAG = DCI.DAG; 5001 SDValue LHS = N->getOperand(0); 5002 5003 // addcarry (add x, y), 0, cc => addcarry x, y, cc 5004 // subcarry (sub x, y), 0, cc => subcarry x, y, cc 5005 unsigned LHSOpc = LHS.getOpcode(); 5006 unsigned Opc = N->getOpcode(); 5007 if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) || 5008 (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) { 5009 SDValue Args[] = { 
LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) }; 5010 return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args); 5011 } 5012 return SDValue(); 5013 } 5014 5015 SDValue SITargetLowering::performFAddCombine(SDNode *N, 5016 DAGCombinerInfo &DCI) const { 5017 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 5018 return SDValue(); 5019 5020 SelectionDAG &DAG = DCI.DAG; 5021 EVT VT = N->getValueType(0); 5022 5023 SDLoc SL(N); 5024 SDValue LHS = N->getOperand(0); 5025 SDValue RHS = N->getOperand(1); 5026 5027 // These should really be instruction patterns, but writing patterns with 5028 // source modiifiers is a pain. 5029 5030 // fadd (fadd (a, a), b) -> mad 2.0, a, b 5031 if (LHS.getOpcode() == ISD::FADD) { 5032 SDValue A = LHS.getOperand(0); 5033 if (A == LHS.getOperand(1)) { 5034 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); 5035 if (FusedOp != 0) { 5036 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 5037 return DAG.getNode(FusedOp, SL, VT, A, Two, RHS); 5038 } 5039 } 5040 } 5041 5042 // fadd (b, fadd (a, a)) -> mad 2.0, a, b 5043 if (RHS.getOpcode() == ISD::FADD) { 5044 SDValue A = RHS.getOperand(0); 5045 if (A == RHS.getOperand(1)) { 5046 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); 5047 if (FusedOp != 0) { 5048 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 5049 return DAG.getNode(FusedOp, SL, VT, A, Two, LHS); 5050 } 5051 } 5052 } 5053 5054 return SDValue(); 5055 } 5056 5057 SDValue SITargetLowering::performFSubCombine(SDNode *N, 5058 DAGCombinerInfo &DCI) const { 5059 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 5060 return SDValue(); 5061 5062 SelectionDAG &DAG = DCI.DAG; 5063 SDLoc SL(N); 5064 EVT VT = N->getValueType(0); 5065 assert(!VT.isVector()); 5066 5067 // Try to get the fneg to fold into the source modifier. This undoes generic 5068 // DAG combines and folds them into the mad. 5069 // 5070 // Only do this if we are not trying to support denormals. v_mad_f32 does 5071 // not support denormals ever. 
5072 SDValue LHS = N->getOperand(0); 5073 SDValue RHS = N->getOperand(1); 5074 if (LHS.getOpcode() == ISD::FADD) { 5075 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c) 5076 SDValue A = LHS.getOperand(0); 5077 if (A == LHS.getOperand(1)) { 5078 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); 5079 if (FusedOp != 0){ 5080 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 5081 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 5082 5083 return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS); 5084 } 5085 } 5086 } 5087 5088 if (RHS.getOpcode() == ISD::FADD) { 5089 // (fsub c, (fadd a, a)) -> mad -2.0, a, c 5090 5091 SDValue A = RHS.getOperand(0); 5092 if (A == RHS.getOperand(1)) { 5093 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); 5094 if (FusedOp != 0){ 5095 const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT); 5096 return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS); 5097 } 5098 } 5099 } 5100 5101 return SDValue(); 5102 } 5103 5104 SDValue SITargetLowering::performSetCCCombine(SDNode *N, 5105 DAGCombinerInfo &DCI) const { 5106 SelectionDAG &DAG = DCI.DAG; 5107 SDLoc SL(N); 5108 5109 SDValue LHS = N->getOperand(0); 5110 SDValue RHS = N->getOperand(1); 5111 EVT VT = LHS.getValueType(); 5112 5113 if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() && 5114 VT != MVT::f16)) 5115 return SDValue(); 5116 5117 // Match isinf pattern 5118 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) 5119 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 5120 if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) { 5121 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); 5122 if (!CRHS) 5123 return SDValue(); 5124 5125 const APFloat &APF = CRHS->getValueAPF(); 5126 if (APF.isInfinity() && !APF.isNegative()) { 5127 unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY; 5128 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0), 5129 DAG.getConstant(Mask, SL, MVT::i32)); 
5130 } 5131 } 5132 5133 return SDValue(); 5134 } 5135 5136 SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N, 5137 DAGCombinerInfo &DCI) const { 5138 SelectionDAG &DAG = DCI.DAG; 5139 SDLoc SL(N); 5140 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; 5141 5142 SDValue Src = N->getOperand(0); 5143 SDValue Srl = N->getOperand(0); 5144 if (Srl.getOpcode() == ISD::ZERO_EXTEND) 5145 Srl = Srl.getOperand(0); 5146 5147 // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero. 5148 if (Srl.getOpcode() == ISD::SRL) { 5149 // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x 5150 // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x 5151 // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x 5152 5153 if (const ConstantSDNode *C = 5154 dyn_cast<ConstantSDNode>(Srl.getOperand(1))) { 5155 Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)), 5156 EVT(MVT::i32)); 5157 5158 unsigned SrcOffset = C->getZExtValue() + 8 * Offset; 5159 if (SrcOffset < 32 && SrcOffset % 8 == 0) { 5160 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL, 5161 MVT::f32, Srl); 5162 } 5163 } 5164 } 5165 5166 APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8); 5167 5168 KnownBits Known; 5169 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 5170 !DCI.isBeforeLegalizeOps()); 5171 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5172 if (TLI.ShrinkDemandedConstant(Src, Demanded, TLO) || 5173 TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) { 5174 DCI.CommitTargetLoweringOpt(TLO); 5175 } 5176 5177 return SDValue(); 5178 } 5179 5180 SDValue SITargetLowering::PerformDAGCombine(SDNode *N, 5181 DAGCombinerInfo &DCI) const { 5182 switch (N->getOpcode()) { 5183 default: 5184 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 5185 case ISD::ADD: 5186 return performAddCombine(N, DCI); 5187 case ISD::SUB: 5188 return performSubCombine(N, DCI); 5189 case ISD::ADDCARRY: 5190 case ISD::SUBCARRY: 5191 return 
performAddCarrySubCarryCombine(N, DCI); 5192 case ISD::FADD: 5193 return performFAddCombine(N, DCI); 5194 case ISD::FSUB: 5195 return performFSubCombine(N, DCI); 5196 case ISD::SETCC: 5197 return performSetCCCombine(N, DCI); 5198 case ISD::FMAXNUM: 5199 case ISD::FMINNUM: 5200 case ISD::SMAX: 5201 case ISD::SMIN: 5202 case ISD::UMAX: 5203 case ISD::UMIN: 5204 case AMDGPUISD::FMIN_LEGACY: 5205 case AMDGPUISD::FMAX_LEGACY: { 5206 if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG && 5207 getTargetMachine().getOptLevel() > CodeGenOpt::None) 5208 return performMinMaxCombine(N, DCI); 5209 break; 5210 } 5211 case ISD::LOAD: 5212 case ISD::STORE: 5213 case ISD::ATOMIC_LOAD: 5214 case ISD::ATOMIC_STORE: 5215 case ISD::ATOMIC_CMP_SWAP: 5216 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 5217 case ISD::ATOMIC_SWAP: 5218 case ISD::ATOMIC_LOAD_ADD: 5219 case ISD::ATOMIC_LOAD_SUB: 5220 case ISD::ATOMIC_LOAD_AND: 5221 case ISD::ATOMIC_LOAD_OR: 5222 case ISD::ATOMIC_LOAD_XOR: 5223 case ISD::ATOMIC_LOAD_NAND: 5224 case ISD::ATOMIC_LOAD_MIN: 5225 case ISD::ATOMIC_LOAD_MAX: 5226 case ISD::ATOMIC_LOAD_UMIN: 5227 case ISD::ATOMIC_LOAD_UMAX: 5228 case AMDGPUISD::ATOMIC_INC: 5229 case AMDGPUISD::ATOMIC_DEC: // TODO: Target mem intrinsics. 
5230 if (DCI.isBeforeLegalize()) 5231 break; 5232 return performMemSDNodeCombine(cast<MemSDNode>(N), DCI); 5233 case ISD::AND: 5234 return performAndCombine(N, DCI); 5235 case ISD::OR: 5236 return performOrCombine(N, DCI); 5237 case ISD::XOR: 5238 return performXorCombine(N, DCI); 5239 case ISD::ZERO_EXTEND: 5240 return performZeroExtendCombine(N, DCI); 5241 case AMDGPUISD::FP_CLASS: 5242 return performClassCombine(N, DCI); 5243 case ISD::FCANONICALIZE: 5244 return performFCanonicalizeCombine(N, DCI); 5245 case AMDGPUISD::FRACT: 5246 case AMDGPUISD::RCP: 5247 case AMDGPUISD::RSQ: 5248 case AMDGPUISD::RCP_LEGACY: 5249 case AMDGPUISD::RSQ_LEGACY: 5250 case AMDGPUISD::RSQ_CLAMP: 5251 case AMDGPUISD::LDEXP: { 5252 SDValue Src = N->getOperand(0); 5253 if (Src.isUndef()) 5254 return Src; 5255 break; 5256 } 5257 case ISD::SINT_TO_FP: 5258 case ISD::UINT_TO_FP: 5259 return performUCharToFloatCombine(N, DCI); 5260 case AMDGPUISD::CVT_F32_UBYTE0: 5261 case AMDGPUISD::CVT_F32_UBYTE1: 5262 case AMDGPUISD::CVT_F32_UBYTE2: 5263 case AMDGPUISD::CVT_F32_UBYTE3: 5264 return performCvtF32UByteNCombine(N, DCI); 5265 case AMDGPUISD::FMED3: 5266 return performFMed3Combine(N, DCI); 5267 case AMDGPUISD::CVT_PKRTZ_F16_F32: 5268 return performCvtPkRTZCombine(N, DCI); 5269 case ISD::SCALAR_TO_VECTOR: { 5270 SelectionDAG &DAG = DCI.DAG; 5271 EVT VT = N->getValueType(0); 5272 5273 // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x)) 5274 if (VT == MVT::v2i16 || VT == MVT::v2f16) { 5275 SDLoc SL(N); 5276 SDValue Src = N->getOperand(0); 5277 EVT EltVT = Src.getValueType(); 5278 if (EltVT == MVT::f16) 5279 Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src); 5280 5281 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src); 5282 return DAG.getNode(ISD::BITCAST, SL, VT, Ext); 5283 } 5284 5285 break; 5286 } 5287 case ISD::EXTRACT_VECTOR_ELT: 5288 return performExtractVectorEltCombine(N, DCI); 5289 } 5290 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 5291 } 5292 
/// \brief Helper function for adjustWritemask
///
/// Maps an EXTRACT_SUBREG subregister index to the packed result lane it
/// selects (sub0..sub3 -> 0..3). Unknown indices map to lane 0.
static unsigned SubIdx2Lane(unsigned Idx) {
  switch (Idx) {
  default: return 0;
  case AMDGPU::sub0: return 0;
  case AMDGPU::sub1: return 1;
  case AMDGPU::sub2: return 2;
  case AMDGPU::sub3: return 3;
  }
}

/// \brief Adjust the writemask of MIMG instructions
///
/// Inspects how the result of a MIMG node is consumed and shrinks its dmask
/// operand to the components actually extracted, rewriting the users' subreg
/// indices to match the new packed layout. Aborts (returns with no change)
/// whenever the usage cannot be fully understood.
void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
                                       SelectionDAG &DAG) const {
  SDNode *Users[4] = { };
  unsigned Lane = 0;
  // Position of the dmask immediate depends on the operand layout; the
  // operand-minus-value count distinguishes the two MIMG forms in use here.
  // NOTE(review): the magic 9 encodes a specific operand layout — confirm
  // against the MIMG instruction definitions if the format changes.
  unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 2 : 3;
  unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
  unsigned NewDmask = 0;

  // Try to figure out the used register components
  for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
       I != E; ++I) {

    // Don't look at users of the chain.
    if (I.getUse().getResNo() != 0)
      continue;

    // Abort if we can't understand the usage
    if (!I->isMachineOpcode() ||
        I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
      return;

    // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used.
    // Note that subregs are packed, i.e. Lane==0 is the first bit set
    // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
    // set, etc.
    Lane = SubIdx2Lane(I->getConstantOperandVal(1));

    // Set which texture component corresponds to the lane.
    // Walks the set bits of OldDmask: after the loop, Comp is the index of
    // the (Lane+1)-th set bit, i.e. the original component for this lane.
    unsigned Comp;
    for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
      assert(Dmask);
      Comp = countTrailingZeros(Dmask);
      Dmask &= ~(1 << Comp);
    }

    // Abort if we have more than one user per component
    if (Users[Lane])
      return;

    Users[Lane] = *I;
    NewDmask |= 1 << Comp;
  }

  // Abort if there's no change
  if (NewDmask == OldDmask)
    return;

  // Adjust the writemask in the node
  // Rebuild the operand list with only the dmask operand replaced.
  std::vector<SDValue> Ops;
  Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
  Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
  Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
  Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops);

  // If we only got one lane, replace it with a copy
  // (if NewDmask has only one bit set...)
  if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
    SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, SDLoc(),
                                       MVT::i32);
    SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                      SDLoc(), Users[Lane]->getValueType(0),
                                      SDValue(Node, 0), RC);
    DAG.ReplaceAllUsesWith(Users[Lane], Copy);
    return;
  }

  // Update the users of the node with the new indices
  // Idx advances through sub0..sub3 only for lanes that have a user, so the
  // users end up referencing consecutive subregs of the shrunken result.
  for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
    SDNode *User = Users[i];
    if (!User)
      continue;

    SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
    DAG.UpdateNodeOperands(User, User->getOperand(0), Op);

    switch (Idx) {
    default: break;
    case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
    case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
    case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
    }
  }
}

/// Return true if Op is a frame index, looking through AssertZext.
static bool isFrameIndexOp(SDValue Op) {
  if (Op.getOpcode() == ISD::AssertZext)
    Op = Op.getOperand(0);

  return isa<FrameIndexSDNode>(Op);
}

/// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
/// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers.
SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
                                                        SelectionDAG &DAG) const {
  if (Node->getOpcode() == ISD::CopyToReg) {
    RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
    SDValue SrcVal = Node->getOperand(2);

    // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
    // to try understanding copies to physical registers.
    if (SrcVal.getValueType() == MVT::i1 &&
        TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
      SDLoc SL(Node);
      MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
      SDValue VReg = DAG.getRegister(
        MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);

      SDNode *Glued = Node->getGluedNode();
      // Preserve any incoming glue on the first copy; the second copy is
      // chained (and glued) onto the first.
      SDValue ToVReg
        = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
                           SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
      SDValue ToResultReg
        = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
                           VReg, ToVReg.getValue(1));
      DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
      DAG.RemoveDeadNode(Node);
      return ToResultReg.getNode();
    }
  }

  // Materialize any frame-index operands into registers with s_mov_b32.
  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
    if (!isFrameIndexOp(Node->getOperand(i))) {
      Ops.push_back(Node->getOperand(i));
      continue;
    }

    SDLoc DL(Node);
    Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
                                             Node->getOperand(i).getValueType(),
                                             Node->getOperand(i)), 0));
  }

  DAG.UpdateNodeOperands(Node, Ops);
  return Node;
}

/// \brief Fold the instructions after selecting them.
5445 SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, 5446 SelectionDAG &DAG) const { 5447 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 5448 unsigned Opcode = Node->getMachineOpcode(); 5449 5450 if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() && 5451 !TII->isGather4(Opcode)) 5452 adjustWritemask(Node, DAG); 5453 5454 if (Opcode == AMDGPU::INSERT_SUBREG || 5455 Opcode == AMDGPU::REG_SEQUENCE) { 5456 legalizeTargetIndependentNode(Node, DAG); 5457 return Node; 5458 } 5459 return Node; 5460 } 5461 5462 /// \brief Assign the register class depending on the number of 5463 /// bits set in the writemask 5464 void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, 5465 SDNode *Node) const { 5466 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 5467 5468 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 5469 5470 if (TII->isVOP3(MI.getOpcode())) { 5471 // Make sure constant bus requirements are respected. 5472 TII->legalizeOperandsVOP3(MRI, MI); 5473 return; 5474 } 5475 5476 if (TII->isMIMG(MI)) { 5477 unsigned VReg = MI.getOperand(0).getReg(); 5478 const TargetRegisterClass *RC = MRI.getRegClass(VReg); 5479 // TODO: Need mapping tables to handle other cases (register classes). 5480 if (RC != &AMDGPU::VReg_128RegClass) 5481 return; 5482 5483 unsigned DmaskIdx = MI.getNumOperands() == 12 ? 3 : 4; 5484 unsigned Writemask = MI.getOperand(DmaskIdx).getImm(); 5485 unsigned BitsSet = 0; 5486 for (unsigned i = 0; i < 4; ++i) 5487 BitsSet += Writemask & (1 << i) ? 1 : 0; 5488 switch (BitsSet) { 5489 default: return; 5490 case 1: RC = &AMDGPU::VGPR_32RegClass; break; 5491 case 2: RC = &AMDGPU::VReg_64RegClass; break; 5492 case 3: RC = &AMDGPU::VReg_96RegClass; break; 5493 } 5494 5495 unsigned NewOpcode = TII->getMaskedMIMGOp(MI.getOpcode(), BitsSet); 5496 MI.setDesc(TII->get(NewOpcode)); 5497 MRI.setRegClass(VReg, RC); 5498 return; 5499 } 5500 5501 // Replace unused atomics with the no return version. 
5502 int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode()); 5503 if (NoRetAtomicOp != -1) { 5504 if (!Node->hasAnyUseOfValue(0)) { 5505 MI.setDesc(TII->get(NoRetAtomicOp)); 5506 MI.RemoveOperand(0); 5507 return; 5508 } 5509 5510 // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg 5511 // instruction, because the return type of these instructions is a vec2 of 5512 // the memory type, so it can be tied to the input operand. 5513 // This means these instructions always have a use, so we need to add a 5514 // special case to check if the atomic has only one extract_subreg use, 5515 // which itself has no uses. 5516 if ((Node->hasNUsesOfValue(1, 0) && 5517 Node->use_begin()->isMachineOpcode() && 5518 Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG && 5519 !Node->use_begin()->hasAnyUseOfValue(0))) { 5520 unsigned Def = MI.getOperand(0).getReg(); 5521 5522 // Change this into a noret atomic. 5523 MI.setDesc(TII->get(NoRetAtomicOp)); 5524 MI.RemoveOperand(0); 5525 5526 // If we only remove the def operand from the atomic instruction, the 5527 // extract_subreg will be left with a use of a vreg without a def. 5528 // So we need to insert an implicit_def to avoid machine verifier 5529 // errors. 5530 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), 5531 TII->get(AMDGPU::IMPLICIT_DEF), Def); 5532 } 5533 return; 5534 } 5535 } 5536 5537 static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL, 5538 uint64_t Val) { 5539 SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32); 5540 return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0); 5541 } 5542 5543 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, 5544 const SDLoc &DL, 5545 SDValue Ptr) const { 5546 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 5547 5548 // Build the half of the subregister with the constants before building the 5549 // full 128-bit register. 
If we are building multiple resource descriptors, 5550 // this will allow CSEing of the 2-component register. 5551 const SDValue Ops0[] = { 5552 DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32), 5553 buildSMovImm32(DAG, DL, 0), 5554 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), 5555 buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32), 5556 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32) 5557 }; 5558 5559 SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, 5560 MVT::v2i32, Ops0), 0); 5561 5562 // Combine the constants and the pointer. 5563 const SDValue Ops1[] = { 5564 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), 5565 Ptr, 5566 DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32), 5567 SubRegHi, 5568 DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32) 5569 }; 5570 5571 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); 5572 } 5573 5574 /// \brief Return a resource descriptor with the 'Add TID' bit enabled 5575 /// The TID (Thread ID) is multiplied by the stride value (bits [61:48] 5576 /// of the resource descriptor) to create an offset, which is added to 5577 /// the resource pointer. 
MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
                                           SDValue Ptr, uint32_t RsrcDword1,
                                           uint64_t RsrcDword2And3) const {
  // Split the pointer into its two 32-bit halves; RsrcDword1 (if any) is
  // OR'ed into the high half of the pointer.
  SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
  SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
  if (RsrcDword1) {
    PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
                                       DAG.getConstant(RsrcDword1, DL, MVT::i32)),
                    0);
  }

  // Materialize dwords 2 and 3 of the descriptor as immediates.
  SDValue DataLo = buildSMovImm32(DAG, DL,
                                  RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
  SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);

  // Assemble the four dwords into a single 128-bit SReg_128 REG_SEQUENCE.
  const SDValue Ops[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    PtrLo,
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    PtrHi,
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
    DataLo,
    DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
    DataHi,
    DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
}

//===----------------------------------------------------------------------===//
// SI Inline Assembly Support
//===----------------------------------------------------------------------===//

/// Map an inline-asm register constraint to a register class (or a specific
/// register).  's'/'r' select scalar (SGPR) classes and 'v' selects vector
/// (VGPR) classes, chosen by the bit width of \p VT; multi-character
/// constraints of the form "?v<N>"/"?s<N>" select the N-th register of the
/// corresponding 32-bit class.  Anything unrecognized is deferred to the
/// target-independent handler.
std::pair<unsigned, const TargetRegisterClass *>
SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                               StringRef Constraint,
                                               MVT VT) const {
  if (!isTypeLegal(VT))
    return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 's':
    case 'r':
      // Every case of this inner switch returns, so despite the missing
      // 'break' there is no reachable fallthrough into 'v' below.
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        return std::make_pair(0U, &AMDGPU::SReg_32_XM0RegClass);
      case 64:
        return std::make_pair(0U, &AMDGPU::SGPR_64RegClass);
      case 128:
        return std::make_pair(0U, &AMDGPU::SReg_128RegClass);
      case 256:
        return std::make_pair(0U, &AMDGPU::SReg_256RegClass);
      case 512:
        return std::make_pair(0U, &AMDGPU::SReg_512RegClass);
      }

    case 'v':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        return std::make_pair(0U, &AMDGPU::VGPR_32RegClass);
      case 64:
        return std::make_pair(0U, &AMDGPU::VReg_64RegClass);
      case 96:
        return std::make_pair(0U, &AMDGPU::VReg_96RegClass);
      case 128:
        return std::make_pair(0U, &AMDGPU::VReg_128RegClass);
      case 256:
        return std::make_pair(0U, &AMDGPU::VReg_256RegClass);
      case 512:
        return std::make_pair(0U, &AMDGPU::VReg_512RegClass);
      }
    }
  }

  if (Constraint.size() > 1) {
    // NOTE(review): this parses the register index starting at offset 2, so
    // it expects something like "{v3}" with Constraint[1] being 'v'/'s'.
    // If the constraint still carries a trailing '}' at this point,
    // getAsInteger will fail on the non-digit and we fall through to the
    // default handler -- confirm the expected constraint spelling.
    const TargetRegisterClass *RC = nullptr;
    if (Constraint[1] == 'v') {
      RC = &AMDGPU::VGPR_32RegClass;
    } else if (Constraint[1] == 's') {
      RC = &AMDGPU::SGPR_32RegClass;
    }

    if (RC) {
      uint32_t Idx;
      bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
      if (!Failed && Idx < RC->getNumRegs())
        return std::make_pair(RC->getRegister(Idx), RC);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

/// Classify single-character constraints 's' and 'v' as register-class
/// constraints; everything else uses the target-independent classification.
SITargetLowering::ConstraintType
SITargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 's':
    case 'v':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}