//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#ifdef _MSC_VER
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "si-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

static cl::opt<unsigned> AssumeFrameIndexHighZeroBits(
  "amdgpu-frame-index-zero-bits",
  cl::desc("High bits of frame index assumed to be zero"),
  cl::init(5),
  cl::ReallyHidden);
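
// Scan the 32-bit SGPRs in order and return the first one the calling
// convention state has not yet allocated; used below to place the scratch
// wave byte offset when it has no fixed location.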
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const SISubtarget &STI)
  : AMDGPUTargetLowering(TM, STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
  }

  if (Subtarget->hasVOP3PInsts()) {
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
  }

  computeRegisterProperties(STI.getRegisterInfo());

  // We need to custom lower vector loads and stores from local memory.
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);
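
  // There is no fused compare-and-select instruction, so SELECT_CC is
  // expanded into a SETCC feeding a SELECT.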
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

#if 0
  setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
#endif

  //setOperationAction(ISD::ADDC, MVT::i64, Expand);
  //setOperationAction(ISD::SUBC, MVT::i64, Expand);

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                 MVT::v2i64, MVT::v2f64}) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }
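
  // Note: marking a vector operation Expand makes the legalizer unroll it
  // into scalar operations, so anything not handled above is scalarized.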

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling and
  // output demarshalling.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value;
  // let LLVM add the comparison.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (getSubtarget()->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // On SI this is s_memtime; on VI it is s_memrealtime.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);

  if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);
  }

  if (Subtarget->hasVOP3PInsts()) {
    for (MVT VT : {MVT::v2i16, MVT::v2f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Legal);

    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns).
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
  } else {
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::BUILD_VECTOR);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);

  setSchedulingPreference(Sched::RegPressure);
}

const SISubtarget *SITargetLowering::getSubtarget() const {
  return static_cast<const SISubtarget *>(Subtarget);
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}
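
// Describe memory-touching intrinsics to SelectionDAG: fill in the node
// opcode, memory type, pseudo source value, alignment, and MachineMemOperand
// flags so a MachineMemOperand can be attached. Returning false means no
// special memory information is recorded for the intrinsic.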
bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          MachineFunction &MF,
                                          unsigned IntrID) const {
  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
    if (!Vol || !Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }

  // Image load.
  case Intrinsic::amdgcn_image_load:
  case Intrinsic::amdgcn_image_load_mip:

  // Sample.
  case Intrinsic::amdgcn_image_sample:
  case Intrinsic::amdgcn_image_sample_cl:
  case Intrinsic::amdgcn_image_sample_d:
  case Intrinsic::amdgcn_image_sample_d_cl:
  case Intrinsic::amdgcn_image_sample_l:
  case Intrinsic::amdgcn_image_sample_b:
  case Intrinsic::amdgcn_image_sample_b_cl:
  case Intrinsic::amdgcn_image_sample_lz:
  case Intrinsic::amdgcn_image_sample_cd:
  case Intrinsic::amdgcn_image_sample_cd_cl:

  // Sample with comparison.
  case Intrinsic::amdgcn_image_sample_c:
  case Intrinsic::amdgcn_image_sample_c_cl:
  case Intrinsic::amdgcn_image_sample_c_d:
  case Intrinsic::amdgcn_image_sample_c_d_cl:
  case Intrinsic::amdgcn_image_sample_c_l:
  case Intrinsic::amdgcn_image_sample_c_b:
  case Intrinsic::amdgcn_image_sample_c_b_cl:
  case Intrinsic::amdgcn_image_sample_c_lz:
  case Intrinsic::amdgcn_image_sample_c_cd:
  case Intrinsic::amdgcn_image_sample_c_cd_cl:

  // Sample with offsets.
  case Intrinsic::amdgcn_image_sample_o:
  case Intrinsic::amdgcn_image_sample_cl_o:
  case Intrinsic::amdgcn_image_sample_d_o:
  case Intrinsic::amdgcn_image_sample_d_cl_o:
  case Intrinsic::amdgcn_image_sample_l_o:
  case Intrinsic::amdgcn_image_sample_b_o:
  case Intrinsic::amdgcn_image_sample_b_cl_o:
  case Intrinsic::amdgcn_image_sample_lz_o:
  case Intrinsic::amdgcn_image_sample_cd_o:
  case Intrinsic::amdgcn_image_sample_cd_cl_o:

  // Sample with comparison and offsets.
  case Intrinsic::amdgcn_image_sample_c_o:
  case Intrinsic::amdgcn_image_sample_c_cl_o:
  case Intrinsic::amdgcn_image_sample_c_d_o:
  case Intrinsic::amdgcn_image_sample_c_d_cl_o:
  case Intrinsic::amdgcn_image_sample_c_l_o:
  case Intrinsic::amdgcn_image_sample_c_b_o:
  case Intrinsic::amdgcn_image_sample_c_b_cl_o:
  case Intrinsic::amdgcn_image_sample_c_lz_o:
  case Intrinsic::amdgcn_image_sample_c_cd_o:
  case Intrinsic::amdgcn_image_sample_c_cd_cl_o:

  // Basic gather4
  case Intrinsic::amdgcn_image_gather4:
  case Intrinsic::amdgcn_image_gather4_cl:
  case Intrinsic::amdgcn_image_gather4_l:
  case Intrinsic::amdgcn_image_gather4_b:
  case Intrinsic::amdgcn_image_gather4_b_cl:
  case Intrinsic::amdgcn_image_gather4_lz:

  // Gather4 with comparison
  case Intrinsic::amdgcn_image_gather4_c:
  case Intrinsic::amdgcn_image_gather4_c_cl:
  case Intrinsic::amdgcn_image_gather4_c_l:
  case Intrinsic::amdgcn_image_gather4_c_b:
  case Intrinsic::amdgcn_image_gather4_c_b_cl:
  case Intrinsic::amdgcn_image_gather4_c_lz:

  // Gather4 with offsets
  case Intrinsic::amdgcn_image_gather4_o:
  case Intrinsic::amdgcn_image_gather4_cl_o:
  case Intrinsic::amdgcn_image_gather4_l_o:
  case Intrinsic::amdgcn_image_gather4_b_o:
  case Intrinsic::amdgcn_image_gather4_b_cl_o:
  case Intrinsic::amdgcn_image_gather4_lz_o:

  // Gather4 with comparison and offsets
  case Intrinsic::amdgcn_image_gather4_c_o:
  case Intrinsic::amdgcn_image_gather4_c_cl_o:
  case Intrinsic::amdgcn_image_gather4_c_l_o:
  case Intrinsic::amdgcn_image_gather4_c_b_o:
  case Intrinsic::amdgcn_image_gather4_c_b_cl_o:
  case Intrinsic::amdgcn_image_gather4_c_lz_o: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = MFI->getImagePSV(
      *MF.getSubtarget<SISubtarget>().getInstrInfo(),
      CI.getArgOperand(1));
    Info.align = 0;
    Info.flags = MachineMemOperand::MOLoad |
                 MachineMemOperand::MODereferenceable;
    return true;
  }
  case Intrinsic::amdgcn_image_store:
  case Intrinsic::amdgcn_image_store_mip: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
    Info.ptrVal = MFI->getImagePSV(
      *MF.getSubtarget<SISubtarget>().getInstrInfo(),
      CI.getArgOperand(2));
    Info.flags = MachineMemOperand::MOStore |
                 MachineMemOperand::MODereferenceable;
    Info.align = 0;
    return true;
  }
  case Intrinsic::amdgcn_image_atomic_swap:
  case Intrinsic::amdgcn_image_atomic_add:
  case Intrinsic::amdgcn_image_atomic_sub:
  case Intrinsic::amdgcn_image_atomic_smin:
  case Intrinsic::amdgcn_image_atomic_umin:
  case Intrinsic::amdgcn_image_atomic_smax:
  case Intrinsic::amdgcn_image_atomic_umax:
  case Intrinsic::amdgcn_image_atomic_and:
  case Intrinsic::amdgcn_image_atomic_or:
  case Intrinsic::amdgcn_image_atomic_xor:
  case Intrinsic::amdgcn_image_atomic_inc:
  case Intrinsic::amdgcn_image_atomic_dec: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = MFI->getImagePSV(
      *MF.getSubtarget<SISubtarget>().getInstrInfo(),
      CI.getArgOperand(2));

    Info.flags = MachineMemOperand::MOLoad |
                 MachineMemOperand::MOStore |
                 MachineMemOperand::MODereferenceable;

    // XXX - Should this be volatile without known ordering?
    Info.flags |= MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::amdgcn_image_atomic_cmpswap: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = MFI->getImagePSV(
      *MF.getSubtarget<SISubtarget>().getInstrInfo(),
      CI.getArgOperand(3));

    Info.flags = MachineMemOperand::MOLoad |
                 MachineMemOperand::MOStore |
                 MachineMemOperand::MODereferenceable;

    // XXX - Should this be volatile without known ordering?
    Info.flags |= MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::amdgcn_tbuffer_load:
  case Intrinsic::amdgcn_buffer_load:
  case Intrinsic::amdgcn_buffer_load_format: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = MFI->getBufferPSV(
      *MF.getSubtarget<SISubtarget>().getInstrInfo(),
      CI.getArgOperand(0));
    Info.memVT = MVT::getVT(CI.getType());
    Info.flags = MachineMemOperand::MOLoad |
                 MachineMemOperand::MODereferenceable;

    // There is a constant offset component, but there are additional register
    // offsets which could break AA if we set the offset to anything non-0.
    return true;
  }
  case Intrinsic::amdgcn_tbuffer_store:
  case Intrinsic::amdgcn_buffer_store:
  case Intrinsic::amdgcn_buffer_store_format: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.opc = ISD::INTRINSIC_VOID;
    Info.ptrVal = MFI->getBufferPSV(
      *MF.getSubtarget<SISubtarget>().getInstrInfo(),
      CI.getArgOperand(1));
    Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
    Info.flags = MachineMemOperand::MOStore |
                 MachineMemOperand::MODereferenceable;
    return true;
  }
  case Intrinsic::amdgcn_buffer_atomic_swap:
  case Intrinsic::amdgcn_buffer_atomic_add:
  case Intrinsic::amdgcn_buffer_atomic_sub:
  case Intrinsic::amdgcn_buffer_atomic_smin:
  case Intrinsic::amdgcn_buffer_atomic_umin:
  case Intrinsic::amdgcn_buffer_atomic_smax:
  case Intrinsic::amdgcn_buffer_atomic_umax:
  case Intrinsic::amdgcn_buffer_atomic_and:
  case Intrinsic::amdgcn_buffer_atomic_or:
  case Intrinsic::amdgcn_buffer_atomic_xor: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = MFI->getBufferPSV(
      *MF.getSubtarget<SISubtarget>().getInstrInfo(),
      CI.getArgOperand(1));
    Info.memVT = MVT::getVT(CI.getType());
    Info.flags = MachineMemOperand::MOLoad |
                 MachineMemOperand::MOStore |
                 MachineMemOperand::MODereferenceable |
                 MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = MFI->getBufferPSV(
      *MF.getSubtarget<SISubtarget>().getInstrInfo(),
      CI.getArgOperand(2));
    Info.memVT = MVT::getVT(CI.getType());
    Info.flags = MachineMemOperand::MOLoad |
                 MachineMemOperand::MOStore |
                 MachineMemOperand::MODereferenceable |
                 MachineMemOperand::MOVolatile;
    return true;
  }
  default:
    return false;
  }
}
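
// Report which operands of a memory intrinsic act as pointers, so
// addressing-mode optimizations (e.g. in CodeGenPrepare) can sink address
// computations next to the access.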
bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and is treated as a 12-bit unsigned offset.

  // Just r + i
  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
}

bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
  if (Subtarget->hasFlatGlobalInsts())
    return isInt<13>(AM.BaseOffs) && AM.Scale == 0;

  if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
    // Assume that we will use FLAT for all global memory accesses
    // on VI.
    // FIXME: This assumption is currently wrong. On VI we still use
    // MUBUF instructions for the r + i addressing mode. As currently
    // implemented, the MUBUF instructions only work on buffers < 4GB.
    // It may be possible to support > 4GB buffers with MUBUF instructions,
    // by setting the stride value in the resource descriptor which would
    // increase the size limit to (stride * 4GB). However, this is risky,
    // because it has never been validated.
    return isLegalFlatAddressingMode(AM);
  }

  return isLegalMUBUFAddressingMode(AM);
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with the offen bit set, so they are
  // slightly different from the normal addr64 forms.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split the immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r,
    // or 2 * r + i as r + r + i.
    return true;
  default: // Don't allow n * r.
    return false;
  }
}

bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS, Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  if (AS == AMDGPUASI.GLOBAL_ADDRESS)
    return isLegalGlobalAddressingMode(AM);

  if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (DL.getTypeStoreSize(Ty) < 4)
      return isLegalGlobalAddressingMode(AM);

    if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8 bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is a 20-bit byte
      // offset.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;

  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS ||
             AS == AMDGPUASI.REGION_ADDRESS) {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUASI.FLAT_ADDRESS ||
             AS == AMDGPUASI.UNKNOWN_ADDRESS_SPACE) {
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);
  } else {
    llvm_unreachable("unhandled address space");
  }
}

bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
                                        const SelectionDAG &DAG) const {
  if (AS == AMDGPUASI.GLOBAL_ADDRESS || AS == AMDGPUASI.FLAT_ADDRESS) {
    return (MemVT.getSizeInBits() <= 4 * 32);
  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
    return (MemVT.getSizeInBits() <= MaxPrivateBits);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
    return (MemVT.getSizeInBits() <= 2 * 32);
  }
  return true;
}
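
// Decide whether a misaligned access of VT in AddrSpace is legal at all, and
// report through *IsFast whether it is expected to be as fast as an aligned
// access.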
bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUASI.LOCAL_ADDRESS ||
      AddrSpace == AMDGPUASI.REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4-byte
    // aligned, 8-byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch. If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUASI.PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUASI.FLAT_ADDRESS)) {
    return false;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have a uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS) ?
        (Align % 4 == 0) : true;
    }

    return true;
  }

  // Accesses smaller than a dword must be aligned.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                          unsigned SrcAlign, bool IsMemset,
                                          bool ZeroMemset,
                                          bool MemcpyStrSrc,
                                          MachineFunction &MF) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type to
  // use. Make sure we switch these to 64-bit accesses.

  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
    return MVT::v4i32;

  if (Size >= 8 && DstAlign >= 4)
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}
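
// Global, flat, and constant pointers share the same underlying flat 64-bit
// representation, so address space casts among them are no-ops.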
static bool isFlatGlobalAddrSpace(unsigned AS, AMDGPUAS AMDGPUASI) {
  return AS == AMDGPUASI.GLOBAL_ADDRESS ||
         AS == AMDGPUASI.FLAT_ADDRESS ||
         AS == AMDGPUASI.CONSTANT_ADDRESS;
}

bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  return isFlatGlobalAddrSpace(SrcAS, AMDGPUASI) &&
         isFlatGlobalAddrSpace(DestAS, AMDGPUASI);
}

bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();
  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.noclobber");
}

bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
                                            unsigned DestAS) const {
  // Flat -> private/local is a simple truncate.
  // Flat -> global is a no-op.
  if (SrcAS == AMDGPUASI.FLAT_ADDRESS)
    return true;

  return isNoopAddrSpaceCast(SrcAS, DestAS);
}

bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);

  return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
}

TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(EVT VT) const {
  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
    return TypeSplitVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                         Type *Ty) const {
  // FIXME: Could be smarter if called for vector constants.
  return true;
}

bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
  if (Subtarget->has16BitInsts() && VT == MVT::i16) {
    switch (Op) {
    case ISD::LOAD:
    case ISD::STORE:

    // These operations are done with 32-bit instructions anyway.
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SELECT:
      // TODO: Extensions?
      return true;
    default:
      return false;
    }
  }

  // SimplifySetCC uses this function to determine whether or not it should
  // create setcc with i1 operands. We don't have instructions for i1 setcc.
  if (VT == MVT::i1 && Op == ISD::SETCC)
    return false;

  return TargetLowering::isTypeDesirableForOp(Op, VT);
}

SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
                                                   const SDLoc &SL,
                                                   SDValue Chain,
                                                   uint64_t Offset) const {
  const DataLayout &DL = DAG.getDataLayout();
  MachineFunction &MF = DAG.getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  const ArgDescriptor *InputPtrReg;
  const TargetRegisterClass *RC;

  std::tie(InputPtrReg, RC)
    = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);

  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  MVT PtrVT = getPointerTy(DL, AMDGPUASI.CONSTANT_ADDRESS);
  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
    MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);

  return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                     DAG.getConstant(Offset, SL, PtrVT));
}

SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
                                            const SDLoc &SL) const {
  auto MFI = DAG.getMachineFunction().getInfo<SIMachineFunctionInfo>();
  uint64_t Offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
  return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
}
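
// Convert a value loaded for an argument to the type the function expects,
// inserting AssertZext/AssertSext from the argument's extension flags before
// truncating or extending.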
SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                         const SDLoc &SL, SDValue Val,
                                         bool Signed,
                                         const ISD::InputArg *Arg) const {
  if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
      VT.bitsLT(MemVT)) {
    unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
    Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
  }

  if (MemVT.isFloatingPoint())
    Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
  else if (Signed)
    Val = DAG.getSExtOrTrunc(Val, SL, VT);
  else
    Val = DAG.getZExtOrTrunc(Val, SL, VT);

  return Val;
}

SDValue SITargetLowering::lowerKernargMemParameter(
  SelectionDAG &DAG, EVT VT, EVT MemVT,
  const SDLoc &SL, SDValue Chain,
  uint64_t Offset, bool Signed,
  const ISD::InputArg *Arg) const {
  const DataLayout &DL = DAG.getDataLayout();
  Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  unsigned Align = DL.getABITypeAlignment(Ty);

  SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
  SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
                             MachineMemOperand::MONonTemporal |
                             MachineMemOperand::MODereferenceable |
                             MachineMemOperand::MOInvariant);

  SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
  return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
}
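
// Lower an incoming argument that was assigned a stack location: byval
// arguments become fixed frame indexes, everything else becomes an
// (optionally extending) load from a fixed stack object.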
SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
                                              const SDLoc &SL, SDValue Chain,
                                              const ISD::InputArg &Arg) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (Arg.Flags.isByVal()) {
    unsigned Size = Arg.Flags.getByValSize();
    int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
    return DAG.getFrameIndex(FrameIdx, MVT::i32);
  }

  unsigned ArgOffset = VA.getLocMemOffset();
  unsigned ArgSize = VA.getValVT().getStoreSize();

  int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);

  // Create load nodes to retrieve arguments from the stack.
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  SDValue ArgValue;

  // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
  ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
  MVT MemVT = VA.getValVT();

  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::BCvt:
    MemVT = VA.getLocVT();
    break;
  case CCValAssign::SExt:
    ExtType = ISD::SEXTLOAD;
    break;
  case CCValAssign::ZExt:
    ExtType = ISD::ZEXTLOAD;
    break;
  case CCValAssign::AExt:
    ExtType = ISD::EXTLOAD;
    break;
  }

  ArgValue = DAG.getExtLoad(
    ExtType, SL, VA.getLocVT(), Chain, FIN,
    MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
    MemVT);
  return ArgValue;
}

SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
  const SIMachineFunctionInfo &MFI,
  EVT VT,
  AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
  const ArgDescriptor *Reg;
  const TargetRegisterClass *RC;

  std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
  return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
}

static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
                                   CallingConv::ID CallConv,
                                   ArrayRef<ISD::InputArg> Ins,
                                   BitVector &Skipped,
                                   FunctionType *FType,
                                   SIMachineFunctionInfo *Info) {
  for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
    const ISD::InputArg &Arg = Ins[I];

    // First, check if it's a PS input addr.
    if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
        !Arg.Flags.isByVal() && PSInputNum <= 15) {

      if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
        // We can safely skip PS inputs.
        Skipped.set(I);
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (Arg.Used)
        Info->markPSInputEnabled(PSInputNum);

      ++PSInputNum;
    }

    // Second, split vertices into their elements.
    if (Arg.VT.isVector()) {
      ISD::InputArg NewArg = Arg;
      NewArg.Flags.setSplit();
      NewArg.VT = Arg.VT.getVectorElementType();

      // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
      // three or five element vertex only needs three or five registers,
      // NOT four or eight.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      for (unsigned J = 0; J != NumElements; ++J) {
        Splits.push_back(NewArg);
        NewArg.PartOffset += NewArg.VT.getStoreSize();
      }
    } else {
      Splits.push_back(Arg);
    }
  }
}

// Allocate special inputs passed in VGPRs.
static void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
                                           MachineFunction &MF,
                                           const SIRegisterInfo &TRI,
                                           SIMachineFunctionInfo &Info) {
  if (Info.hasWorkItemIDX()) {
    unsigned Reg = AMDGPU::VGPR0;
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);

    CCInfo.AllocateReg(Reg);
    Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
  }

  if (Info.hasWorkItemIDY()) {
    unsigned Reg = AMDGPU::VGPR1;
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);

    CCInfo.AllocateReg(Reg);
    Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
  }

  if (Info.hasWorkItemIDZ()) {
    unsigned Reg = AMDGPU::VGPR2;
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);

    CCInfo.AllocateReg(Reg);
    Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
  }
}

// Try to allocate a VGPR at the end of the argument list, or if no argument
// VGPRs are left, allocate a stack slot.
static ArgDescriptor allocateVGPR32Input(CCState &CCInfo) {
  ArrayRef<MCPhysReg> ArgVGPRs
    = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
  unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
  if (RegIdx == ArgVGPRs.size()) {
    // Spill to stack required.
    int64_t Offset = CCInfo.AllocateStack(4, 4);

    return ArgDescriptor::createStack(Offset);
  }

  unsigned Reg = ArgVGPRs[RegIdx];
  Reg = CCInfo.AllocateReg(Reg);
  assert(Reg != AMDGPU::NoRegister);

  MachineFunction &MF = CCInfo.getMachineFunction();
  MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
  return ArgDescriptor::createRegister(Reg);
}

static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
                                             const TargetRegisterClass *RC,
                                             unsigned NumArgRegs) {
  ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
  unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
  if (RegIdx == ArgSGPRs.size())
    report_fatal_error("ran out of SGPRs for arguments");

  unsigned Reg = ArgSGPRs[RegIdx];
  Reg = CCInfo.AllocateReg(Reg);
  assert(Reg != AMDGPU::NoRegister);

  MachineFunction &MF = CCInfo.getMachineFunction();
  MF.addLiveIn(Reg, RC);
  return ArgDescriptor::createRegister(Reg);
}

static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
  return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
}

static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
  return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
}

static void allocateSpecialInputVGPRs(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) {
  if (Info.hasWorkItemIDX())
    Info.setWorkItemIDX(allocateVGPR32Input(CCInfo));

  if (Info.hasWorkItemIDY())
    Info.setWorkItemIDY(allocateVGPR32Input(CCInfo));

  if (Info.hasWorkItemIDZ())
    Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo));
}
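
// Allocate SGPRs for the special inputs a non-kernel callee may use; unlike
// kernel user SGPRs, these are drawn from the normal argument register pool
// shared with ordinary SGPR arguments.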
static void allocateSpecialInputSGPRs(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) {
  auto &ArgInfo = Info.getArgInfo();

  // TODO: Unify handling with private memory pointers.

  if (Info.hasDispatchPtr())
    ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);

  if (Info.hasQueuePtr())
    ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);

  if (Info.hasKernargSegmentPtr())
    ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);

  if (Info.hasDispatchID())
    ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);

  // flat_scratch_init is not applicable for non-kernel functions.

  if (Info.hasWorkGroupIDX())
    ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);

  if (Info.hasWorkGroupIDY())
    ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);

  if (Info.hasWorkGroupIDZ())
    ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);

  if (Info.hasImplicitArgPtr())
    ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  if (Info.hasImplicitBufferPtr()) {
    unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    unsigned QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
    MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    unsigned DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.
}

// Allocate special input registers that are initialized per-wave.
static void allocateSystemSGPRs(CCState &CCInfo,
                                MachineFunction &MF,
                                SIMachineFunctionInfo &Info,
                                CallingConv::ID CallConv,
                                bool IsShader) {
  if (Info.hasWorkGroupIDX()) {
    unsigned Reg = Info.addWorkGroupIDX();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDY()) {
    unsigned Reg = Info.addWorkGroupIDY();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDZ()) {
    unsigned Reg = Info.addWorkGroupIDZ();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupInfo()) {
    unsigned Reg = Info.addWorkGroupInfo();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg;

    if (IsShader) {
      PrivateSegmentWaveByteOffsetReg =
        Info.getPrivateSegmentWaveByteOffsetSystemSGPR();

      // This is true if the scratch wave byte offset doesn't have a fixed
      // location.
      if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
        PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
        Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
      }
    } else
      PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }
}

static void reservePrivateMemoryRegs(const TargetMachine &TM,
                                     MachineFunction &MF,
                                     const SIRegisterInfo &TRI,
                                     SIMachineFunctionInfo &Info) {
  // Now that we've figured out where the scratch register inputs are, see if
  // we should reserve the arguments and use them directly.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool HasStackObjects = MFI.hasStackObjects();

  // Record that we know we have non-spill stack objects so we don't need to
  // check all stack objects later.
  if (HasStackObjects)
    Info.setHasNonSpillStackObjects(true);

  // Everything live out of a block is spilled with fast regalloc, so it's
  // almost certain that spilling will be required.
  if (TM.getOptLevel() == CodeGenOpt::None)
    HasStackObjects = true;

  // For now assume stack access is needed in any callee functions, so we need
  // the scratch registers to pass in.
  bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  if (ST.isAmdCodeObjectV2(MF)) {
    if (RequiresStackAccess) {
      // If we have stack objects, we unquestionably need the private buffer
      // resource. For the Code Object V2 ABI, this will be the first 4 user
      // SGPR inputs. We can reserve those and use them directly.

      unsigned PrivateSegmentBufferReg = Info.getPreloadedReg(
        AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
      Info.setScratchRSrcReg(PrivateSegmentBufferReg);

      if (MFI.hasCalls()) {
        // If we have calls, we need to keep the frame register in a register
        // that won't be clobbered by a call, so ensure it is copied somewhere.

        // This is not a problem for the scratch wave offset, because the same
        // registers are reserved in all functions.
1583 1584 // FIXME: Nothing is really ensuring this is a call preserved register, 1585 // it's just selected from the end so it happens to be. 1586 unsigned ReservedOffsetReg 1587 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF); 1588 Info.setScratchWaveOffsetReg(ReservedOffsetReg); 1589 } else { 1590 unsigned PrivateSegmentWaveByteOffsetReg = Info.getPreloadedReg( 1591 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); 1592 Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg); 1593 } 1594 } else { 1595 unsigned ReservedBufferReg 1596 = TRI.reservedPrivateSegmentBufferReg(MF); 1597 unsigned ReservedOffsetReg 1598 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF); 1599 1600 // We tentatively reserve the last registers (skipping the last two 1601 // which may contain VCC). After register allocation, we'll replace 1602 // these with the ones immediately after those which were really 1603 // allocated. In the prologue, copies will be inserted from the arguments 1604 // to these reserved registers. 1605 Info.setScratchRSrcReg(ReservedBufferReg); 1606 Info.setScratchWaveOffsetReg(ReservedOffsetReg); 1607 } 1608 } else { 1609 unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF); 1610 1611 // Without HSA, relocations are used for the scratch pointer and the 1612 // buffer resource setup is always inserted in the prologue. Scratch wave 1613 // offset is still in an input SGPR. 1614 Info.setScratchRSrcReg(ReservedBufferReg); 1615 1616 if (HasStackObjects && !MFI.hasCalls()) { 1617 unsigned ScratchWaveOffsetReg = Info.getPreloadedReg( 1618 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); 1619 Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg); 1620 } else { 1621 unsigned ReservedOffsetReg 1622 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF); 1623 Info.setScratchWaveOffsetReg(ReservedOffsetReg); 1624 } 1625 } 1626 } 1627 1628 bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const { 1629 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 1630 return !Info->isEntryFunction(); 1631 } 1632 1633 void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { 1634 1635 } 1636 1637 void SITargetLowering::insertCopiesSplitCSR( 1638 MachineBasicBlock *Entry, 1639 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 1640 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 1641 1642 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); 1643 if (!IStart) 1644 return; 1645 1646 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 1647 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); 1648 MachineBasicBlock::iterator MBBI = Entry->begin(); 1649 for (const MCPhysReg *I = IStart; *I; ++I) { 1650 const TargetRegisterClass *RC = nullptr; 1651 if (AMDGPU::SReg_64RegClass.contains(*I)) 1652 RC = &AMDGPU::SGPR_64RegClass; 1653 else if (AMDGPU::SReg_32RegClass.contains(*I)) 1654 RC = &AMDGPU::SGPR_32RegClass; 1655 else 1656 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 1657 1658 unsigned NewVR = MRI->createVirtualRegister(RC); 1659 // Create copy from CSR to a virtual register. 1660 Entry->addLiveIn(*I); 1661 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) 1662 .addReg(*I); 1663 1664 // Insert the copy-back instructions right before the terminator.
1665 for (auto *Exit : Exits) 1666 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), 1667 TII->get(TargetOpcode::COPY), *I) 1668 .addReg(NewVR); 1669 } 1670 } 1671 1672 SDValue SITargetLowering::LowerFormalArguments( 1673 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 1674 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 1675 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 1676 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 1677 1678 MachineFunction &MF = DAG.getMachineFunction(); 1679 FunctionType *FType = MF.getFunction().getFunctionType(); 1680 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 1681 const SISubtarget &ST = MF.getSubtarget<SISubtarget>(); 1682 1683 if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) { 1684 const Function &Fn = MF.getFunction(); 1685 DiagnosticInfoUnsupported NoGraphicsHSA( 1686 Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc()); 1687 DAG.getContext()->diagnose(NoGraphicsHSA); 1688 return DAG.getEntryNode(); 1689 } 1690 1691 // Create stack objects that are used for emitting the debugger prologue if 1692 // the "amdgpu-debugger-emit-prologue" attribute was specified. 1693 if (ST.debuggerEmitPrologue()) 1694 createDebuggerPrologueStackObjects(MF); 1695 1696 SmallVector<ISD::InputArg, 16> Splits; 1697 SmallVector<CCValAssign, 16> ArgLocs; 1698 BitVector Skipped(Ins.size()); 1699 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 1700 *DAG.getContext()); 1701 1702 bool IsShader = AMDGPU::isShader(CallConv); 1703 bool IsKernel = AMDGPU::isKernel(CallConv); 1704 bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv); 1705 1706 if (!IsEntryFunc) { 1707 // 4 bytes are reserved at offset 0 for the emergency stack slot. Skip over 1708 // this when allocating argument fixed offsets. 1709 CCInfo.AllocateStack(4, 4); 1710 } 1711 1712 if (IsShader) { 1713 processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info); 1714 1715 // At least one interpolation mode must be enabled or else the GPU will 1716 // hang. 1717 // 1718 // Check PSInputAddr instead of PSInputEnable. The idea is that if the user 1719 // set PSInputAddr, the user wants to enable some bits after compilation 1720 // based on run-time states. Since we can't know what the final PSInputEna 1721 // will look like, we shouldn't do anything here, and the user should take 1722 // responsibility for the correct programming. 1723 // 1724 // Otherwise, the following restrictions apply: 1725 // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled. 1726 // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be 1727 // enabled too. 1728 if (CallConv == CallingConv::AMDGPU_PS) { 1729 if ((Info->getPSInputAddr() & 0x7F) == 0 || 1730 ((Info->getPSInputAddr() & 0xF) == 0 && 1731 Info->isPSInputAllocated(11))) { 1732 CCInfo.AllocateReg(AMDGPU::VGPR0); 1733 CCInfo.AllocateReg(AMDGPU::VGPR1); 1734 Info->markPSInputAllocated(0); 1735 Info->markPSInputEnabled(0); 1736 } 1737 if (Subtarget->isAmdPalOS()) { 1738 // For isAmdPalOS, the user does not enable some bits after compilation 1739 // based on run-time states; the register values being generated here are 1740 // the final ones set in hardware. Therefore we need to apply the 1741 // workaround to PSInputAddr and PSInputEnable together.
(The case where 1742 // a bit is set in PSInputAddr but not PSInputEnable is the one where the 1743 // frontend set up an input arg for a particular interpolation mode, but 1744 // nothing uses that input arg. Really we should have an earlier pass 1745 // that removes such an arg.) 1746 unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable(); 1747 if ((PsInputBits & 0x7F) == 0 || 1748 ((PsInputBits & 0xF) == 0 && 1749 (PsInputBits >> 11 & 1))) 1750 Info->markPSInputEnabled( 1751 countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined)); 1752 } 1753 } 1754 1755 assert(!Info->hasDispatchPtr() && 1756 !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() && 1757 !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() && 1758 !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() && 1759 !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() && 1760 !Info->hasWorkItemIDZ()); 1761 } else if (IsKernel) { 1762 assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX()); 1763 } else { 1764 Splits.append(Ins.begin(), Ins.end()); 1765 } 1766 1767 if (IsEntryFunc) { 1768 allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info); 1769 allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info); 1770 } 1771 1772 if (IsKernel) { 1773 analyzeFormalArgumentsCompute(CCInfo, Ins); 1774 } else { 1775 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg); 1776 CCInfo.AnalyzeFormalArguments(Splits, AssignFn); 1777 } 1778 1779 SmallVector<SDValue, 16> Chains; 1780 1781 for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) { 1782 const ISD::InputArg &Arg = Ins[i]; 1783 if (Skipped[i]) { 1784 InVals.push_back(DAG.getUNDEF(Arg.VT)); 1785 continue; 1786 } 1787 1788 CCValAssign &VA = ArgLocs[ArgIdx++]; 1789 MVT VT = VA.getLocVT(); 1790 1791 if (IsEntryFunc && VA.isMemLoc()) { 1792 VT = Ins[i].VT; 1793 EVT MemVT = VA.getLocVT(); 1794 1795 const uint64_t Offset = Subtarget->getExplicitKernelArgOffset(MF) + 1796 VA.getLocMemOffset(); 1797 Info->setABIArgOffset(Offset + MemVT.getStoreSize()); 1798 1799 // The first 36 bytes of the input buffer contain information about 1800 // thread group and global sizes. 1801 SDValue Arg = lowerKernargMemParameter( 1802 DAG, VT, MemVT, DL, Chain, Offset, Ins[i].Flags.isSExt(), &Ins[i]); 1803 Chains.push_back(Arg.getValue(1)); 1804 1805 auto *ParamTy = 1806 dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex())); 1807 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS && 1808 ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { 1809 // On SI local pointers are just offsets into LDS, so they are always 1810 // less than 16 bits. On CI and newer they could potentially be 1811 // real pointers, so we can't guarantee their size.
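// Illustrative DAG shape (a hypothetical example, not emitted verbatim): for an LDS (local) pointer kernel argument on SI, the loaded value t1 gets wrapped as // t2 = AssertZext t1, ValueType:i16 // so later known-bits queries can treat everything above bit 15 as zero.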
1812 Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg, 1813 DAG.getValueType(MVT::i16)); 1814 } 1815 1816 InVals.push_back(Arg); 1817 continue; 1818 } else if (!IsEntryFunc && VA.isMemLoc()) { 1819 SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg); 1820 InVals.push_back(Val); 1821 if (!Arg.Flags.isByVal()) 1822 Chains.push_back(Val.getValue(1)); 1823 continue; 1824 } 1825 1826 assert(VA.isRegLoc() && "Parameter must be in a register!"); 1827 1828 unsigned Reg = VA.getLocReg(); 1829 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); 1830 EVT ValVT = VA.getValVT(); 1831 1832 Reg = MF.addLiveIn(Reg, RC); 1833 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT); 1834 1835 if (Arg.Flags.isSRet() && !getSubtarget()->enableHugePrivateBuffer()) { 1836 // The return object should be reasonably addressable. 1837 1838 // FIXME: This helps when the return is a real sret. If it is an 1839 // automatically inserted sret (i.e. CanLowerReturn returns false), an 1840 // extra copy is inserted in SelectionDAGBuilder which obscures this. 1841 unsigned NumBits = 32 - AssumeFrameIndexHighZeroBits; 1842 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, 1843 DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits))); 1844 } 1845 1846 // If this is an 8 or 16-bit value, it is really passed promoted 1847 // to 32 bits. Insert an assert[sz]ext to capture this, then 1848 // truncate to the right size. 1849 switch (VA.getLocInfo()) { 1850 case CCValAssign::Full: 1851 break; 1852 case CCValAssign::BCvt: 1853 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val); 1854 break; 1855 case CCValAssign::SExt: 1856 Val = DAG.getNode(ISD::AssertSext, DL, VT, Val, 1857 DAG.getValueType(ValVT)); 1858 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); 1859 break; 1860 case CCValAssign::ZExt: 1861 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, 1862 DAG.getValueType(ValVT)); 1863 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); 1864 break; 1865 case CCValAssign::AExt: 1866 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); 1867 break; 1868 default: 1869 llvm_unreachable("Unknown loc info!"); 1870 } 1871 1872 if (IsShader && Arg.VT.isVector()) { 1873 // Build a vector from the registers 1874 Type *ParamType = FType->getParamType(Arg.getOrigArgIndex()); 1875 unsigned NumElements = ParamType->getVectorNumElements(); 1876 1877 SmallVector<SDValue, 4> Regs; 1878 Regs.push_back(Val); 1879 for (unsigned j = 1; j != NumElements; ++j) { 1880 Reg = ArgLocs[ArgIdx++].getLocReg(); 1881 Reg = MF.addLiveIn(Reg, RC); 1882 1883 SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT); 1884 Regs.push_back(Copy); 1885 } 1886 1887 // Fill up the missing vector elements 1888 NumElements = Arg.VT.getVectorNumElements() - NumElements; 1889 Regs.append(NumElements, DAG.getUNDEF(VT)); 1890 1891 InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs)); 1892 continue; 1893 } 1894 1895 InVals.push_back(Val); 1896 } 1897 1898 if (!IsEntryFunc) { 1899 // Special inputs come after user arguments. 1900 allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info); 1901 } 1902 1903 // Start adding system SGPRs.
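// For entry functions these are the per-wave system SGPRs (workgroup IDs, workgroup info, scratch wave offset). Callable functions instead record the scratch rsrc, scratch wave offset and frame offset registers as in use, then allocate their special input SGPRs.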
1904 if (IsEntryFunc) { 1905 allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader); 1906 } else { 1907 CCInfo.AllocateReg(Info->getScratchRSrcReg()); 1908 CCInfo.AllocateReg(Info->getScratchWaveOffsetReg()); 1909 CCInfo.AllocateReg(Info->getFrameOffsetReg()); 1910 allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info); 1911 } 1912 1913 auto &ArgUsageInfo = 1914 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); 1915 ArgUsageInfo.setFuncArgInfo(MF.getFunction(), Info->getArgInfo()); 1916 1917 unsigned StackArgSize = CCInfo.getNextStackOffset(); 1918 Info->setBytesInStackArgArea(StackArgSize); 1919 1920 return Chains.empty() ? Chain : 1921 DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 1922 } 1923 1924 // TODO: If return values can't fit in registers, we should return as many as 1925 // possible in registers before passing on stack. 1926 bool SITargetLowering::CanLowerReturn( 1927 CallingConv::ID CallConv, 1928 MachineFunction &MF, bool IsVarArg, 1929 const SmallVectorImpl<ISD::OutputArg> &Outs, 1930 LLVMContext &Context) const { 1931 // Replacing returns with sret/stack usage doesn't make sense for shaders. 1932 // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn 1933 // for shaders. Vector types should be explicitly handled by CC. 1934 if (AMDGPU::isEntryFunctionCC(CallConv)) 1935 return true; 1936 1937 SmallVector<CCValAssign, 16> RVLocs; 1938 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); 1939 return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg)); 1940 } 1941 1942 SDValue 1943 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 1944 bool isVarArg, 1945 const SmallVectorImpl<ISD::OutputArg> &Outs, 1946 const SmallVectorImpl<SDValue> &OutVals, 1947 const SDLoc &DL, SelectionDAG &DAG) const { 1948 MachineFunction &MF = DAG.getMachineFunction(); 1949 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 1950 1951 if (AMDGPU::isKernel(CallConv)) { 1952 return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs, 1953 OutVals, DL, DAG); 1954 } 1955 1956 bool IsShader = AMDGPU::isShader(CallConv); 1957 1958 Info->setIfReturnsVoid(Outs.size() == 0); 1959 bool IsWaveEnd = Info->returnsVoid() && IsShader; 1960 1961 SmallVector<ISD::OutputArg, 48> Splits; 1962 SmallVector<SDValue, 48> SplitVals; 1963 1964 // Split vectors into their elements. 1965 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 1966 const ISD::OutputArg &Out = Outs[i]; 1967 1968 if (IsShader && Out.VT.isVector()) { 1969 MVT VT = Out.VT.getVectorElementType(); 1970 ISD::OutputArg NewOut = Out; 1971 NewOut.Flags.setSplit(); 1972 NewOut.VT = VT; 1973 1974 // We want the original number of vector elements here, e.g. 1975 // three or five, not four or eight. 1976 unsigned NumElements = Out.ArgVT.getVectorNumElements(); 1977 1978 for (unsigned j = 0; j != NumElements; ++j) { 1979 SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i], 1980 DAG.getConstant(j, DL, MVT::i32)); 1981 SplitVals.push_back(Elem); 1982 Splits.push_back(NewOut); 1983 NewOut.PartOffset += NewOut.VT.getStoreSize(); 1984 } 1985 } else { 1986 SplitVals.push_back(OutVals[i]); 1987 Splits.push_back(Out); 1988 } 1989 } 1990 1991 // CCValAssign - represent the assignment of the return value to a location. 1992 SmallVector<CCValAssign, 48> RVLocs; 1993 1994 // CCState - Info about the registers and stack slots. 
1995 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 1996 *DAG.getContext()); 1997 1998 // Analyze outgoing return values. 1999 CCInfo.AnalyzeReturn(Splits, CCAssignFnForReturn(CallConv, isVarArg)); 2000 2001 SDValue Flag; 2002 SmallVector<SDValue, 48> RetOps; 2003 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 2004 2005 // Add return address for callable functions. 2006 if (!Info->isEntryFunction()) { 2007 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2008 SDValue ReturnAddrReg = CreateLiveInRegister( 2009 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64); 2010 2011 // FIXME: Should be able to use a vreg here, but need a way to prevent it 2012 // from being allocated to a CSR. 2013 2014 SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF), 2015 MVT::i64); 2016 2017 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag); 2018 Flag = Chain.getValue(1); 2019 2020 RetOps.push_back(PhysReturnAddrReg); 2021 } 2022 2023 // Copy the result values into the output registers. 2024 for (unsigned i = 0, realRVLocIdx = 0; 2025 i != RVLocs.size(); 2026 ++i, ++realRVLocIdx) { 2027 CCValAssign &VA = RVLocs[i]; 2028 assert(VA.isRegLoc() && "Can only return in registers!"); 2029 // TODO: Partially return in registers if return values don't fit. 2030 2031 SDValue Arg = SplitVals[realRVLocIdx]; 2032 2033 // Copied from other backends. 2034 switch (VA.getLocInfo()) { 2035 case CCValAssign::Full: 2036 break; 2037 case CCValAssign::BCvt: 2038 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); 2039 break; 2040 case CCValAssign::SExt: 2041 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); 2042 break; 2043 case CCValAssign::ZExt: 2044 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); 2045 break; 2046 case CCValAssign::AExt: 2047 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); 2048 break; 2049 default: 2050 llvm_unreachable("Unknown loc info!"); 2051 } 2052 2053 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag); 2054 Flag = Chain.getValue(1); 2055 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2056 } 2057 2058 // FIXME: Does sret work properly? 2059 if (!Info->isEntryFunction()) { 2060 const SIRegisterInfo *TRI 2061 = static_cast<const SISubtarget *>(Subtarget)->getRegisterInfo(); 2062 const MCPhysReg *I = 2063 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 2064 if (I) { 2065 for (; *I; ++I) { 2066 if (AMDGPU::SReg_64RegClass.contains(*I)) 2067 RetOps.push_back(DAG.getRegister(*I, MVT::i64)); 2068 else if (AMDGPU::SReg_32RegClass.contains(*I)) 2069 RetOps.push_back(DAG.getRegister(*I, MVT::i32)); 2070 else 2071 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 2072 } 2073 } 2074 } 2075 2076 // Update chain and glue. 2077 RetOps[0] = Chain; 2078 if (Flag.getNode()) 2079 RetOps.push_back(Flag); 2080 2081 unsigned Opc = AMDGPUISD::ENDPGM; 2082 if (!IsWaveEnd) 2083 Opc = IsShader ?
AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG; 2084 return DAG.getNode(Opc, DL, MVT::Other, RetOps); 2085 } 2086 2087 SDValue SITargetLowering::LowerCallResult( 2088 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg, 2089 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 2090 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn, 2091 SDValue ThisVal) const { 2092 CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg); 2093 2094 // Assign locations to each value returned by this call. 2095 SmallVector<CCValAssign, 16> RVLocs; 2096 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 2097 *DAG.getContext()); 2098 CCInfo.AnalyzeCallResult(Ins, RetCC); 2099 2100 // Copy all of the result registers out of their specified physreg. 2101 for (unsigned i = 0; i != RVLocs.size(); ++i) { 2102 CCValAssign VA = RVLocs[i]; 2103 SDValue Val; 2104 2105 if (VA.isRegLoc()) { 2106 Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag); 2107 Chain = Val.getValue(1); 2108 InFlag = Val.getValue(2); 2109 } else if (VA.isMemLoc()) { 2110 report_fatal_error("TODO: return values in memory"); 2111 } else 2112 llvm_unreachable("unknown argument location type"); 2113 2114 switch (VA.getLocInfo()) { 2115 case CCValAssign::Full: 2116 break; 2117 case CCValAssign::BCvt: 2118 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); 2119 break; 2120 case CCValAssign::ZExt: 2121 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val, 2122 DAG.getValueType(VA.getValVT())); 2123 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2124 break; 2125 case CCValAssign::SExt: 2126 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val, 2127 DAG.getValueType(VA.getValVT())); 2128 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2129 break; 2130 case CCValAssign::AExt: 2131 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2132 break; 2133 default: 2134 llvm_unreachable("Unknown loc info!"); 2135 } 2136 2137 InVals.push_back(Val); 2138 } 2139 2140 return Chain; 2141 } 2142 2143 // Add code to pass the special inputs required by the features in use, 2144 // separate from the explicit user arguments present in the IR. 2145 void SITargetLowering::passSpecialInputs( 2146 CallLoweringInfo &CLI, 2147 const SIMachineFunctionInfo &Info, 2148 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 2149 SmallVectorImpl<SDValue> &MemOpChains, 2150 SDValue Chain, 2151 SDValue StackPtr) const { 2152 // If we don't have a call site, this was a call inserted by 2153 // legalization. These can never use special inputs. 2154 if (!CLI.CS) 2155 return; 2156 2157 const Function *CalleeFunc = CLI.CS.getCalledFunction(); 2158 assert(CalleeFunc); 2159 2160 SelectionDAG &DAG = CLI.DAG; 2161 const SDLoc &DL = CLI.DL; 2162 2163 const SISubtarget *ST = getSubtarget(); 2164 const SIRegisterInfo *TRI = ST->getRegisterInfo(); 2165 2166 auto &ArgUsageInfo = 2167 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); 2168 const AMDGPUFunctionArgInfo &CalleeArgInfo 2169 = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc); 2170 2171 const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo(); 2172 2173 // TODO: Unify with private memory register handling. This is complicated by 2174 // the fact that at least in kernels, the input argument is not necessarily 2175 // in the same location as the input.
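// Illustrative flow for one entry (register choices hypothetical): if the callee needs WORKGROUP_ID_X, the loop below looks up the caller's own incoming location for that value and either appends (callee's expected SGPR, copy of the caller's value) to RegsToPass, or, for a stack-located input, queues a store relative to StackPtr on MemOpChains.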
2176 AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = { 2177 AMDGPUFunctionArgInfo::DISPATCH_PTR, 2178 AMDGPUFunctionArgInfo::QUEUE_PTR, 2179 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR, 2180 AMDGPUFunctionArgInfo::DISPATCH_ID, 2181 AMDGPUFunctionArgInfo::WORKGROUP_ID_X, 2182 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y, 2183 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z, 2184 AMDGPUFunctionArgInfo::WORKITEM_ID_X, 2185 AMDGPUFunctionArgInfo::WORKITEM_ID_Y, 2186 AMDGPUFunctionArgInfo::WORKITEM_ID_Z, 2187 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR 2188 }; 2189 2190 for (auto InputID : InputRegs) { 2191 const ArgDescriptor *OutgoingArg; 2192 const TargetRegisterClass *ArgRC; 2193 2194 std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID); 2195 if (!OutgoingArg) 2196 continue; 2197 2198 const ArgDescriptor *IncomingArg; 2199 const TargetRegisterClass *IncomingArgRC; 2200 std::tie(IncomingArg, IncomingArgRC) 2201 = CallerArgInfo.getPreloadedValue(InputID); 2202 assert(IncomingArgRC == ArgRC); 2203 2204 // All special arguments are ints for now. 2205 EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32; 2206 SDValue InputReg; 2207 2208 if (IncomingArg) { 2209 InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg); 2210 } else { 2211 // The implicit arg ptr is special because it doesn't have a corresponding 2212 // input for kernels, and is computed from the kernarg segment pointer. 2213 assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); 2214 InputReg = getImplicitArgPtr(DAG, DL); 2215 } 2216 2217 if (OutgoingArg->isRegister()) { 2218 RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg); 2219 } else { 2220 SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, StackPtr, 2221 InputReg, 2222 OutgoingArg->getStackOffset()); 2223 MemOpChains.push_back(ArgStore); 2224 } 2225 } 2226 } 2227 2228 static bool canGuaranteeTCO(CallingConv::ID CC) { 2229 return CC == CallingConv::Fast; 2230 } 2231 2232 /// Return true if we might ever do TCO for calls with this calling convention. 2233 static bool mayTailCallThisCC(CallingConv::ID CC) { 2234 switch (CC) { 2235 case CallingConv::C: 2236 return true; 2237 default: 2238 return canGuaranteeTCO(CC); 2239 } 2240 } 2241 2242 bool SITargetLowering::isEligibleForTailCallOptimization( 2243 SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg, 2244 const SmallVectorImpl<ISD::OutputArg> &Outs, 2245 const SmallVectorImpl<SDValue> &OutVals, 2246 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const { 2247 if (!mayTailCallThisCC(CalleeCC)) 2248 return false; 2249 2250 MachineFunction &MF = DAG.getMachineFunction(); 2251 const Function &CallerF = MF.getFunction(); 2252 CallingConv::ID CallerCC = CallerF.getCallingConv(); 2253 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2254 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); 2255 2256 // Kernels aren't callable, and don't have a live in return address so it 2257 // doesn't make sense to do a tail call with entry functions. 2258 if (!CallerPreserved) 2259 return false; 2260 2261 bool CCMatch = CallerCC == CalleeCC; 2262 2263 if (DAG.getTarget().Options.GuaranteedTailCallOpt) { 2264 if (canGuaranteeTCO(CalleeCC) && CCMatch) 2265 return true; 2266 return false; 2267 } 2268 2269 // TODO: Can we handle var args? 
2270 if (IsVarArg) 2271 return false; 2272 2273 for (const Argument &Arg : CallerF.args()) { 2274 if (Arg.hasByValAttr()) 2275 return false; 2276 } 2277 2278 LLVMContext &Ctx = *DAG.getContext(); 2279 2280 // Check that the call results are passed in the same way. 2281 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins, 2282 CCAssignFnForCall(CalleeCC, IsVarArg), 2283 CCAssignFnForCall(CallerCC, IsVarArg))) 2284 return false; 2285 2286 // The callee has to preserve all registers the caller needs to preserve. 2287 if (!CCMatch) { 2288 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); 2289 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) 2290 return false; 2291 } 2292 2293 // Nothing more to check if the callee is taking no arguments. 2294 if (Outs.empty()) 2295 return true; 2296 2297 SmallVector<CCValAssign, 16> ArgLocs; 2298 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx); 2299 2300 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg)); 2301 2302 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 2303 // If the stack arguments for this call do not fit into our own save area then 2304 // the call cannot be made tail. 2305 // TODO: Is this really necessary? 2306 if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) 2307 return false; 2308 2309 const MachineRegisterInfo &MRI = MF.getRegInfo(); 2310 return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals); 2311 } 2312 2313 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 2314 if (!CI->isTailCall()) 2315 return false; 2316 2317 const Function *ParentFn = CI->getParent()->getParent(); 2318 if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv())) 2319 return false; 2320 2321 auto Attr = ParentFn->getFnAttribute("disable-tail-calls"); 2322 return (Attr.getValueAsString() != "true"); 2323 } 2324 2325 // The wave scratch offset register is used as the global base pointer. 2326 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI, 2327 SmallVectorImpl<SDValue> &InVals) const { 2328 SelectionDAG &DAG = CLI.DAG; 2329 const SDLoc &DL = CLI.DL; 2330 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; 2331 SmallVector<SDValue, 32> &OutVals = CLI.OutVals; 2332 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; 2333 SDValue Chain = CLI.Chain; 2334 SDValue Callee = CLI.Callee; 2335 bool &IsTailCall = CLI.IsTailCall; 2336 CallingConv::ID CallConv = CLI.CallConv; 2337 bool IsVarArg = CLI.IsVarArg; 2338 bool IsSibCall = false; 2339 bool IsThisReturn = false; 2340 MachineFunction &MF = DAG.getMachineFunction(); 2341 2342 if (IsVarArg) { 2343 return lowerUnhandledCall(CLI, InVals, 2344 "unsupported call to variadic function "); 2345 } 2346 2347 if (!CLI.CS.getCalledFunction()) { 2348 return lowerUnhandledCall(CLI, InVals, 2349 "unsupported indirect call to function "); 2350 } 2351 2352 if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) { 2353 return lowerUnhandledCall(CLI, InVals, 2354 "unsupported required tail call to function "); 2355 } 2356 2357 // The first 4 bytes are reserved for the callee's emergency stack slot. 
2358 const unsigned CalleeUsableStackOffset = 4; 2359 2360 if (IsTailCall) { 2361 IsTailCall = isEligibleForTailCallOptimization( 2362 Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG); 2363 if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) { 2364 report_fatal_error("failed to perform tail call elimination on a call " 2365 "site marked musttail"); 2366 } 2367 2368 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; 2369 2370 // A sibling call is one where we're under the usual C ABI and not planning 2371 // to change that but can still do a tail call: 2372 if (!TailCallOpt && IsTailCall) 2373 IsSibCall = true; 2374 2375 if (IsTailCall) 2376 ++NumTailCalls; 2377 } 2378 2379 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee)) { 2380 // FIXME: Remove this hack for function pointer types after removing 2381 // support for the old address space mapping. In the new address space 2382 // mapping the pointer in the default address space is 64-bit, so it 2383 // does not need this hack. 2384 if (Callee.getValueType() == MVT::i32) { 2385 const GlobalValue *GV = GA->getGlobal(); 2386 Callee = DAG.getGlobalAddress(GV, DL, MVT::i64, GA->getOffset(), false, 2387 GA->getTargetFlags()); 2388 } 2389 } 2390 assert(Callee.getValueType() == MVT::i64); 2391 2392 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 2393 2394 // Analyze operands of the call, assigning locations to each operand. 2395 SmallVector<CCValAssign, 16> ArgLocs; 2396 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); 2397 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg); 2398 CCInfo.AnalyzeCallOperands(Outs, AssignFn); 2399 2400 // Get a count of how many bytes are to be pushed on the stack. 2401 unsigned NumBytes = CCInfo.getNextStackOffset(); 2402 2403 if (IsSibCall) { 2404 // Since we're not changing the ABI to make this a tail call, the memory 2405 // operands are already available in the caller's incoming argument space. 2406 NumBytes = 0; 2407 } 2408 2409 // FPDiff is the byte offset of the call's argument area from the callee's. 2410 // Stores to callee stack arguments will be placed in FixedStackSlots offset 2411 // by this amount for a tail call. In a sibling call it must be 0 because the 2412 // caller will deallocate the entire stack and the callee still expects its 2413 // arguments to begin at SP+0. Completely unused for non-tail calls. 2414 int32_t FPDiff = 0; 2415 MachineFrameInfo &MFI = MF.getFrameInfo(); 2416 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 2417 2418 SDValue CallerSavedFP; 2419 2420 // Adjust the stack pointer for the new arguments... 2421 // These operations are automatically eliminated by the prolog/epilog pass 2422 if (!IsSibCall) { 2423 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL); 2424 2425 unsigned OffsetReg = Info->getScratchWaveOffsetReg(); 2426 2427 // In the HSA case, this should be an identity copy. 2428 SDValue ScratchRSrcReg 2429 = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32); 2430 RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg); 2431 2432 // TODO: Don't hardcode these registers; get them from the callee function. 2433 SDValue ScratchWaveOffsetReg 2434 = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32); 2435 RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg); 2436 2437 if (!Info->isEntryFunction()) { 2438 // Avoid clobbering this function's FP value.
In the current convention the 2439 // callee will overwrite this, so do a save/restore around the call site. 2440 CallerSavedFP = DAG.getCopyFromReg(Chain, DL, 2441 Info->getFrameOffsetReg(), MVT::i32); 2442 } 2443 } 2444 2445 // Stack pointer relative accesses are done by changing the offset SGPR. This 2446 // is just the VGPR offset component. 2447 SDValue StackPtr = DAG.getConstant(CalleeUsableStackOffset, DL, MVT::i32); 2448 2449 SmallVector<SDValue, 8> MemOpChains; 2450 MVT PtrVT = MVT::i32; 2451 2452 // Walk the register/memloc assignments, inserting copies/loads. 2453 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e; 2454 ++i, ++realArgIdx) { 2455 CCValAssign &VA = ArgLocs[i]; 2456 SDValue Arg = OutVals[realArgIdx]; 2457 2458 // Promote the value if needed. 2459 switch (VA.getLocInfo()) { 2460 case CCValAssign::Full: 2461 break; 2462 case CCValAssign::BCvt: 2463 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); 2464 break; 2465 case CCValAssign::ZExt: 2466 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); 2467 break; 2468 case CCValAssign::SExt: 2469 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); 2470 break; 2471 case CCValAssign::AExt: 2472 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); 2473 break; 2474 case CCValAssign::FPExt: 2475 Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg); 2476 break; 2477 default: 2478 llvm_unreachable("Unknown loc info!"); 2479 } 2480 2481 if (VA.isRegLoc()) { 2482 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 2483 } else { 2484 assert(VA.isMemLoc()); 2485 2486 SDValue DstAddr; 2487 MachinePointerInfo DstInfo; 2488 2489 unsigned LocMemOffset = VA.getLocMemOffset(); 2490 int32_t Offset = LocMemOffset; 2491 2492 SDValue PtrOff = DAG.getObjectPtrOffset(DL, StackPtr, Offset); 2493 2494 if (IsTailCall) { 2495 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 2496 unsigned OpSize = Flags.isByVal() ? 2497 Flags.getByValSize() : VA.getValVT().getStoreSize(); 2498 2499 Offset = Offset + FPDiff; 2500 int FI = MFI.CreateFixedObject(OpSize, Offset, true); 2501 2502 DstAddr = DAG.getObjectPtrOffset(DL, DAG.getFrameIndex(FI, PtrVT), 2503 StackPtr); 2504 DstInfo = MachinePointerInfo::getFixedStack(MF, FI); 2505 2506 // Make sure any stack arguments overlapping with where we're storing 2507 // are loaded before this eventual operation. Otherwise they'll be 2508 // clobbered. 2509 2510 // FIXME: Why is this really necessary? This seems to just result in a 2511 // lot of code to copy the stack arguments and write them back to the 2512 // same locations, which are supposed to be immutable? 2513 Chain = addTokenForArgument(Chain, DAG, MFI, FI); 2514 } else { 2515 DstAddr = PtrOff; 2516 DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset); 2517 } 2518 2519 if (Outs[i].Flags.isByVal()) { 2520 SDValue SizeNode = 2521 DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32); 2522 SDValue Cpy = DAG.getMemcpy( 2523 Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(), 2524 /*isVol = */ false, /*AlwaysInline = */ true, 2525 /*isTailCall = */ false, DstInfo, 2526 MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy( 2527 *DAG.getContext(), AMDGPUASI.PRIVATE_ADDRESS)))); 2528 2529 MemOpChains.push_back(Cpy); 2530 } else { 2531 SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo); 2532 MemOpChains.push_back(Store); 2533 } 2534 } 2535 } 2536 2537 // Copy special input registers after user input arguments.
2538 passSpecialInputs(CLI, *Info, RegsToPass, MemOpChains, Chain, StackPtr); 2539 2540 if (!MemOpChains.empty()) 2541 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); 2542 2543 // Build a sequence of copy-to-reg nodes chained together with token chain 2544 // and flag operands which copy the outgoing args into the appropriate regs. 2545 SDValue InFlag; 2546 for (auto &RegToPass : RegsToPass) { 2547 Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first, 2548 RegToPass.second, InFlag); 2549 InFlag = Chain.getValue(1); 2550 } 2551 2552 2553 SDValue PhysReturnAddrReg; 2554 if (IsTailCall) { 2555 // Since the return is being combined with the call, we need to pass on the 2556 // return address. 2557 2558 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2559 SDValue ReturnAddrReg = CreateLiveInRegister( 2560 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64); 2561 2562 PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF), 2563 MVT::i64); 2564 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag); 2565 InFlag = Chain.getValue(1); 2566 } 2567 2568 // We don't usually want to end the call-sequence here because we would tidy 2569 // the frame up *after* the call. However, in the ABI-changing tail-call case 2570 // we've carefully laid out the parameters so that when sp is reset they'll be 2571 // in the correct location. 2572 if (IsTailCall && !IsSibCall) { 2573 Chain = DAG.getCALLSEQ_END(Chain, 2574 DAG.getTargetConstant(NumBytes, DL, MVT::i32), 2575 DAG.getTargetConstant(0, DL, MVT::i32), 2576 InFlag, DL); 2577 InFlag = Chain.getValue(1); 2578 } 2579 2580 std::vector<SDValue> Ops; 2581 Ops.push_back(Chain); 2582 Ops.push_back(Callee); 2583 2584 if (IsTailCall) { 2585 // Each tail call may have to adjust the stack by a different amount, so 2586 // this information must travel along with the operation for eventual 2587 // consumption by emitEpilogue. 2588 Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32)); 2589 2590 Ops.push_back(PhysReturnAddrReg); 2591 } 2592 2593 // Add argument registers to the end of the list so that they are known live 2594 // into the call. 2595 for (auto &RegToPass : RegsToPass) { 2596 Ops.push_back(DAG.getRegister(RegToPass.first, 2597 RegToPass.second.getValueType())); 2598 } 2599 2600 // Add a register mask operand representing the call-preserved registers. 2601 2602 const AMDGPURegisterInfo *TRI = Subtarget->getRegisterInfo(); 2603 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); 2604 assert(Mask && "Missing call preserved mask for calling convention"); 2605 Ops.push_back(DAG.getRegisterMask(Mask)); 2606 2607 if (InFlag.getNode()) 2608 Ops.push_back(InFlag); 2609 2610 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2611 2612 // If we're doing a tail call, use a TC_RETURN here rather than an 2613 // actual call instruction. 2614 if (IsTailCall) { 2615 MFI.setHasTailCall(); 2616 return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops); 2617 } 2618 2619 // Returns a chain and a flag for retval copy to use.
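// At this point (a non-tail call) Ops holds, in order: the chain, the callee, the argument registers, the call-preserved register mask, and the glue value if one was produced above.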
2620 SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops); 2621 Chain = Call.getValue(0); 2622 InFlag = Call.getValue(1); 2623 2624 if (CallerSavedFP) { 2625 SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32); 2626 Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag); 2627 InFlag = Chain.getValue(1); 2628 } 2629 2630 uint64_t CalleePopBytes = NumBytes; 2631 Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32), 2632 DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32), 2633 InFlag, DL); 2634 if (!Ins.empty()) 2635 InFlag = Chain.getValue(1); 2636 2637 // Handle result values, copying them out of physregs into vregs that we 2638 // return. 2639 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG, 2640 InVals, IsThisReturn, 2641 IsThisReturn ? OutVals[0] : SDValue()); 2642 } 2643 2644 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT, 2645 SelectionDAG &DAG) const { 2646 unsigned Reg = StringSwitch<unsigned>(RegName) 2647 .Case("m0", AMDGPU::M0) 2648 .Case("exec", AMDGPU::EXEC) 2649 .Case("exec_lo", AMDGPU::EXEC_LO) 2650 .Case("exec_hi", AMDGPU::EXEC_HI) 2651 .Case("flat_scratch", AMDGPU::FLAT_SCR) 2652 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO) 2653 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI) 2654 .Default(AMDGPU::NoRegister); 2655 2656 if (Reg == AMDGPU::NoRegister) { 2657 report_fatal_error(Twine("invalid register name \"" 2658 + StringRef(RegName) + "\".")); 2659 2660 } 2661 2662 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS && 2663 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) { 2664 report_fatal_error(Twine("invalid register \"" 2665 + StringRef(RegName) + "\" for subtarget.")); 2666 } 2667 2668 switch (Reg) { 2669 case AMDGPU::M0: 2670 case AMDGPU::EXEC_LO: 2671 case AMDGPU::EXEC_HI: 2672 case AMDGPU::FLAT_SCR_LO: 2673 case AMDGPU::FLAT_SCR_HI: 2674 if (VT.getSizeInBits() == 32) 2675 return Reg; 2676 break; 2677 case AMDGPU::EXEC: 2678 case AMDGPU::FLAT_SCR: 2679 if (VT.getSizeInBits() == 64) 2680 return Reg; 2681 break; 2682 default: 2683 llvm_unreachable("missing register type checking"); 2684 } 2685 2686 report_fatal_error(Twine("invalid type for register \"" 2687 + StringRef(RegName) + "\".")); 2688 } 2689 2690 // If kill is not the last instruction, split the block so kill is always a 2691 // proper terminator. 2692 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI, 2693 MachineBasicBlock *BB) const { 2694 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 2695 2696 MachineBasicBlock::iterator SplitPoint(&MI); 2697 ++SplitPoint; 2698 2699 if (SplitPoint == BB->end()) { 2700 // Don't bother with a new block. 2701 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); 2702 return BB; 2703 } 2704 2705 MachineFunction *MF = BB->getParent(); 2706 MachineBasicBlock *SplitBB 2707 = MF->CreateMachineBasicBlock(BB->getBasicBlock()); 2708 2709 MF->insert(++MachineFunction::iterator(BB), SplitBB); 2710 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end()); 2711 2712 SplitBB->transferSuccessorsAndUpdatePHIs(BB); 2713 BB->addSuccessor(SplitBB); 2714 2715 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); 2716 return SplitBB; 2717 } 2718 2719 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the 2720 // wavefront. If the value is uniform and just happens to be in a VGPR, this 2721 // will only do one iteration. In the worst case, this will loop 64 times. 
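// Roughly, the emitted loop looks like this (PHIs omitted; register names are illustrative placeholders, not the allocator's choices): // loop: // v_readfirstlane_b32 s_idx, v_idx ; pick one lane's index // v_cmp_eq_u32_e64 s[c:c+1], s_idx, v_idx // s_mov_b32 m0, s_idx ; or s_set_gpr_idx_idx // s_and_saveexec_b64 s[save:save+1], s[c:c+1] // <indexed access emitted here for the matching lanes> // s_xor_b64 exec, exec, s[save:save+1] // s_cbranch_execnz loop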
2722 // 2723 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value. 2724 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop( 2725 const SIInstrInfo *TII, 2726 MachineRegisterInfo &MRI, 2727 MachineBasicBlock &OrigBB, 2728 MachineBasicBlock &LoopBB, 2729 const DebugLoc &DL, 2730 const MachineOperand &IdxReg, 2731 unsigned InitReg, 2732 unsigned ResultReg, 2733 unsigned PhiReg, 2734 unsigned InitSaveExecReg, 2735 int Offset, 2736 bool UseGPRIdxMode) { 2737 MachineBasicBlock::iterator I = LoopBB.begin(); 2738 2739 unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2740 unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2741 unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 2742 unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2743 2744 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) 2745 .addReg(InitReg) 2746 .addMBB(&OrigBB) 2747 .addReg(ResultReg) 2748 .addMBB(&LoopBB); 2749 2750 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec) 2751 .addReg(InitSaveExecReg) 2752 .addMBB(&OrigBB) 2753 .addReg(NewExec) 2754 .addMBB(&LoopBB); 2755 2756 // Read the next variant <- also the loop target. 2757 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg) 2758 .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef())); 2759 2760 // Compare the just-read index value against the per-lane Idx values. 2761 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg) 2762 .addReg(CurrentIdxReg) 2763 .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg()); 2764 2765 if (UseGPRIdxMode) { 2766 unsigned IdxReg; 2767 if (Offset == 0) { 2768 IdxReg = CurrentIdxReg; 2769 } else { 2770 IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 2771 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg) 2772 .addReg(CurrentIdxReg, RegState::Kill) 2773 .addImm(Offset); 2774 } 2775 2776 MachineInstr *SetIdx = 2777 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_IDX)) 2778 .addReg(IdxReg, RegState::Kill); 2779 SetIdx->getOperand(2).setIsUndef(); 2780 } else { 2781 // Move the index from CurrentIdxReg into M0 2782 if (Offset == 0) { 2783 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 2784 .addReg(CurrentIdxReg, RegState::Kill); 2785 } else { 2786 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 2787 .addReg(CurrentIdxReg, RegState::Kill) 2788 .addImm(Offset); 2789 } 2790 } 2791 2792 // Update EXEC, saving the original EXEC value to NewExec. 2793 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec) 2794 .addReg(CondReg, RegState::Kill); 2795 2796 MRI.setSimpleHint(NewExec, CondReg); 2797 2798 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 2799 MachineInstr *InsertPt = 2800 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC) 2801 .addReg(AMDGPU::EXEC) 2802 .addReg(NewExec); 2803 2804 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use 2805 // s_cbranch_scc0? 2806 2807 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover. 2808 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) 2809 .addMBB(&LoopBB); 2810 2811 return InsertPt->getIterator(); 2812 } 2813 2814 // This has slightly sub-optimal regalloc when the source vector is killed by 2815 // the read.
The register allocator does not understand that the kill is 2816 // per-workitem, so the source vector is kept alive for the whole loop and we 2817 // end up not reusing a subregister from it, using one more VGPR than 2818 // necessary. The extra VGPR was avoided back when this was expanded after register allocation. 2819 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII, 2820 MachineBasicBlock &MBB, 2821 MachineInstr &MI, 2822 unsigned InitResultReg, 2823 unsigned PhiReg, 2824 int Offset, 2825 bool UseGPRIdxMode) { 2826 MachineFunction *MF = MBB.getParent(); 2827 MachineRegisterInfo &MRI = MF->getRegInfo(); 2828 const DebugLoc &DL = MI.getDebugLoc(); 2829 MachineBasicBlock::iterator I(&MI); 2830 2831 unsigned DstReg = MI.getOperand(0).getReg(); 2832 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 2833 unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 2834 2835 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec); 2836 2837 // Save the EXEC mask 2838 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec) 2839 .addReg(AMDGPU::EXEC); 2840 2841 // To insert the loop we need to split the block. Move everything after this 2842 // point to a new block, and insert a new empty block between the two. 2843 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock(); 2844 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock(); 2845 MachineFunction::iterator MBBI(MBB); 2846 ++MBBI; 2847 2848 MF->insert(MBBI, LoopBB); 2849 MF->insert(MBBI, RemainderBB); 2850 2851 LoopBB->addSuccessor(LoopBB); 2852 LoopBB->addSuccessor(RemainderBB); 2853 2854 // Move the rest of the block into a new block. 2855 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 2856 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); 2857 2858 MBB.addSuccessor(LoopBB); 2859 2860 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 2861 2862 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx, 2863 InitResultReg, DstReg, PhiReg, TmpExec, 2864 Offset, UseGPRIdxMode); 2865 2866 MachineBasicBlock::iterator First = RemainderBB->begin(); 2867 BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) 2868 .addReg(SaveExec); 2869 2870 return InsPt; 2871 } 2872 2873 // Returns subreg index, offset 2874 static std::pair<unsigned, int> 2875 computeIndirectRegAndOffset(const SIRegisterInfo &TRI, 2876 const TargetRegisterClass *SuperRC, 2877 unsigned VecReg, 2878 int Offset) { 2879 int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32; 2880 2881 // Skip out of bounds offsets, or else we would end up using an undefined 2882 // register. 2883 if (Offset >= NumElts || Offset < 0) 2884 return std::make_pair(AMDGPU::sub0, Offset); 2885 2886 return std::make_pair(AMDGPU::sub0 + Offset, 0); 2887 } 2888 2889 // Return true if the index is an SGPR and was set. 2890 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII, 2891 MachineRegisterInfo &MRI, 2892 MachineInstr &MI, 2893 int Offset, 2894 bool UseGPRIdxMode, 2895 bool IsIndirectSrc) { 2896 MachineBasicBlock *MBB = MI.getParent(); 2897 const DebugLoc &DL = MI.getDebugLoc(); 2898 MachineBasicBlock::iterator I(&MI); 2899 2900 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 2901 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); 2902 2903 assert(Idx->getReg() != AMDGPU::NoRegister); 2904 2905 if (!TII->getRegisterInfo().isSGPRClass(IdxRC)) 2906 return false; 2907 2908 if (UseGPRIdxMode) { 2909 unsigned IdxMode = IsIndirectSrc ?
2910 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE; 2911 if (Offset == 0) { 2912 MachineInstr *SetOn = 2913 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 2914 .add(*Idx) 2915 .addImm(IdxMode); 2916 2917 SetOn->getOperand(3).setIsUndef(); 2918 } else { 2919 unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 2920 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) 2921 .add(*Idx) 2922 .addImm(Offset); 2923 MachineInstr *SetOn = 2924 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 2925 .addReg(Tmp, RegState::Kill) 2926 .addImm(IdxMode); 2927 2928 SetOn->getOperand(3).setIsUndef(); 2929 } 2930 2931 return true; 2932 } 2933 2934 if (Offset == 0) { 2935 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 2936 .add(*Idx); 2937 } else { 2938 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 2939 .add(*Idx) 2940 .addImm(Offset); 2941 } 2942 2943 return true; 2944 } 2945 2946 // Control flow needs to be inserted if indexing with a VGPR. 2947 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI, 2948 MachineBasicBlock &MBB, 2949 const SISubtarget &ST) { 2950 const SIInstrInfo *TII = ST.getInstrInfo(); 2951 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 2952 MachineFunction *MF = MBB.getParent(); 2953 MachineRegisterInfo &MRI = MF->getRegInfo(); 2954 2955 unsigned Dst = MI.getOperand(0).getReg(); 2956 unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg(); 2957 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 2958 2959 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg); 2960 2961 unsigned SubReg; 2962 std::tie(SubReg, Offset) 2963 = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset); 2964 2965 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode); 2966 2967 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) { 2968 MachineBasicBlock::iterator I(&MI); 2969 const DebugLoc &DL = MI.getDebugLoc(); 2970 2971 if (UseGPRIdxMode) { 2972 // TODO: Look at the uses to avoid the copy. This may require rescheduling 2973 // to avoid interfering with other uses, so probably requires a new 2974 // optimization pass. 2975 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) 2976 .addReg(SrcReg, RegState::Undef, SubReg) 2977 .addReg(SrcReg, RegState::Implicit) 2978 .addReg(AMDGPU::M0, RegState::Implicit); 2979 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 2980 } else { 2981 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 2982 .addReg(SrcReg, RegState::Undef, SubReg) 2983 .addReg(SrcReg, RegState::Implicit); 2984 } 2985 2986 MI.eraseFromParent(); 2987 2988 return &MBB; 2989 } 2990 2991 const DebugLoc &DL = MI.getDebugLoc(); 2992 MachineBasicBlock::iterator I(&MI); 2993 2994 unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 2995 unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 2996 2997 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg); 2998 2999 if (UseGPRIdxMode) { 3000 MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 3001 .addImm(0) // Reset inside loop. 3002 .addImm(VGPRIndexMode::SRC0_ENABLE); 3003 SetOn->getOperand(3).setIsUndef(); 3004 3005 // Disable again after the loop. 
3006 BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 3007 } 3008 3009 auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, UseGPRIdxMode); 3010 MachineBasicBlock *LoopBB = InsPt->getParent(); 3011 3012 if (UseGPRIdxMode) { 3013 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) 3014 .addReg(SrcReg, RegState::Undef, SubReg) 3015 .addReg(SrcReg, RegState::Implicit) 3016 .addReg(AMDGPU::M0, RegState::Implicit); 3017 } else { 3018 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 3019 .addReg(SrcReg, RegState::Undef, SubReg) 3020 .addReg(SrcReg, RegState::Implicit); 3021 } 3022 3023 MI.eraseFromParent(); 3024 3025 return LoopBB; 3026 } 3027 3028 static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI, 3029 const TargetRegisterClass *VecRC) { 3030 switch (TRI.getRegSizeInBits(*VecRC)) { 3031 case 32: // 4 bytes 3032 return AMDGPU::V_MOVRELD_B32_V1; 3033 case 64: // 8 bytes 3034 return AMDGPU::V_MOVRELD_B32_V2; 3035 case 128: // 16 bytes 3036 return AMDGPU::V_MOVRELD_B32_V4; 3037 case 256: // 32 bytes 3038 return AMDGPU::V_MOVRELD_B32_V8; 3039 case 512: // 64 bytes 3040 return AMDGPU::V_MOVRELD_B32_V16; 3041 default: 3042 llvm_unreachable("unsupported size for MOVRELD pseudos"); 3043 } 3044 } 3045 3046 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, 3047 MachineBasicBlock &MBB, 3048 const SISubtarget &ST) { 3049 const SIInstrInfo *TII = ST.getInstrInfo(); 3050 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 3051 MachineFunction *MF = MBB.getParent(); 3052 MachineRegisterInfo &MRI = MF->getRegInfo(); 3053 3054 unsigned Dst = MI.getOperand(0).getReg(); 3055 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); 3056 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 3057 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); 3058 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 3059 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg()); 3060 3061 // This can be an immediate, but will be folded later. 
3062 assert(Val->getReg()); 3063 3064 unsigned SubReg; 3065 std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC, 3066 SrcVec->getReg(), 3067 Offset); 3068 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode); 3069 3070 if (Idx->getReg() == AMDGPU::NoRegister) { 3071 MachineBasicBlock::iterator I(&MI); 3072 const DebugLoc &DL = MI.getDebugLoc(); 3073 3074 assert(Offset == 0); 3075 3076 BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst) 3077 .add(*SrcVec) 3078 .add(*Val) 3079 .addImm(SubReg); 3080 3081 MI.eraseFromParent(); 3082 return &MBB; 3083 } 3084 3085 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) { 3086 MachineBasicBlock::iterator I(&MI); 3087 const DebugLoc &DL = MI.getDebugLoc(); 3088 3089 if (UseGPRIdxMode) { 3090 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) 3091 .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst 3092 .add(*Val) 3093 .addReg(Dst, RegState::ImplicitDefine) 3094 .addReg(SrcVec->getReg(), RegState::Implicit) 3095 .addReg(AMDGPU::M0, RegState::Implicit); 3096 3097 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 3098 } else { 3099 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC)); 3100 3101 BuildMI(MBB, I, DL, MovRelDesc) 3102 .addReg(Dst, RegState::Define) 3103 .addReg(SrcVec->getReg()) 3104 .add(*Val) 3105 .addImm(SubReg - AMDGPU::sub0); 3106 } 3107 3108 MI.eraseFromParent(); 3109 return &MBB; 3110 } 3111 3112 if (Val->isReg()) 3113 MRI.clearKillFlags(Val->getReg()); 3114 3115 const DebugLoc &DL = MI.getDebugLoc(); 3116 3117 if (UseGPRIdxMode) { 3118 MachineBasicBlock::iterator I(&MI); 3119 3120 MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 3121 .addImm(0) // Reset inside loop. 3122 .addImm(VGPRIndexMode::DST_ENABLE); 3123 SetOn->getOperand(3).setIsUndef(); 3124 3125 // Disable again after the loop. 3126 BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 3127 } 3128 3129 unsigned PhiReg = MRI.createVirtualRegister(VecRC); 3130 3131 auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, 3132 Offset, UseGPRIdxMode); 3133 MachineBasicBlock *LoopBB = InsPt->getParent(); 3134 3135 if (UseGPRIdxMode) { 3136 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) 3137 .addReg(PhiReg, RegState::Undef, SubReg) // vdst 3138 .add(*Val) // src0 3139 .addReg(Dst, RegState::ImplicitDefine) 3140 .addReg(PhiReg, RegState::Implicit) 3141 .addReg(AMDGPU::M0, RegState::Implicit); 3142 } else { 3143 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC)); 3144 3145 BuildMI(*LoopBB, InsPt, DL, MovRelDesc) 3146 .addReg(Dst, RegState::Define) 3147 .addReg(PhiReg) 3148 .add(*Val) 3149 .addImm(SubReg - AMDGPU::sub0); 3150 } 3151 3152 MI.eraseFromParent(); 3153 3154 return LoopBB; 3155 } 3156 3157 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( 3158 MachineInstr &MI, MachineBasicBlock *BB) const { 3159 3160 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3161 MachineFunction *MF = BB->getParent(); 3162 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 3163 3164 if (TII->isMIMG(MI)) { 3165 if (MI.memoperands_empty() && MI.mayLoadOrStore()) { 3166 report_fatal_error("missing mem operand from MIMG instruction"); 3167 } 3168 // MIMG instructions need a memoperand so that they aren't assumed to 3169 // be ordered memory instructions.
3170 3171 return BB; 3172 } 3173 3174 switch (MI.getOpcode()) { 3175 case AMDGPU::S_ADD_U64_PSEUDO: 3176 case AMDGPU::S_SUB_U64_PSEUDO: { 3177 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 3178 const DebugLoc &DL = MI.getDebugLoc(); 3179 3180 MachineOperand &Dest = MI.getOperand(0); 3181 MachineOperand &Src0 = MI.getOperand(1); 3182 MachineOperand &Src1 = MI.getOperand(2); 3183 3184 unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3185 unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3186 3187 MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, 3188 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0, 3189 &AMDGPU::SReg_32_XM0RegClass); 3190 MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI, 3191 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1, 3192 &AMDGPU::SReg_32_XM0RegClass); 3193 3194 MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, 3195 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0, 3196 &AMDGPU::SReg_32_XM0RegClass); 3197 MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI, 3198 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1, 3199 &AMDGPU::SReg_32_XM0RegClass); 3200 3201 bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 3202 3203 unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32; 3204 unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32; 3205 BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0) 3206 .add(Src0Sub0) 3207 .add(Src1Sub0); 3208 BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1) 3209 .add(Src0Sub1) 3210 .add(Src1Sub1); 3211 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg()) 3212 .addReg(DestSub0) 3213 .addImm(AMDGPU::sub0) 3214 .addReg(DestSub1) 3215 .addImm(AMDGPU::sub1); 3216 MI.eraseFromParent(); 3217 return BB; 3218 } 3219 case AMDGPU::SI_INIT_M0: { 3220 BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(), 3221 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 3222 .add(MI.getOperand(0)); 3223 MI.eraseFromParent(); 3224 return BB; 3225 } 3226 case AMDGPU::SI_INIT_EXEC: 3227 // This should be before all vector instructions. 3228 BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64), 3229 AMDGPU::EXEC) 3230 .addImm(MI.getOperand(0).getImm()); 3231 MI.eraseFromParent(); 3232 return BB; 3233 3234 case AMDGPU::SI_INIT_EXEC_FROM_INPUT: { 3235 // Extract the thread count from an SGPR input and set EXEC accordingly. 3236 // Since BFM can't shift by 64, handle that case with CMP + CMOV. 3237 // 3238 // S_BFE_U32 count, input, {shift, 7} 3239 // S_BFM_B64 exec, count, 0 3240 // S_CMP_EQ_U32 count, 64 3241 // S_CMOV_B64 exec, -1 3242 MachineInstr *FirstMI = &*BB->begin(); 3243 MachineRegisterInfo &MRI = MF->getRegInfo(); 3244 unsigned InputReg = MI.getOperand(0).getReg(); 3245 unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3246 bool Found = false; 3247 3248 // Move the COPY of the input reg to the beginning, so that we can use it. 3249 for (auto I = BB->begin(); I != &MI; I++) { 3250 if (I->getOpcode() != TargetOpcode::COPY || 3251 I->getOperand(0).getReg() != InputReg) 3252 continue; 3253 3254 if (I == FirstMI) { 3255 FirstMI = &*++BB->begin(); 3256 } else { 3257 I->removeFromParent(); 3258 BB->insert(FirstMI, &*I); 3259 } 3260 Found = true; 3261 break; 3262 } 3263 assert(Found); 3264 (void)Found; 3265 3266 // This should be before all vector instructions. 
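// In effect: count = (input >> shift) & 0x7f;
//            exec  = (count == 64) ? -1 : ((1ull << count) - 1);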
3267 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg) 3268 .addReg(InputReg) 3269 .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000); 3270 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64), 3271 AMDGPU::EXEC) 3272 .addReg(CountReg) 3273 .addImm(0); 3274 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32)) 3275 .addReg(CountReg, RegState::Kill) 3276 .addImm(64); 3277 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64), 3278 AMDGPU::EXEC) 3279 .addImm(-1); 3280 MI.eraseFromParent(); 3281 return BB; 3282 } 3283 3284 case AMDGPU::GET_GROUPSTATICSIZE: { 3285 DebugLoc DL = MI.getDebugLoc(); 3286 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32)) 3287 .add(MI.getOperand(0)) 3288 .addImm(MFI->getLDSSize()); 3289 MI.eraseFromParent(); 3290 return BB; 3291 } 3292 case AMDGPU::SI_INDIRECT_SRC_V1: 3293 case AMDGPU::SI_INDIRECT_SRC_V2: 3294 case AMDGPU::SI_INDIRECT_SRC_V4: 3295 case AMDGPU::SI_INDIRECT_SRC_V8: 3296 case AMDGPU::SI_INDIRECT_SRC_V16: 3297 return emitIndirectSrc(MI, *BB, *getSubtarget()); 3298 case AMDGPU::SI_INDIRECT_DST_V1: 3299 case AMDGPU::SI_INDIRECT_DST_V2: 3300 case AMDGPU::SI_INDIRECT_DST_V4: 3301 case AMDGPU::SI_INDIRECT_DST_V8: 3302 case AMDGPU::SI_INDIRECT_DST_V16: 3303 return emitIndirectDst(MI, *BB, *getSubtarget()); 3304 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: 3305 case AMDGPU::SI_KILL_I1_PSEUDO: 3306 return splitKillBlock(MI, BB); 3307 case AMDGPU::V_CNDMASK_B64_PSEUDO: { 3308 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 3309 3310 unsigned Dst = MI.getOperand(0).getReg(); 3311 unsigned Src0 = MI.getOperand(1).getReg(); 3312 unsigned Src1 = MI.getOperand(2).getReg(); 3313 const DebugLoc &DL = MI.getDebugLoc(); 3314 unsigned SrcCond = MI.getOperand(3).getReg(); 3315 3316 unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3317 unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3318 unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 3319 3320 BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy) 3321 .addReg(SrcCond); 3322 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo) 3323 .addReg(Src0, 0, AMDGPU::sub0) 3324 .addReg(Src1, 0, AMDGPU::sub0) 3325 .addReg(SrcCondCopy); 3326 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi) 3327 .addReg(Src0, 0, AMDGPU::sub1) 3328 .addReg(Src1, 0, AMDGPU::sub1) 3329 .addReg(SrcCondCopy); 3330 3331 BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst) 3332 .addReg(DstLo) 3333 .addImm(AMDGPU::sub0) 3334 .addReg(DstHi) 3335 .addImm(AMDGPU::sub1); 3336 MI.eraseFromParent(); 3337 return BB; 3338 } 3339 case AMDGPU::SI_BR_UNDEF: { 3340 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3341 const DebugLoc &DL = MI.getDebugLoc(); 3342 MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) 3343 .add(MI.getOperand(0)); 3344 Br->getOperand(1).setIsUndef(true); // read undef SCC 3345 MI.eraseFromParent(); 3346 return BB; 3347 } 3348 case AMDGPU::ADJCALLSTACKUP: 3349 case AMDGPU::ADJCALLSTACKDOWN: { 3350 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 3351 MachineInstrBuilder MIB(*MF, &MI); 3352 MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine) 3353 .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit); 3354 return BB; 3355 } 3356 case AMDGPU::SI_CALL_ISEL: 3357 case AMDGPU::SI_TCRETURN_ISEL: { 3358 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3359 const DebugLoc &DL = MI.getDebugLoc(); 3360 
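// Rewrite the ISEL pseudo into the real call instruction: recover the
// callee's GlobalValue from the SI_PC_ADD_REL_OFFSET that defines operand 0,
// and attach it to SI_CALL / SI_TCRETURN as an explicit global address
// operand.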
unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
3361
3362 MachineRegisterInfo &MRI = MF->getRegInfo();
3363 unsigned GlobalAddrReg = MI.getOperand(0).getReg();
3364 MachineInstr *PCRel = MRI.getVRegDef(GlobalAddrReg);
3365 assert(PCRel->getOpcode() == AMDGPU::SI_PC_ADD_REL_OFFSET);
3366
3367 const GlobalValue *G = PCRel->getOperand(1).getGlobal();
3368
3369 MachineInstrBuilder MIB;
3370 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
3371 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg)
3372 .add(MI.getOperand(0))
3373 .addGlobalAddress(G);
3374 } else {
3375 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_TCRETURN))
3376 .add(MI.getOperand(0))
3377 .addGlobalAddress(G);
3378
3379 // There is an additional imm operand for tcreturn, but it should be in the
3380 // right place already.
3381 }
3382
3383 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I)
3384 MIB.add(MI.getOperand(I));
3385
3386 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
3387 MI.eraseFromParent();
3388 return BB;
3389 }
3390 default:
3391 return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
3392 }
3393 }
3394
3395 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3396 return isTypeLegal(VT.getScalarType());
3397 }
3398
3399 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3400 // This currently forces unfolding various combinations of fsub into fma with
3401 // free fneg'd operands. As long as we have fast FMA (controlled by
3402 // isFMAFasterThanFMulAndFAdd), we should perform these.
3403
3404 // When fma is quarter rate, for f64 where add / sub are at best half rate,
3405 // most of these combines appear to be cycle neutral but save on instruction
3406 // count / code size.
3407 return true;
3408 }
3409
3410 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3411 EVT VT) const {
3412 if (!VT.isVector()) {
3413 return MVT::i1;
3414 }
3415 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
3416 }
3417
3418 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3419 // TODO: Should i16 always be used if legal? For now it would force VALU
3420 // shifts.
3421 return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
3422 }
3423
3424 // Answering this is somewhat tricky and depends on the specific device, since
3425 // different devices have different rates for fma and for f64 operations in general.
3426 //
3427 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3428 // regardless of which device (although the number of cycles differs between
3429 // devices), so it is always profitable for f64.
3430 //
3431 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3432 // only on full rate devices. Normally, we should prefer selecting v_mad_f32,
3433 // which we can always do even without fused FP ops since it returns the same
3434 // result as the separate operations and since it is always full
3435 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32,
3436 // however, does not support denormals, so we do report fma as faster if we have
3437 // a fast fma device and require denormals.
3438 //
3439 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3440 VT = VT.getScalarType();
3441
3442 switch (VT.getSimpleVT().SimpleTy) {
3443 case MVT::f32:
3444 // This is as fast on some subtargets.
However, we always have full rate f32 3445 // mad available which returns the same result as the separate operations 3446 // which we should prefer over fma. We can't use this if we want to support 3447 // denormals, so only report this in these cases. 3448 return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32(); 3449 case MVT::f64: 3450 return true; 3451 case MVT::f16: 3452 return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals(); 3453 default: 3454 break; 3455 } 3456 3457 return false; 3458 } 3459 3460 //===----------------------------------------------------------------------===// 3461 // Custom DAG Lowering Operations 3462 //===----------------------------------------------------------------------===// 3463 3464 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 3465 switch (Op.getOpcode()) { 3466 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); 3467 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 3468 case ISD::LOAD: { 3469 SDValue Result = LowerLOAD(Op, DAG); 3470 assert((!Result.getNode() || 3471 Result.getNode()->getNumValues() == 2) && 3472 "Load should return a value and a chain"); 3473 return Result; 3474 } 3475 3476 case ISD::FSIN: 3477 case ISD::FCOS: 3478 return LowerTrig(Op, DAG); 3479 case ISD::SELECT: return LowerSELECT(Op, DAG); 3480 case ISD::FDIV: return LowerFDIV(Op, DAG); 3481 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG); 3482 case ISD::STORE: return LowerSTORE(Op, DAG); 3483 case ISD::GlobalAddress: { 3484 MachineFunction &MF = DAG.getMachineFunction(); 3485 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 3486 return LowerGlobalAddress(MFI, Op, DAG); 3487 } 3488 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 3489 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 3490 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); 3491 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG); 3492 case ISD::INSERT_VECTOR_ELT: 3493 return lowerINSERT_VECTOR_ELT(Op, DAG); 3494 case ISD::EXTRACT_VECTOR_ELT: 3495 return lowerEXTRACT_VECTOR_ELT(Op, DAG); 3496 case ISD::FP_ROUND: 3497 return lowerFP_ROUND(Op, DAG); 3498 case ISD::TRAP: 3499 case ISD::DEBUGTRAP: 3500 return lowerTRAP(Op, DAG); 3501 } 3502 return SDValue(); 3503 } 3504 3505 void SITargetLowering::ReplaceNodeResults(SDNode *N, 3506 SmallVectorImpl<SDValue> &Results, 3507 SelectionDAG &DAG) const { 3508 switch (N->getOpcode()) { 3509 case ISD::INSERT_VECTOR_ELT: { 3510 if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG)) 3511 Results.push_back(Res); 3512 return; 3513 } 3514 case ISD::EXTRACT_VECTOR_ELT: { 3515 if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG)) 3516 Results.push_back(Res); 3517 return; 3518 } 3519 case ISD::INTRINSIC_WO_CHAIN: { 3520 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 3521 switch (IID) { 3522 case Intrinsic::amdgcn_cvt_pkrtz: { 3523 SDValue Src0 = N->getOperand(1); 3524 SDValue Src1 = N->getOperand(2); 3525 SDLoc SL(N); 3526 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32, 3527 Src0, Src1); 3528 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt)); 3529 return; 3530 } 3531 case Intrinsic::amdgcn_cvt_pknorm_i16: 3532 case Intrinsic::amdgcn_cvt_pknorm_u16: 3533 case Intrinsic::amdgcn_cvt_pk_i16: 3534 case Intrinsic::amdgcn_cvt_pk_u16: { 3535 SDValue Src0 = N->getOperand(1); 3536 SDValue Src1 = N->getOperand(2); 3537 SDLoc SL(N); 3538 unsigned Opcode; 3539 3540 if 
(IID == Intrinsic::amdgcn_cvt_pknorm_i16) 3541 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; 3542 else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16) 3543 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; 3544 else if (IID == Intrinsic::amdgcn_cvt_pk_i16) 3545 Opcode = AMDGPUISD::CVT_PK_I16_I32; 3546 else 3547 Opcode = AMDGPUISD::CVT_PK_U16_U32; 3548 3549 SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1); 3550 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt)); 3551 return; 3552 } 3553 } 3554 break; 3555 } 3556 case ISD::SELECT: { 3557 SDLoc SL(N); 3558 EVT VT = N->getValueType(0); 3559 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); 3560 SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1)); 3561 SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2)); 3562 3563 EVT SelectVT = NewVT; 3564 if (NewVT.bitsLT(MVT::i32)) { 3565 LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS); 3566 RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS); 3567 SelectVT = MVT::i32; 3568 } 3569 3570 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT, 3571 N->getOperand(0), LHS, RHS); 3572 3573 if (NewVT != SelectVT) 3574 NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect); 3575 Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect)); 3576 return; 3577 } 3578 default: 3579 break; 3580 } 3581 } 3582 3583 /// \brief Helper function for LowerBRCOND 3584 static SDNode *findUser(SDValue Value, unsigned Opcode) { 3585 3586 SDNode *Parent = Value.getNode(); 3587 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); 3588 I != E; ++I) { 3589 3590 if (I.getUse().get() != Value) 3591 continue; 3592 3593 if (I->getOpcode() == Opcode) 3594 return *I; 3595 } 3596 return nullptr; 3597 } 3598 3599 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { 3600 if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 3601 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) { 3602 case Intrinsic::amdgcn_if: 3603 return AMDGPUISD::IF; 3604 case Intrinsic::amdgcn_else: 3605 return AMDGPUISD::ELSE; 3606 case Intrinsic::amdgcn_loop: 3607 return AMDGPUISD::LOOP; 3608 case Intrinsic::amdgcn_end_cf: 3609 llvm_unreachable("should not occur"); 3610 default: 3611 return 0; 3612 } 3613 } 3614 3615 // break, if_break, else_break are all only used as inputs to loop, not 3616 // directly as branch conditions. 3617 return 0; 3618 } 3619 3620 void SITargetLowering::createDebuggerPrologueStackObjects( 3621 MachineFunction &MF) const { 3622 // Create stack objects that are used for emitting debugger prologue. 3623 // 3624 // Debugger prologue writes work group IDs and work item IDs to scratch memory 3625 // at fixed location in the following format: 3626 // offset 0: work group ID x 3627 // offset 4: work group ID y 3628 // offset 8: work group ID z 3629 // offset 16: work item ID x 3630 // offset 20: work item ID y 3631 // offset 24: work item ID z 3632 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 3633 int ObjectIdx = 0; 3634 3635 // For each dimension: 3636 for (unsigned i = 0; i < 3; ++i) { 3637 // Create fixed stack object for work group ID. 3638 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true); 3639 Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx); 3640 // Create fixed stack object for work item ID. 
3641 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true);
3642 Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx);
3643 }
3644 }
3645
3646 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
3647 const Triple &TT = getTargetMachine().getTargetTriple();
3648 return GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS &&
3649 AMDGPU::shouldEmitConstantsToTextSection(TT);
3650 }
3651
3652 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
3653 return (GV->getType()->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
3654 GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) &&
3655 !shouldEmitFixup(GV) &&
3656 !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3657 }
3658
3659 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
3660 return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
3661 }
3662
3663 /// This transforms the control flow intrinsics to get the branch destination
3664 /// as the last parameter, and also switches the branch target with BR if the need arises.
3665 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
3666 SelectionDAG &DAG) const {
3667 SDLoc DL(BRCOND);
3668
3669 SDNode *Intr = BRCOND.getOperand(1).getNode();
3670 SDValue Target = BRCOND.getOperand(2);
3671 SDNode *BR = nullptr;
3672 SDNode *SetCC = nullptr;
3673
3674 if (Intr->getOpcode() == ISD::SETCC) {
3675 // As long as we negate the condition everything is fine
3676 SetCC = Intr;
3677 Intr = SetCC->getOperand(0).getNode();
3678
3679 } else {
3680 // Get the target from BR if we don't negate the condition
3681 BR = findUser(BRCOND, ISD::BR);
3682 Target = BR->getOperand(1);
3683 }
3684
3685 // FIXME: This changes the types of the intrinsics instead of introducing new
3686 // nodes with the correct types.
3687 // e.g. llvm.amdgcn.loop
3688
3689 // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
3690 // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
3691
3692 unsigned CFNode = isCFIntrinsic(Intr);
3693 if (CFNode == 0) {
3694 // This is a uniform branch so we don't need to legalize.
3695 return BRCOND;
3696 }
3697
3698 bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
3699 Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
3700
3701 assert(!SetCC ||
3702 (SetCC->getConstantOperandVal(1) == 1 &&
3703 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
3704 ISD::SETNE));
3705
3706 // operands of the new intrinsic call
3707 SmallVector<SDValue, 4> Ops;
3708 if (HaveChain)
3709 Ops.push_back(BRCOND.getOperand(0));
3710
3711 Ops.append(Intr->op_begin() + (HaveChain ?
2 : 1), Intr->op_end());
3712 Ops.push_back(Target);
3713
3714 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
3715
3716 // build the new intrinsic call
3717 SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
3718
3719 if (!HaveChain) {
3720 SDValue Ops[] = {
3721 SDValue(Result, 0),
3722 BRCOND.getOperand(0)
3723 };
3724
3725 Result = DAG.getMergeValues(Ops, DL).getNode();
3726 }
3727
3728 if (BR) {
3729 // Give the branch instruction our target
3730 SDValue Ops[] = {
3731 BR->getOperand(0),
3732 BRCOND.getOperand(2)
3733 };
3734 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
3735 DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
3736 BR = NewBR.getNode();
3737 }
3738
3739 SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
3740
3741 // Copy the intrinsic results to registers
3742 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
3743 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
3744 if (!CopyToReg)
3745 continue;
3746
3747 Chain = DAG.getCopyToReg(
3748 Chain, DL,
3749 CopyToReg->getOperand(1),
3750 SDValue(Result, i - 1),
3751 SDValue());
3752
3753 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
3754 }
3755
3756 // Remove the old intrinsic from the chain
3757 DAG.ReplaceAllUsesOfValueWith(
3758 SDValue(Intr, Intr->getNumValues() - 1),
3759 Intr->getOperand(0));
3760
3761 return Chain;
3762 }
3763
3764 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
3765 SDValue Op,
3766 const SDLoc &DL,
3767 EVT VT) const {
3768 // Use FP_ROUND for the narrowing case; FTRUNC is a same-type rounding op.
3769 return Op.getValueType().bitsLE(VT) ?
3770 DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
DAG.getNode(ISD::FP_ROUND, DL, VT, Op,
DAG.getTargetConstant(0, DL, MVT::i32));
3771 }
3772
3773 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
3774 assert(Op.getValueType() == MVT::f16 &&
3775 "Do not know how to custom lower FP_ROUND for non-f16 type");
3776
3777 SDValue Src = Op.getOperand(0);
3778 EVT SrcVT = Src.getValueType();
3779 if (SrcVT != MVT::f64)
3780 return Op;
3781
3782 SDLoc DL(Op);
3783
3784 SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
3785 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
3786 return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
3787 }
3788
3789 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
3790 SDLoc SL(Op);
3791 MachineFunction &MF = DAG.getMachineFunction();
3792 SDValue Chain = Op.getOperand(0);
3793
3794 unsigned TrapID = Op.getOpcode() == ISD::DEBUGTRAP ?
3795 SISubtarget::TrapIDLLVMDebugTrap : SISubtarget::TrapIDLLVMTrap; 3796 3797 if (Subtarget->getTrapHandlerAbi() == SISubtarget::TrapHandlerAbiHsa && 3798 Subtarget->isTrapHandlerEnabled()) { 3799 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 3800 unsigned UserSGPR = Info->getQueuePtrUserSGPR(); 3801 assert(UserSGPR != AMDGPU::NoRegister); 3802 3803 SDValue QueuePtr = CreateLiveInRegister( 3804 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); 3805 3806 SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64); 3807 3808 SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01, 3809 QueuePtr, SDValue()); 3810 3811 SDValue Ops[] = { 3812 ToReg, 3813 DAG.getTargetConstant(TrapID, SL, MVT::i16), 3814 SGPR01, 3815 ToReg.getValue(1) 3816 }; 3817 3818 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); 3819 } 3820 3821 switch (TrapID) { 3822 case SISubtarget::TrapIDLLVMTrap: 3823 return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain); 3824 case SISubtarget::TrapIDLLVMDebugTrap: { 3825 DiagnosticInfoUnsupported NoTrap(MF.getFunction(), 3826 "debugtrap handler not supported", 3827 Op.getDebugLoc(), 3828 DS_Warning); 3829 LLVMContext &Ctx = MF.getFunction().getContext(); 3830 Ctx.diagnose(NoTrap); 3831 return Chain; 3832 } 3833 default: 3834 llvm_unreachable("unsupported trap handler type!"); 3835 } 3836 3837 return Chain; 3838 } 3839 3840 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL, 3841 SelectionDAG &DAG) const { 3842 // FIXME: Use inline constants (src_{shared, private}_base) instead. 3843 if (Subtarget->hasApertureRegs()) { 3844 unsigned Offset = AS == AMDGPUASI.LOCAL_ADDRESS ? 3845 AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE : 3846 AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE; 3847 unsigned WidthM1 = AS == AMDGPUASI.LOCAL_ADDRESS ? 3848 AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE : 3849 AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE; 3850 unsigned Encoding = 3851 AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ | 3852 Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ | 3853 WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_; 3854 3855 SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16); 3856 SDValue ApertureReg = SDValue( 3857 DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0); 3858 SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32); 3859 return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount); 3860 } 3861 3862 MachineFunction &MF = DAG.getMachineFunction(); 3863 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 3864 unsigned UserSGPR = Info->getQueuePtrUserSGPR(); 3865 assert(UserSGPR != AMDGPU::NoRegister); 3866 3867 SDValue QueuePtr = CreateLiveInRegister( 3868 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); 3869 3870 // Offset into amd_queue_t for group_segment_aperture_base_hi / 3871 // private_segment_aperture_base_hi. 3872 uint32_t StructOffset = (AS == AMDGPUASI.LOCAL_ADDRESS) ? 0x40 : 0x44; 3873 3874 SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset); 3875 3876 // TODO: Use custom target PseudoSourceValue. 3877 // TODO: We should use the value from the IR intrinsic call, but it might not 3878 // be available and how do we get it? 
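// For now, synthesize an IR value so the MachinePointerInfo below at least
// carries the correct (constant) address space for alias analysis.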
3879 Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()), 3880 AMDGPUASI.CONSTANT_ADDRESS)); 3881 3882 MachinePointerInfo PtrInfo(V, StructOffset); 3883 return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo, 3884 MinAlign(64, StructOffset), 3885 MachineMemOperand::MODereferenceable | 3886 MachineMemOperand::MOInvariant); 3887 } 3888 3889 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, 3890 SelectionDAG &DAG) const { 3891 SDLoc SL(Op); 3892 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op); 3893 3894 SDValue Src = ASC->getOperand(0); 3895 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64); 3896 3897 const AMDGPUTargetMachine &TM = 3898 static_cast<const AMDGPUTargetMachine &>(getTargetMachine()); 3899 3900 // flat -> local/private 3901 if (ASC->getSrcAddressSpace() == AMDGPUASI.FLAT_ADDRESS) { 3902 unsigned DestAS = ASC->getDestAddressSpace(); 3903 3904 if (DestAS == AMDGPUASI.LOCAL_ADDRESS || 3905 DestAS == AMDGPUASI.PRIVATE_ADDRESS) { 3906 unsigned NullVal = TM.getNullPointerValue(DestAS); 3907 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); 3908 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE); 3909 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); 3910 3911 return DAG.getNode(ISD::SELECT, SL, MVT::i32, 3912 NonNull, Ptr, SegmentNullPtr); 3913 } 3914 } 3915 3916 // local/private -> flat 3917 if (ASC->getDestAddressSpace() == AMDGPUASI.FLAT_ADDRESS) { 3918 unsigned SrcAS = ASC->getSrcAddressSpace(); 3919 3920 if (SrcAS == AMDGPUASI.LOCAL_ADDRESS || 3921 SrcAS == AMDGPUASI.PRIVATE_ADDRESS) { 3922 unsigned NullVal = TM.getNullPointerValue(SrcAS); 3923 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); 3924 3925 SDValue NonNull 3926 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE); 3927 3928 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG); 3929 SDValue CvtPtr 3930 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture); 3931 3932 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, 3933 DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr), 3934 FlatNullPtr); 3935 } 3936 } 3937 3938 // global <-> flat are no-ops and never emitted. 3939 3940 const MachineFunction &MF = DAG.getMachineFunction(); 3941 DiagnosticInfoUnsupported InvalidAddrSpaceCast( 3942 MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc()); 3943 DAG.getContext()->diagnose(InvalidAddrSpaceCast); 3944 3945 return DAG.getUNDEF(ASC->getValueType(0)); 3946 } 3947 3948 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, 3949 SelectionDAG &DAG) const { 3950 SDValue Idx = Op.getOperand(2); 3951 if (isa<ConstantSDNode>(Idx)) 3952 return SDValue(); 3953 3954 // Avoid stack access for dynamic indexing. 3955 SDLoc SL(Op); 3956 SDValue Vec = Op.getOperand(0); 3957 SDValue Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Op.getOperand(1)); 3958 3959 // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec 3960 SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Val); 3961 3962 // Convert vector index to bit-index. 
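// (For v2i16/v2f16, element 0 lives in bits 0-15 and element 1 in bits 16-31,
// so the bit-index is simply idx * 16.)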
3963 SDValue ScaledIdx = DAG.getNode(ISD::MUL, SL, MVT::i32, Idx,
3964 DAG.getConstant(16, SL, MVT::i32));
3965
3966 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
3967
3968 SDValue BFM = DAG.getNode(ISD::SHL, SL, MVT::i32,
3969 DAG.getConstant(0xffff, SL, MVT::i32),
3970 ScaledIdx);
3971
// Shift the new element into position before masking it into the vector.
3972 SDValue ShiftedVal = DAG.getNode(ISD::SHL, SL, MVT::i32, ExtVal, ScaledIdx);
SDValue LHS = DAG.getNode(ISD::AND, SL, MVT::i32, BFM, ShiftedVal);
3973 SDValue RHS = DAG.getNode(ISD::AND, SL, MVT::i32,
3974 DAG.getNOT(SL, BFM, MVT::i32), BCVec);
3975
3976 SDValue BFI = DAG.getNode(ISD::OR, SL, MVT::i32, LHS, RHS);
3977 return DAG.getNode(ISD::BITCAST, SL, Op.getValueType(), BFI);
3978 }
3979
3980 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3981 SelectionDAG &DAG) const {
3982 SDLoc SL(Op);
3983
3984 EVT ResultVT = Op.getValueType();
3985 SDValue Vec = Op.getOperand(0);
3986 SDValue Idx = Op.getOperand(1);
3987
3988 DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
3989
3990 // Make sure we do any optimizations that will make it easier to fold
3991 // source modifiers before obscuring it with bit operations.
3992
3993 // XXX - Why doesn't this get called when vector_shuffle is expanded?
3994 if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
3995 return Combined;
3996
3997 if (const ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
3998 SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
3999
4000 if (CIdx->getZExtValue() == 1) {
4001 Result = DAG.getNode(ISD::SRL, SL, MVT::i32, Result,
4002 DAG.getConstant(16, SL, MVT::i32));
4003 } else {
4004 assert(CIdx->getZExtValue() == 0);
4005 }
4006
4007 if (ResultVT.bitsLT(MVT::i32))
4008 Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);
4009 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4010 }
4011
4012 SDValue Sixteen = DAG.getConstant(16, SL, MVT::i32);
4013
4014 // Convert vector index to bit-index.
4015 SDValue ScaledIdx = DAG.getNode(ISD::MUL, SL, MVT::i32, Idx, Sixteen);
4016
4017 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
4018 SDValue Elt = DAG.getNode(ISD::SRL, SL, MVT::i32, BC, ScaledIdx);
4019
4020 SDValue Result = Elt;
4021 if (ResultVT.bitsLT(MVT::i32))
4022 Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);
4023
4024 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4025 }
4026
4027 bool
4028 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4029 // We can fold offsets for anything that doesn't require a GOT relocation.
4030 return (GA->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
4031 GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) &&
4032 !shouldEmitGOTReloc(GA->getGlobal());
4033 }
4034
4035 static SDValue
4036 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
4037 const SDLoc &DL, unsigned Offset, EVT PtrVT,
4038 unsigned GAFlags = SIInstrInfo::MO_NONE) {
4039 // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
4040 // lowered to the following code sequence:
4041 //
4042 // For constant address space:
4043 // s_getpc_b64 s[0:1]
4044 // s_add_u32 s0, s0, $symbol
4045 // s_addc_u32 s1, s1, 0
4046 //
4047 // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4048 // a fixup or relocation is emitted to replace $symbol with a literal
4049 // constant, which is a pc-relative offset from the encoding of the $symbol
4050 // operand to the global variable.
4051 // 4052 // For global address space: 4053 // s_getpc_b64 s[0:1] 4054 // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo 4055 // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi 4056 // 4057 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 4058 // fixups or relocations are emitted to replace $symbol@*@lo and 4059 // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant, 4060 // which is a 64-bit pc-relative offset from the encoding of the $symbol 4061 // operand to the global variable. 4062 // 4063 // What we want here is an offset from the value returned by s_getpc 4064 // (which is the address of the s_add_u32 instruction) to the global 4065 // variable, but since the encoding of $symbol starts 4 bytes after the start 4066 // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too 4067 // small. This requires us to add 4 to the global variable offset in order to 4068 // compute the correct address. 4069 SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, 4070 GAFlags); 4071 SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, 4072 GAFlags == SIInstrInfo::MO_NONE ? 4073 GAFlags : GAFlags + 1); 4074 return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi); 4075 } 4076 4077 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, 4078 SDValue Op, 4079 SelectionDAG &DAG) const { 4080 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op); 4081 const GlobalValue *GV = GSD->getGlobal(); 4082 4083 if (GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS && 4084 GSD->getAddressSpace() != AMDGPUASI.GLOBAL_ADDRESS && 4085 // FIXME: It isn't correct to rely on the type of the pointer. This should 4086 // be removed when address space 0 is 64-bit. 4087 !GV->getType()->getElementType()->isFunctionTy()) 4088 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG); 4089 4090 SDLoc DL(GSD); 4091 EVT PtrVT = Op.getValueType(); 4092 4093 if (shouldEmitFixup(GV)) 4094 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT); 4095 else if (shouldEmitPCReloc(GV)) 4096 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT, 4097 SIInstrInfo::MO_REL32); 4098 4099 SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT, 4100 SIInstrInfo::MO_GOTPCREL32); 4101 4102 Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext()); 4103 PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS); 4104 const DataLayout &DataLayout = DAG.getDataLayout(); 4105 unsigned Align = DataLayout.getABITypeAlignment(PtrTy); 4106 // FIXME: Use a PseudoSourceValue once those can be assigned an address space. 4107 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); 4108 4109 return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align, 4110 MachineMemOperand::MODereferenceable | 4111 MachineMemOperand::MOInvariant); 4112 } 4113 4114 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, 4115 const SDLoc &DL, SDValue V) const { 4116 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as 4117 // the destination register. 4118 // 4119 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions, 4120 // so we will end up with redundant moves to m0. 4121 // 4122 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result. 4123 4124 // A Null SDValue creates a glue result. 
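// (SI_INIT_M0 is expanded to "s_mov_b32 m0, <src>" by the SI_INIT_M0 case in
// EmitInstrWithCustomInserter above.)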
4125 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue, 4126 V, Chain); 4127 return SDValue(M0, 0); 4128 } 4129 4130 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, 4131 SDValue Op, 4132 MVT VT, 4133 unsigned Offset) const { 4134 SDLoc SL(Op); 4135 SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL, 4136 DAG.getEntryNode(), Offset, false); 4137 // The local size values will have the hi 16-bits as zero. 4138 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param, 4139 DAG.getValueType(VT)); 4140 } 4141 4142 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, 4143 EVT VT) { 4144 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), 4145 "non-hsa intrinsic with hsa target", 4146 DL.getDebugLoc()); 4147 DAG.getContext()->diagnose(BadIntrin); 4148 return DAG.getUNDEF(VT); 4149 } 4150 4151 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, 4152 EVT VT) { 4153 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), 4154 "intrinsic not supported on subtarget", 4155 DL.getDebugLoc()); 4156 DAG.getContext()->diagnose(BadIntrin); 4157 return DAG.getUNDEF(VT); 4158 } 4159 4160 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 4161 SelectionDAG &DAG) const { 4162 MachineFunction &MF = DAG.getMachineFunction(); 4163 auto MFI = MF.getInfo<SIMachineFunctionInfo>(); 4164 4165 EVT VT = Op.getValueType(); 4166 SDLoc DL(Op); 4167 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 4168 4169 // TODO: Should this propagate fast-math-flags? 4170 4171 switch (IntrinsicID) { 4172 case Intrinsic::amdgcn_implicit_buffer_ptr: { 4173 if (getSubtarget()->isAmdCodeObjectV2(MF)) 4174 return emitNonHSAIntrinsicError(DAG, DL, VT); 4175 return getPreloadedValue(DAG, *MFI, VT, 4176 AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR); 4177 } 4178 case Intrinsic::amdgcn_dispatch_ptr: 4179 case Intrinsic::amdgcn_queue_ptr: { 4180 if (!Subtarget->isAmdCodeObjectV2(MF)) { 4181 DiagnosticInfoUnsupported BadIntrin( 4182 MF.getFunction(), "unsupported hsa intrinsic without hsa target", 4183 DL.getDebugLoc()); 4184 DAG.getContext()->diagnose(BadIntrin); 4185 return DAG.getUNDEF(VT); 4186 } 4187 4188 auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ? 
4189 AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR; 4190 return getPreloadedValue(DAG, *MFI, VT, RegID); 4191 } 4192 case Intrinsic::amdgcn_implicitarg_ptr: { 4193 if (MFI->isEntryFunction()) 4194 return getImplicitArgPtr(DAG, DL); 4195 return getPreloadedValue(DAG, *MFI, VT, 4196 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); 4197 } 4198 case Intrinsic::amdgcn_kernarg_segment_ptr: { 4199 return getPreloadedValue(DAG, *MFI, VT, 4200 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); 4201 } 4202 case Intrinsic::amdgcn_dispatch_id: { 4203 return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID); 4204 } 4205 case Intrinsic::amdgcn_rcp: 4206 return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1)); 4207 case Intrinsic::amdgcn_rsq: 4208 return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 4209 case Intrinsic::amdgcn_rsq_legacy: 4210 if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) 4211 return emitRemovedIntrinsicError(DAG, DL, VT); 4212 4213 return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1)); 4214 case Intrinsic::amdgcn_rcp_legacy: 4215 if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) 4216 return emitRemovedIntrinsicError(DAG, DL, VT); 4217 return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1)); 4218 case Intrinsic::amdgcn_rsq_clamp: { 4219 if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS) 4220 return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1)); 4221 4222 Type *Type = VT.getTypeForEVT(*DAG.getContext()); 4223 APFloat Max = APFloat::getLargest(Type->getFltSemantics()); 4224 APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true); 4225 4226 SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 4227 SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq, 4228 DAG.getConstantFP(Max, DL, VT)); 4229 return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp, 4230 DAG.getConstantFP(Min, DL, VT)); 4231 } 4232 case Intrinsic::r600_read_ngroups_x: 4233 if (Subtarget->isAmdHsaOS()) 4234 return emitNonHSAIntrinsicError(DAG, DL, VT); 4235 4236 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4237 SI::KernelInputOffsets::NGROUPS_X, false); 4238 case Intrinsic::r600_read_ngroups_y: 4239 if (Subtarget->isAmdHsaOS()) 4240 return emitNonHSAIntrinsicError(DAG, DL, VT); 4241 4242 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4243 SI::KernelInputOffsets::NGROUPS_Y, false); 4244 case Intrinsic::r600_read_ngroups_z: 4245 if (Subtarget->isAmdHsaOS()) 4246 return emitNonHSAIntrinsicError(DAG, DL, VT); 4247 4248 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4249 SI::KernelInputOffsets::NGROUPS_Z, false); 4250 case Intrinsic::r600_read_global_size_x: 4251 if (Subtarget->isAmdHsaOS()) 4252 return emitNonHSAIntrinsicError(DAG, DL, VT); 4253 4254 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4255 SI::KernelInputOffsets::GLOBAL_SIZE_X, false); 4256 case Intrinsic::r600_read_global_size_y: 4257 if (Subtarget->isAmdHsaOS()) 4258 return emitNonHSAIntrinsicError(DAG, DL, VT); 4259 4260 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4261 SI::KernelInputOffsets::GLOBAL_SIZE_Y, false); 4262 case Intrinsic::r600_read_global_size_z: 4263 if (Subtarget->isAmdHsaOS()) 4264 return emitNonHSAIntrinsicError(DAG, DL, VT); 4265 4266 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4267 SI::KernelInputOffsets::GLOBAL_SIZE_Z, false); 4268 case 
Intrinsic::r600_read_local_size_x:
4269 if (Subtarget->isAmdHsaOS())
4270 return emitNonHSAIntrinsicError(DAG, DL, VT);
4271
4272 return lowerImplicitZextParam(DAG, Op, MVT::i16,
4273 SI::KernelInputOffsets::LOCAL_SIZE_X);
4274 case Intrinsic::r600_read_local_size_y:
4275 if (Subtarget->isAmdHsaOS())
4276 return emitNonHSAIntrinsicError(DAG, DL, VT);
4277
4278 return lowerImplicitZextParam(DAG, Op, MVT::i16,
4279 SI::KernelInputOffsets::LOCAL_SIZE_Y);
4280 case Intrinsic::r600_read_local_size_z:
4281 if (Subtarget->isAmdHsaOS())
4282 return emitNonHSAIntrinsicError(DAG, DL, VT);
4283
4284 return lowerImplicitZextParam(DAG, Op, MVT::i16,
4285 SI::KernelInputOffsets::LOCAL_SIZE_Z);
4286 case Intrinsic::amdgcn_workgroup_id_x:
4287 case Intrinsic::r600_read_tgid_x:
4288 return getPreloadedValue(DAG, *MFI, VT,
4289 AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
4290 case Intrinsic::amdgcn_workgroup_id_y:
4291 case Intrinsic::r600_read_tgid_y:
4292 return getPreloadedValue(DAG, *MFI, VT,
4293 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
4294 case Intrinsic::amdgcn_workgroup_id_z:
4295 case Intrinsic::r600_read_tgid_z:
4296 return getPreloadedValue(DAG, *MFI, VT,
4297 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
4298 case Intrinsic::amdgcn_workitem_id_x:
4299 case Intrinsic::r600_read_tidig_x:
4300 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4301 SDLoc(DAG.getEntryNode()),
4302 MFI->getArgInfo().WorkItemIDX);
4304 case Intrinsic::amdgcn_workitem_id_y:
4305 case Intrinsic::r600_read_tidig_y:
4306 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4307 SDLoc(DAG.getEntryNode()),
4308 MFI->getArgInfo().WorkItemIDY);
4309 case Intrinsic::amdgcn_workitem_id_z:
4310 case Intrinsic::r600_read_tidig_z:
4311 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4312 SDLoc(DAG.getEntryNode()),
4313 MFI->getArgInfo().WorkItemIDZ);
4314 case AMDGPUIntrinsic::SI_load_const: {
4315 SDValue Ops[] = {
4316 Op.getOperand(1),
4317 Op.getOperand(2)
4318 };
4319
4320 MachineMemOperand *MMO = MF.getMachineMemOperand(
4321 MachinePointerInfo(),
4322 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
4323 MachineMemOperand::MOInvariant,
4324 VT.getStoreSize(), 4);
4325 return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
4326 Op->getVTList(), Ops, VT, MMO);
4327 }
4328 case Intrinsic::amdgcn_fdiv_fast:
4329 return lowerFDIV_FAST(Op, DAG);
4330 case Intrinsic::amdgcn_interp_mov: {
4331 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
4332 SDValue Glue = M0.getValue(1);
4333 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
4334 Op.getOperand(2), Op.getOperand(3), Glue);
4335 }
4336 case Intrinsic::amdgcn_interp_p1: {
4337 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
4338 SDValue Glue = M0.getValue(1);
4339 return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
4340 Op.getOperand(2), Op.getOperand(3), Glue);
4341 }
4342 case Intrinsic::amdgcn_interp_p2: {
4343 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
4344 SDValue Glue = SDValue(M0.getNode(), 1);
4345 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
4346 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
4347 Glue);
4348 }
4349 case Intrinsic::amdgcn_sin:
4350 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
4351
4352 case Intrinsic::amdgcn_cos:
4353 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
4354
4355 case Intrinsic::amdgcn_log_clamp: {
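// v_log_clamp_f32 only exists on SI/CI; there the intrinsic can be selected
// directly, while VI and newer subtargets have no equivalent instruction.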
4356 if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
4357 return SDValue();
4358
4359 DiagnosticInfoUnsupported BadIntrin(
4360 MF.getFunction(), "intrinsic not supported on subtarget",
4361 DL.getDebugLoc());
4362 DAG.getContext()->diagnose(BadIntrin);
4363 return DAG.getUNDEF(VT);
4364 }
4365 case Intrinsic::amdgcn_ldexp:
4366 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
4367 Op.getOperand(1), Op.getOperand(2));
4368
4369 case Intrinsic::amdgcn_fract:
4370 return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
4371
4372 case Intrinsic::amdgcn_class:
4373 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
4374 Op.getOperand(1), Op.getOperand(2));
4375 case Intrinsic::amdgcn_div_fmas:
4376 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
4377 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
4378 Op.getOperand(4));
4379
4380 case Intrinsic::amdgcn_div_fixup:
4381 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
4382 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4383
4384 case Intrinsic::amdgcn_trig_preop:
4385 return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
4386 Op.getOperand(1), Op.getOperand(2));
4387 case Intrinsic::amdgcn_div_scale: {
4388 // The third parameter is required to be a constant.
4389 const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4390 if (!Param)
4391 return DAG.getMergeValues({ DAG.getUNDEF(VT), DAG.getUNDEF(MVT::i1) }, DL);
4392
4393 // Translate to the operands expected by the machine instruction. The first
4394 // operand must match either the numerator or the denominator; the constant
// third parameter selects which of the two it is.
4395 SDValue Numerator = Op.getOperand(1);
4396 SDValue Denominator = Op.getOperand(2);
4397
4398 // Note this order is the opposite of the machine instruction's operands,
4399 // which are s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
4400 // intrinsic has the numerator as the first operand to match a normal
4401 // division operation.
4402
4403 SDValue Src0 = Param->isAllOnesValue() ?
Numerator : Denominator; 4404 4405 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0, 4406 Denominator, Numerator); 4407 } 4408 case Intrinsic::amdgcn_icmp: { 4409 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 4410 if (!CD) 4411 return DAG.getUNDEF(VT); 4412 4413 int CondCode = CD->getSExtValue(); 4414 if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE || 4415 CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE) 4416 return DAG.getUNDEF(VT); 4417 4418 ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode); 4419 ISD::CondCode CCOpcode = getICmpCondCode(IcInput); 4420 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1), 4421 Op.getOperand(2), DAG.getCondCode(CCOpcode)); 4422 } 4423 case Intrinsic::amdgcn_fcmp: { 4424 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 4425 if (!CD) 4426 return DAG.getUNDEF(VT); 4427 4428 int CondCode = CD->getSExtValue(); 4429 if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE || 4430 CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) 4431 return DAG.getUNDEF(VT); 4432 4433 FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode); 4434 ISD::CondCode CCOpcode = getFCmpCondCode(IcInput); 4435 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1), 4436 Op.getOperand(2), DAG.getCondCode(CCOpcode)); 4437 } 4438 case Intrinsic::amdgcn_fmed3: 4439 return DAG.getNode(AMDGPUISD::FMED3, DL, VT, 4440 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 4441 case Intrinsic::amdgcn_fmul_legacy: 4442 return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT, 4443 Op.getOperand(1), Op.getOperand(2)); 4444 case Intrinsic::amdgcn_sffbh: 4445 return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1)); 4446 case Intrinsic::amdgcn_sbfe: 4447 return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT, 4448 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 4449 case Intrinsic::amdgcn_ubfe: 4450 return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT, 4451 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 4452 case Intrinsic::amdgcn_cvt_pkrtz: 4453 case Intrinsic::amdgcn_cvt_pknorm_i16: 4454 case Intrinsic::amdgcn_cvt_pknorm_u16: 4455 case Intrinsic::amdgcn_cvt_pk_i16: 4456 case Intrinsic::amdgcn_cvt_pk_u16: { 4457 // FIXME: Stop adding cast if v2f16/v2i16 are legal. 4458 EVT VT = Op.getValueType(); 4459 unsigned Opcode; 4460 4461 if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz) 4462 Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32; 4463 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16) 4464 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; 4465 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16) 4466 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; 4467 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16) 4468 Opcode = AMDGPUISD::CVT_PK_I16_I32; 4469 else 4470 Opcode = AMDGPUISD::CVT_PK_U16_U32; 4471 4472 SDValue Node = DAG.getNode(Opcode, DL, MVT::i32, 4473 Op.getOperand(1), Op.getOperand(2)); 4474 return DAG.getNode(ISD::BITCAST, DL, VT, Node); 4475 } 4476 case Intrinsic::amdgcn_wqm: { 4477 SDValue Src = Op.getOperand(1); 4478 return SDValue(DAG.getMachineNode(AMDGPU::WQM, DL, Src.getValueType(), Src), 4479 0); 4480 } 4481 case Intrinsic::amdgcn_wwm: { 4482 SDValue Src = Op.getOperand(1); 4483 return SDValue(DAG.getMachineNode(AMDGPU::WWM, DL, Src.getValueType(), Src), 4484 0); 4485 } 4486 case Intrinsic::amdgcn_image_getlod: 4487 case Intrinsic::amdgcn_image_getresinfo: { 4488 unsigned Idx = (IntrinsicID == Intrinsic::amdgcn_image_getresinfo) ? 
3 : 4; 4489 4490 // Replace dmask with everything disabled with undef. 4491 const ConstantSDNode *DMask = dyn_cast<ConstantSDNode>(Op.getOperand(Idx)); 4492 if (!DMask || DMask->isNullValue()) 4493 return DAG.getUNDEF(Op.getValueType()); 4494 return SDValue(); 4495 } 4496 default: 4497 return Op; 4498 } 4499 } 4500 4501 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, 4502 SelectionDAG &DAG) const { 4503 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 4504 SDLoc DL(Op); 4505 4506 switch (IntrID) { 4507 case Intrinsic::amdgcn_atomic_inc: 4508 case Intrinsic::amdgcn_atomic_dec: { 4509 MemSDNode *M = cast<MemSDNode>(Op); 4510 unsigned Opc = (IntrID == Intrinsic::amdgcn_atomic_inc) ? 4511 AMDGPUISD::ATOMIC_INC : AMDGPUISD::ATOMIC_DEC; 4512 SDValue Ops[] = { 4513 M->getOperand(0), // Chain 4514 M->getOperand(2), // Ptr 4515 M->getOperand(3) // Value 4516 }; 4517 4518 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops, 4519 M->getMemoryVT(), M->getMemOperand()); 4520 } 4521 case Intrinsic::amdgcn_buffer_load: 4522 case Intrinsic::amdgcn_buffer_load_format: { 4523 SDValue Ops[] = { 4524 Op.getOperand(0), // Chain 4525 Op.getOperand(2), // rsrc 4526 Op.getOperand(3), // vindex 4527 Op.getOperand(4), // offset 4528 Op.getOperand(5), // glc 4529 Op.getOperand(6) // slc 4530 }; 4531 4532 unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ? 4533 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; 4534 EVT VT = Op.getValueType(); 4535 EVT IntVT = VT.changeTypeToInteger(); 4536 4537 auto *M = cast<MemSDNode>(Op); 4538 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, 4539 M->getMemOperand()); 4540 } 4541 case Intrinsic::amdgcn_tbuffer_load: { 4542 MemSDNode *M = cast<MemSDNode>(Op); 4543 SDValue Ops[] = { 4544 Op.getOperand(0), // Chain 4545 Op.getOperand(2), // rsrc 4546 Op.getOperand(3), // vindex 4547 Op.getOperand(4), // voffset 4548 Op.getOperand(5), // soffset 4549 Op.getOperand(6), // offset 4550 Op.getOperand(7), // dfmt 4551 Op.getOperand(8), // nfmt 4552 Op.getOperand(9), // glc 4553 Op.getOperand(10) // slc 4554 }; 4555 4556 EVT VT = Op.getValueType(); 4557 4558 return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, 4559 Op->getVTList(), Ops, VT, M->getMemOperand()); 4560 } 4561 case Intrinsic::amdgcn_buffer_atomic_swap: 4562 case Intrinsic::amdgcn_buffer_atomic_add: 4563 case Intrinsic::amdgcn_buffer_atomic_sub: 4564 case Intrinsic::amdgcn_buffer_atomic_smin: 4565 case Intrinsic::amdgcn_buffer_atomic_umin: 4566 case Intrinsic::amdgcn_buffer_atomic_smax: 4567 case Intrinsic::amdgcn_buffer_atomic_umax: 4568 case Intrinsic::amdgcn_buffer_atomic_and: 4569 case Intrinsic::amdgcn_buffer_atomic_or: 4570 case Intrinsic::amdgcn_buffer_atomic_xor: { 4571 SDValue Ops[] = { 4572 Op.getOperand(0), // Chain 4573 Op.getOperand(2), // vdata 4574 Op.getOperand(3), // rsrc 4575 Op.getOperand(4), // vindex 4576 Op.getOperand(5), // offset 4577 Op.getOperand(6) // slc 4578 }; 4579 EVT VT = Op.getValueType(); 4580 4581 auto *M = cast<MemSDNode>(Op); 4582 unsigned Opcode = 0; 4583 4584 switch (IntrID) { 4585 case Intrinsic::amdgcn_buffer_atomic_swap: 4586 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP; 4587 break; 4588 case Intrinsic::amdgcn_buffer_atomic_add: 4589 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD; 4590 break; 4591 case Intrinsic::amdgcn_buffer_atomic_sub: 4592 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB; 4593 break; 4594 case Intrinsic::amdgcn_buffer_atomic_smin: 4595 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN; 4596 break; 4597 
case Intrinsic::amdgcn_buffer_atomic_umin: 4598 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN; 4599 break; 4600 case Intrinsic::amdgcn_buffer_atomic_smax: 4601 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX; 4602 break; 4603 case Intrinsic::amdgcn_buffer_atomic_umax: 4604 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX; 4605 break; 4606 case Intrinsic::amdgcn_buffer_atomic_and: 4607 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND; 4608 break; 4609 case Intrinsic::amdgcn_buffer_atomic_or: 4610 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR; 4611 break; 4612 case Intrinsic::amdgcn_buffer_atomic_xor: 4613 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR; 4614 break; 4615 default: 4616 llvm_unreachable("unhandled atomic opcode"); 4617 } 4618 4619 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, 4620 M->getMemOperand()); 4621 } 4622 4623 case Intrinsic::amdgcn_buffer_atomic_cmpswap: { 4624 SDValue Ops[] = { 4625 Op.getOperand(0), // Chain 4626 Op.getOperand(2), // src 4627 Op.getOperand(3), // cmp 4628 Op.getOperand(4), // rsrc 4629 Op.getOperand(5), // vindex 4630 Op.getOperand(6), // offset 4631 Op.getOperand(7) // slc 4632 }; 4633 EVT VT = Op.getValueType(); 4634 auto *M = cast<MemSDNode>(Op); 4635 4636 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, 4637 Op->getVTList(), Ops, VT, M->getMemOperand()); 4638 } 4639 4640 // Basic sample. 4641 case Intrinsic::amdgcn_image_sample: 4642 case Intrinsic::amdgcn_image_sample_cl: 4643 case Intrinsic::amdgcn_image_sample_d: 4644 case Intrinsic::amdgcn_image_sample_d_cl: 4645 case Intrinsic::amdgcn_image_sample_l: 4646 case Intrinsic::amdgcn_image_sample_b: 4647 case Intrinsic::amdgcn_image_sample_b_cl: 4648 case Intrinsic::amdgcn_image_sample_lz: 4649 case Intrinsic::amdgcn_image_sample_cd: 4650 case Intrinsic::amdgcn_image_sample_cd_cl: 4651 4652 // Sample with comparison. 4653 case Intrinsic::amdgcn_image_sample_c: 4654 case Intrinsic::amdgcn_image_sample_c_cl: 4655 case Intrinsic::amdgcn_image_sample_c_d: 4656 case Intrinsic::amdgcn_image_sample_c_d_cl: 4657 case Intrinsic::amdgcn_image_sample_c_l: 4658 case Intrinsic::amdgcn_image_sample_c_b: 4659 case Intrinsic::amdgcn_image_sample_c_b_cl: 4660 case Intrinsic::amdgcn_image_sample_c_lz: 4661 case Intrinsic::amdgcn_image_sample_c_cd: 4662 case Intrinsic::amdgcn_image_sample_c_cd_cl: 4663 4664 // Sample with offsets. 4665 case Intrinsic::amdgcn_image_sample_o: 4666 case Intrinsic::amdgcn_image_sample_cl_o: 4667 case Intrinsic::amdgcn_image_sample_d_o: 4668 case Intrinsic::amdgcn_image_sample_d_cl_o: 4669 case Intrinsic::amdgcn_image_sample_l_o: 4670 case Intrinsic::amdgcn_image_sample_b_o: 4671 case Intrinsic::amdgcn_image_sample_b_cl_o: 4672 case Intrinsic::amdgcn_image_sample_lz_o: 4673 case Intrinsic::amdgcn_image_sample_cd_o: 4674 case Intrinsic::amdgcn_image_sample_cd_cl_o: 4675 4676 // Sample with comparison and offsets. 4677 case Intrinsic::amdgcn_image_sample_c_o: 4678 case Intrinsic::amdgcn_image_sample_c_cl_o: 4679 case Intrinsic::amdgcn_image_sample_c_d_o: 4680 case Intrinsic::amdgcn_image_sample_c_d_cl_o: 4681 case Intrinsic::amdgcn_image_sample_c_l_o: 4682 case Intrinsic::amdgcn_image_sample_c_b_o: 4683 case Intrinsic::amdgcn_image_sample_c_b_cl_o: 4684 case Intrinsic::amdgcn_image_sample_c_lz_o: 4685 case Intrinsic::amdgcn_image_sample_c_cd_o: 4686 case Intrinsic::amdgcn_image_sample_c_cd_cl_o: { 4687 // Replace dmask with everything disabled with undef. 
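// (A dmask of 0 enables no result channels, so the sample can never produce a
// meaningful value; only the chain result needs to be preserved.)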
4688 const ConstantSDNode *DMask = dyn_cast<ConstantSDNode>(Op.getOperand(5)); 4689 if (!DMask || DMask->isNullValue()) { 4690 SDValue Undef = DAG.getUNDEF(Op.getValueType()); 4691 return DAG.getMergeValues({ Undef, Op.getOperand(0) }, SDLoc(Op)); 4692 } 4693 4694 return SDValue(); 4695 } 4696 default: 4697 return SDValue(); 4698 } 4699 } 4700 4701 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, 4702 SelectionDAG &DAG) const { 4703 SDLoc DL(Op); 4704 SDValue Chain = Op.getOperand(0); 4705 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 4706 MachineFunction &MF = DAG.getMachineFunction(); 4707 4708 switch (IntrinsicID) { 4709 case Intrinsic::amdgcn_exp: { 4710 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2)); 4711 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3)); 4712 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8)); 4713 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9)); 4714 4715 const SDValue Ops[] = { 4716 Chain, 4717 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt 4718 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en 4719 Op.getOperand(4), // src0 4720 Op.getOperand(5), // src1 4721 Op.getOperand(6), // src2 4722 Op.getOperand(7), // src3 4723 DAG.getTargetConstant(0, DL, MVT::i1), // compr 4724 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1) 4725 }; 4726 4727 unsigned Opc = Done->isNullValue() ? 4728 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE; 4729 return DAG.getNode(Opc, DL, Op->getVTList(), Ops); 4730 } 4731 case Intrinsic::amdgcn_exp_compr: { 4732 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2)); 4733 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3)); 4734 SDValue Src0 = Op.getOperand(4); 4735 SDValue Src1 = Op.getOperand(5); 4736 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6)); 4737 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7)); 4738 4739 SDValue Undef = DAG.getUNDEF(MVT::f32); 4740 const SDValue Ops[] = { 4741 Chain, 4742 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt 4743 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en 4744 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), 4745 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), 4746 Undef, // src2 4747 Undef, // src3 4748 DAG.getTargetConstant(1, DL, MVT::i1), // compr 4749 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1) 4750 }; 4751 4752 unsigned Opc = Done->isNullValue() ? 4753 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE; 4754 return DAG.getNode(Opc, DL, Op->getVTList(), Ops); 4755 } 4756 case Intrinsic::amdgcn_s_sendmsg: 4757 case Intrinsic::amdgcn_s_sendmsghalt: { 4758 unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ? 
4759 AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
4760 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
4761 SDValue Glue = Chain.getValue(1);
4762 return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
4763 Op.getOperand(2), Glue);
4764 }
4765 case Intrinsic::amdgcn_init_exec: {
4766 return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
4767 Op.getOperand(2));
4768 }
4769 case Intrinsic::amdgcn_init_exec_from_input: {
4770 return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
4771 Op.getOperand(2), Op.getOperand(3));
4772 }
4773 case AMDGPUIntrinsic::AMDGPU_kill: {
4774 SDValue Src = Op.getOperand(2);
4775 if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) {
4776 if (!K->isNegative())
4777 return Chain;
4778
4779 SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32);
4780 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne);
4781 }
4782
4783 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src);
4784 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast);
4785 }
4786 case Intrinsic::amdgcn_s_barrier: {
4787 if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
4788 const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
4789 unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
4790 if (WGSize <= ST.getWavefrontSize())
4791 return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
4792 Op.getOperand(0)), 0);
4793 }
4794 return SDValue();
4795 }
4796 case AMDGPUIntrinsic::SI_tbuffer_store: {
4797
4798 // Extract vindex and voffset from vaddr as appropriate
4799 const ConstantSDNode *OffEn = cast<ConstantSDNode>(Op.getOperand(10));
4800 const ConstantSDNode *IdxEn = cast<ConstantSDNode>(Op.getOperand(11));
4801 SDValue VAddr = Op.getOperand(5);
4802
4803 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
4804
4805 assert(!(OffEn->isOne() && IdxEn->isOne()) &&
4806 "Legacy intrinsic doesn't support both offset and index - use new version");
4807
4808 SDValue VIndex = IdxEn->isOne() ? VAddr : Zero;
4809 SDValue VOffset = OffEn->isOne() ? VAddr : Zero;
4810
4811 // Deal with the vec-3 case
4812 const ConstantSDNode *NumChannels = cast<ConstantSDNode>(Op.getOperand(4));
4813 auto Opcode = NumChannels->getZExtValue() == 3 ?
4814 AMDGPUISD::TBUFFER_STORE_FORMAT_X3 : AMDGPUISD::TBUFFER_STORE_FORMAT;
4815
4816 SDValue Ops[] = {
4817 Chain,
4818 Op.getOperand(3), // vdata
4819 Op.getOperand(2), // rsrc
4820 VIndex,
4821 VOffset,
4822 Op.getOperand(6), // soffset
4823 Op.getOperand(7), // inst_offset
4824 Op.getOperand(8), // dfmt
4825 Op.getOperand(9), // nfmt
4826 Op.getOperand(12), // glc
4827 Op.getOperand(13), // slc
4828 };
4829
4830 assert((cast<ConstantSDNode>(Op.getOperand(14)))->getZExtValue() == 0 &&
4831 "Value of tfe other than zero is unsupported");
4832
4833 EVT VT = Op.getOperand(3).getValueType();
4834 MachineMemOperand *MMO = MF.getMachineMemOperand(
4835 MachinePointerInfo(),
4836 MachineMemOperand::MOStore,
4837 VT.getStoreSize(), 4);
4838 return DAG.getMemIntrinsicNode(Opcode, DL,
4839 Op->getVTList(), Ops, VT, MMO);
4840 }
4841
4842 case Intrinsic::amdgcn_tbuffer_store: {
4843 SDValue Ops[] = {
4844 Chain,
4845 Op.getOperand(2), // vdata
4846 Op.getOperand(3), // rsrc
4847 Op.getOperand(4), // vindex
4848 Op.getOperand(5), // voffset
4849 Op.getOperand(6), // soffset
4850 Op.getOperand(7), // offset
4851 Op.getOperand(8), // dfmt
4852 Op.getOperand(9), // nfmt
4853 Op.getOperand(10), // glc
4854 Op.getOperand(11) // slc
4855 };
4856 EVT VT = Op.getOperand(2).getValueType(); // size of vdata, the stored value
4857 MachineMemOperand *MMO = MF.getMachineMemOperand(
4858 MachinePointerInfo(),
4859 MachineMemOperand::MOStore,
4860 VT.getStoreSize(), 4);
4861 return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL,
4862 Op->getVTList(), Ops, VT, MMO);
4863 }
4864
4865 case Intrinsic::amdgcn_buffer_store:
4866 case Intrinsic::amdgcn_buffer_store_format: {
4867 SDValue Ops[] = {
4868 Chain,
4869 Op.getOperand(2), // vdata
4870 Op.getOperand(3), // rsrc
4871 Op.getOperand(4), // vindex
4872 Op.getOperand(5), // offset
4873 Op.getOperand(6), // glc
4874 Op.getOperand(7) // slc
4875 };
4876 EVT VT = Op.getOperand(2).getValueType(); // size of vdata, the stored value
4877 MachineMemOperand *MMO = MF.getMachineMemOperand(
4878 MachinePointerInfo(),
4879 MachineMemOperand::MOStore |
4880 MachineMemOperand::MODereferenceable,
4881 VT.getStoreSize(), 4);
4882
4883 unsigned Opcode = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
4884 AMDGPUISD::BUFFER_STORE :
4885 AMDGPUISD::BUFFER_STORE_FORMAT;
4886 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, MMO);
4887 }
4888
4889 default:
4890 return Op;
4891 }
4892 }
4893
4894 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
4895 SDLoc DL(Op);
4896 LoadSDNode *Load = cast<LoadSDNode>(Op);
4897 ISD::LoadExtType ExtType = Load->getExtensionType();
4898 EVT MemVT = Load->getMemoryVT();
4899
4900 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
4901 if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
4902 return SDValue();
4903
4904 // FIXME: Copied from PPC
4905 // First, load into 32 bits, then truncate to 1 bit.
4906
4907 SDValue Chain = Load->getChain();
4908 SDValue BasePtr = Load->getBasePtr();
4909 MachineMemOperand *MMO = Load->getMemOperand();
4910
4911 EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
4912
4913 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
4914 BasePtr, RealMemVT, MMO);
4915
4916 SDValue Ops[] = {
4917 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
4918 NewLD.getValue(1)
4919 };
4920
4921 return DAG.getMergeValues(Ops, DL);
4922 }
4923
4924 if (!MemVT.isVector())
4925 return SDValue();
4926
4927 assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
4928 "Custom lowering for non-i32 vectors hasn't been implemented.");
4929
4930 unsigned AS = Load->getAddressSpace();
4931 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
4932 AS, Load->getAlignment())) {
4933 SDValue Ops[2];
4934 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
4935 return DAG.getMergeValues(Ops, DL);
4936 }
4937
4938 MachineFunction &MF = DAG.getMachineFunction();
4939 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
4940 // If there is a possibility that flat instructions access scratch memory,
4941 // then we need to use the same legalization rules we use for private.
4942 if (AS == AMDGPUASI.FLAT_ADDRESS)
4943 AS = MFI->hasFlatScratchInit() ?
4944 AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
4945
4946 unsigned NumElements = MemVT.getVectorNumElements();
4947 if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
4948 if (isMemOpUniform(Load))
4949 return SDValue();
4950 // Non-uniform loads will be selected to MUBUF instructions, so they
4951 // have the same legalization requirements as global and private
4952 // loads.
4953 //
4954 }
4955 if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS) {
4956 if (Subtarget->getScalarizeGlobalBehavior() && isMemOpUniform(Load) &&
4957 !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load))
4958 return SDValue();
4959 // Non-uniform loads will be selected to MUBUF instructions, so they
4960 // have the same legalization requirements as global and private
4961 // loads.
4962 //
4963 }
4964 if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS ||
4965 AS == AMDGPUASI.FLAT_ADDRESS) {
4966 if (NumElements > 4)
4967 return SplitVectorLoad(Op, DAG);
4968 // v4 loads are supported for private and global memory.
4969 return SDValue();
4970 }
4971 if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
4972 // Depending on the setting of the private_element_size field in the
4973 // resource descriptor, we can only make private accesses up to a certain
4974 // size.
4975 switch (Subtarget->getMaxPrivateElementSize()) {
4976 case 4:
4977 return scalarizeVectorLoad(Load, DAG);
4978 case 8:
4979 if (NumElements > 2)
4980 return SplitVectorLoad(Op, DAG);
4981 return SDValue();
4982 case 16:
4983 // Same as global/flat
4984 if (NumElements > 4)
4985 return SplitVectorLoad(Op, DAG);
4986 return SDValue();
4987 default:
4988 llvm_unreachable("unsupported private_element_size");
4989 }
4990 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
4991 if (NumElements > 2)
4992 return SplitVectorLoad(Op, DAG);
4993
4994 if (NumElements == 2)
4995 return SDValue();
4996
4997 // If properly aligned, splitting might let us use ds_read_b64.
4998 return SplitVectorLoad(Op, DAG);
4999 }
5000 return SDValue();
5001 }
5002
5003 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
5004 if (Op.getValueType() != MVT::i64)
5005 return SDValue();
5006
5007 SDLoc DL(Op);
5008 SDValue Cond = Op.getOperand(0);
5009
5010 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
5011 SDValue One = DAG.getConstant(1, DL, MVT::i32);
5012
5013 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
5014 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
5015
5016 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
5017 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
5018
5019 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
5020
5021 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
5022 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
5023
5024 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
5025
5026 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
5027 return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res);
5028 }
5029
5030 // Catch division cases where we can use shortcuts with rcp and rsq
5031 // instructions.
5032 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
5033 SelectionDAG &DAG) const {
5034 SDLoc SL(Op);
5035 SDValue LHS = Op.getOperand(0);
5036 SDValue RHS = Op.getOperand(1);
5037 EVT VT = Op.getValueType();
5038 const SDNodeFlags Flags = Op->getFlags();
5039 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath ||
5040 Flags.hasUnsafeAlgebra() || Flags.hasAllowReciprocal();
5041
5042 if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
5043 return SDValue();
5044
5045 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
5046 if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
5047 if (CLHS->isExactlyValue(1.0)) {
5048 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
5049 // the CI documentation they have a worst-case error of 1 ulp.
5050 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
5051 // use them as long as we aren't trying to use denormals.
5052 //
5053 // v_rcp_f16 and v_rsq_f16 DO support denormals.
5054
5055 // 1.0 / sqrt(x) -> rsq(x)
5056
5057 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
5058 // error seems really high at 2^29 ULP.
5059 if (RHS.getOpcode() == ISD::FSQRT)
5060 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
5061
5062 // 1.0 / x -> rcp(x)
5063 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
5064 }
5065
5066 // Same as for 1.0, but expand the sign out of the constant.
5067 if (CLHS->isExactlyValue(-1.0)) {
5068 // -1.0 / x -> rcp (fneg x)
5069 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
5070 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
5071 }
5072 }
5073 }
5074
5075 if (Unsafe) {
5076 // Turn into multiply by the reciprocal.
5077 // x / y -> x * (1.0 / y) 5078 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 5079 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags); 5080 } 5081 5082 return SDValue(); 5083 } 5084 5085 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, 5086 EVT VT, SDValue A, SDValue B, SDValue GlueChain) { 5087 if (GlueChain->getNumValues() <= 1) { 5088 return DAG.getNode(Opcode, SL, VT, A, B); 5089 } 5090 5091 assert(GlueChain->getNumValues() == 3); 5092 5093 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); 5094 switch (Opcode) { 5095 default: llvm_unreachable("no chain equivalent for opcode"); 5096 case ISD::FMUL: 5097 Opcode = AMDGPUISD::FMUL_W_CHAIN; 5098 break; 5099 } 5100 5101 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, 5102 GlueChain.getValue(2)); 5103 } 5104 5105 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, 5106 EVT VT, SDValue A, SDValue B, SDValue C, 5107 SDValue GlueChain) { 5108 if (GlueChain->getNumValues() <= 1) { 5109 return DAG.getNode(Opcode, SL, VT, A, B, C); 5110 } 5111 5112 assert(GlueChain->getNumValues() == 3); 5113 5114 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); 5115 switch (Opcode) { 5116 default: llvm_unreachable("no chain equivalent for opcode"); 5117 case ISD::FMA: 5118 Opcode = AMDGPUISD::FMA_W_CHAIN; 5119 break; 5120 } 5121 5122 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C, 5123 GlueChain.getValue(2)); 5124 } 5125 5126 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const { 5127 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 5128 return FastLowered; 5129 5130 SDLoc SL(Op); 5131 SDValue Src0 = Op.getOperand(0); 5132 SDValue Src1 = Op.getOperand(1); 5133 5134 SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); 5135 SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); 5136 5137 SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1); 5138 SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1); 5139 5140 SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32); 5141 SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag); 5142 5143 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0); 5144 } 5145 5146 // Faster 2.5 ULP division that does not support denormals. 5147 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const { 5148 SDLoc SL(Op); 5149 SDValue LHS = Op.getOperand(1); 5150 SDValue RHS = Op.getOperand(2); 5151 5152 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); 5153 5154 const APFloat K0Val(BitsToFloat(0x6f800000)); 5155 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32); 5156 5157 const APFloat K1Val(BitsToFloat(0x2f800000)); 5158 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32); 5159 5160 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 5161 5162 EVT SetCCVT = 5163 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); 5164 5165 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); 5166 5167 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); 5168 5169 // TODO: Should this propagate fast-math-flags? 5170 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); 5171 5172 // rcp does not support denormals. 
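// A note on the constants used above (illustrative reading): K0 =
// 0x6f800000 is 2^96 and K1 = 0x2f800000 is 2^-32. When |RHS| > 2^96 the
// denominator is pre-scaled by 2^-32 so that the reciprocal computed below
// stays clear of the denormal range, and the final multiply by r3 (the
// same 2^-32 factor, or 1.0 when no scaling was applied) restores the
// quotient:
//   x * rcp(y * 2^-32) * 2^-32 ~= x * (2^32 / y) * 2^-32 = x / y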
5173 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); 5174 5175 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); 5176 5177 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); 5178 } 5179 5180 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { 5181 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 5182 return FastLowered; 5183 5184 SDLoc SL(Op); 5185 SDValue LHS = Op.getOperand(0); 5186 SDValue RHS = Op.getOperand(1); 5187 5188 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 5189 5190 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1); 5191 5192 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, 5193 RHS, RHS, LHS); 5194 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, 5195 LHS, RHS, LHS); 5196 5197 // Denominator is scaled to not be denormal, so using rcp is ok. 5198 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, 5199 DenominatorScaled); 5200 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, 5201 DenominatorScaled); 5202 5203 const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE | 5204 (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) | 5205 (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_); 5206 5207 const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16); 5208 5209 if (!Subtarget->hasFP32Denormals()) { 5210 SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); 5211 const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE, 5212 SL, MVT::i32); 5213 SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs, 5214 DAG.getEntryNode(), 5215 EnableDenormValue, BitField); 5216 SDValue Ops[3] = { 5217 NegDivScale0, 5218 EnableDenorm.getValue(0), 5219 EnableDenorm.getValue(1) 5220 }; 5221 5222 NegDivScale0 = DAG.getMergeValues(Ops, SL); 5223 } 5224 5225 SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, 5226 ApproxRcp, One, NegDivScale0); 5227 5228 SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, 5229 ApproxRcp, Fma0); 5230 5231 SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled, 5232 Fma1, Fma1); 5233 5234 SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, 5235 NumeratorScaled, Mul); 5236 5237 SDValue Fma3 = getFPTernOp(DAG, ISD::FMA,SL, MVT::f32, Fma2, Fma1, Mul, Fma2); 5238 5239 SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, 5240 NumeratorScaled, Fma3); 5241 5242 if (!Subtarget->hasFP32Denormals()) { 5243 const SDValue DisableDenormValue = 5244 DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32); 5245 SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other, 5246 Fma4.getValue(1), 5247 DisableDenormValue, 5248 BitField, 5249 Fma4.getValue(2)); 5250 5251 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, 5252 DisableDenorm, DAG.getRoot()); 5253 DAG.setRoot(OutputChain); 5254 } 5255 5256 SDValue Scale = NumeratorScaled.getValue(1); 5257 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32, 5258 Fma4, Fma1, Fma3, Scale); 5259 5260 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS); 5261 } 5262 5263 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { 5264 if (DAG.getTarget().Options.UnsafeFPMath) 5265 return lowerFastUnsafeFDIV(Op, DAG); 5266 5267 SDLoc SL(Op); 5268 SDValue X = Op.getOperand(0); 5269 SDValue Y = Op.getOperand(1); 5270 5271 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); 5272 5273 SDVTList ScaleVT = 
DAG.getVTList(MVT::f64, MVT::i1);
5274
5275 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
5276
5277 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
5278
5279 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
5280
5281 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
5282
5283 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
5284
5285 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
5286
5287 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
5288
5289 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
5290 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
5291
5292 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
5293 NegDivScale0, Mul, DivScale1);
5294
5295 SDValue Scale;
5296
5297 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
5298 // Work around a hardware bug on SI where the condition output from div_scale
5299 // is not usable.
5300
5301 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
5302
5303 // Figure out which scale to use for div_fmas.
5304 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
5305 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
5306 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
5307 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
5308
5309 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
5310 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
5311
5312 SDValue Scale0Hi
5313 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
5314 SDValue Scale1Hi
5315 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
5316
5317 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
5318 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
5319 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
5320 } else {
5321 Scale = DivScale1.getValue(1);
5322 }
5323
5324 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
5325 Fma4, Fma3, Mul, Scale);
5326
5327 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
5328 }
5329
5330 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
5331 EVT VT = Op.getValueType();
5332
5333 if (VT == MVT::f32)
5334 return LowerFDIV32(Op, DAG);
5335
5336 if (VT == MVT::f64)
5337 return LowerFDIV64(Op, DAG);
5338
5339 if (VT == MVT::f16)
5340 return LowerFDIV16(Op, DAG);
5341
5342 llvm_unreachable("Unexpected type for fdiv");
5343 }
5344
5345 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
5346 SDLoc DL(Op);
5347 StoreSDNode *Store = cast<StoreSDNode>(Op);
5348 EVT VT = Store->getMemoryVT();
5349
5350 if (VT == MVT::i1) {
5351 return DAG.getTruncStore(Store->getChain(), DL,
5352 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
5353 Store->getBasePtr(), MVT::i1, Store->getMemOperand());
5354 }
5355
5356 assert(VT.isVector() &&
5357 Store->getValue().getValueType().getScalarType() == MVT::i32);
5358
5359 unsigned AS = Store->getAddressSpace();
5360 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
5361 AS, Store->getAlignment())) {
5362 return expandUnalignedStore(Store, DAG);
5363 }
5364
5365 MachineFunction &MF = DAG.getMachineFunction();
5366 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
5367 // If there is a possibility that flat instructions access scratch memory,
5368 // then we need to use the same legalization rules we use for private.
5369 if (AS == AMDGPUASI.FLAT_ADDRESS)
5370 AS = MFI->hasFlatScratchInit() ?
5371 AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
5372
5373 unsigned NumElements = VT.getVectorNumElements();
5374 if (AS == AMDGPUASI.GLOBAL_ADDRESS ||
5375 AS == AMDGPUASI.FLAT_ADDRESS) {
5376 if (NumElements > 4)
5377 return SplitVectorStore(Op, DAG);
5378 return SDValue();
5379 } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
5380 switch (Subtarget->getMaxPrivateElementSize()) {
5381 case 4:
5382 return scalarizeVectorStore(Store, DAG);
5383 case 8:
5384 if (NumElements > 2)
5385 return SplitVectorStore(Op, DAG);
5386 return SDValue();
5387 case 16:
5388 if (NumElements > 4)
5389 return SplitVectorStore(Op, DAG);
5390 return SDValue();
5391 default:
5392 llvm_unreachable("unsupported private_element_size");
5393 }
5394 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
5395 if (NumElements > 2)
5396 return SplitVectorStore(Op, DAG);
5397
5398 if (NumElements == 2)
5399 return Op;
5400
5401 // If properly aligned, splitting might let us use ds_write_b64.
5402 return SplitVectorStore(Op, DAG);
5403 } else {
5404 llvm_unreachable("unhandled address space");
5405 }
5406 }
5407
5408 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
5409 SDLoc DL(Op);
5410 EVT VT = Op.getValueType();
5411 SDValue Arg = Op.getOperand(0);
5412 // TODO: Should this propagate fast-math-flags?
5413 SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
5414 DAG.getNode(ISD::FMUL, DL, VT, Arg,
5415 DAG.getConstantFP(0.5/M_PI, DL,
5416 VT)));
5417
5418 switch (Op.getOpcode()) {
5419 case ISD::FCOS:
5420 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart);
5421 case ISD::FSIN:
5422 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart);
5423 default:
5424 llvm_unreachable("Wrong trig opcode");
5425 }
5426 }
5427
5428 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
5429 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
5430 assert(AtomicNode->isCompareAndSwap());
5431 unsigned AS = AtomicNode->getAddressSpace();
5432
5433 // No custom lowering required for local address space
5434 if (!isFlatGlobalAddrSpace(AS, AMDGPUASI))
5435 return Op;
5436
5437 // Non-local address space requires custom lowering for atomic compare
5438 // and swap; cmp and swap should be in a v2i32 or v2i64 in the case of _X2
5439 SDLoc DL(Op);
5440 SDValue ChainIn = Op.getOperand(0);
5441 SDValue Addr = Op.getOperand(1);
5442 SDValue Old = Op.getOperand(2);
5443 SDValue New = Op.getOperand(3);
5444 EVT VT = Op.getValueType();
5445 MVT SimpleVT = VT.getSimpleVT();
5446 MVT VecType = MVT::getVectorVT(SimpleVT, 2);
5447
5448 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
5449 SDValue Ops[] = { ChainIn, Addr, NewOld };
5450
5451 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
5452 Ops, VT, AtomicNode->getMemOperand());
5453 }
5454
5455 //===----------------------------------------------------------------------===//
5456 // Custom DAG optimizations
5457 //===----------------------------------------------------------------------===//
5458
5459 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
5460 DAGCombinerInfo &DCI) const {
5461 EVT VT = N->getValueType(0);
5462 EVT ScalarVT = VT.getScalarType();
5463 if (ScalarVT != MVT::f32)
5464 return SDValue();
5465
5466 SelectionDAG &DAG =
DCI.DAG; 5467 SDLoc DL(N); 5468 5469 SDValue Src = N->getOperand(0); 5470 EVT SrcVT = Src.getValueType(); 5471 5472 // TODO: We could try to match extracting the higher bytes, which would be 5473 // easier if i8 vectors weren't promoted to i32 vectors, particularly after 5474 // types are legalized. v4i8 -> v4f32 is probably the only case to worry 5475 // about in practice. 5476 if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) { 5477 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { 5478 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src); 5479 DCI.AddToWorklist(Cvt.getNode()); 5480 return Cvt; 5481 } 5482 } 5483 5484 return SDValue(); 5485 } 5486 5487 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2) 5488 5489 // This is a variant of 5490 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2), 5491 // 5492 // The normal DAG combiner will do this, but only if the add has one use since 5493 // that would increase the number of instructions. 5494 // 5495 // This prevents us from seeing a constant offset that can be folded into a 5496 // memory instruction's addressing mode. If we know the resulting add offset of 5497 // a pointer can be folded into an addressing offset, we can replace the pointer 5498 // operand with the add of new constant offset. This eliminates one of the uses, 5499 // and may allow the remaining use to also be simplified. 5500 // 5501 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N, 5502 unsigned AddrSpace, 5503 EVT MemVT, 5504 DAGCombinerInfo &DCI) const { 5505 SDValue N0 = N->getOperand(0); 5506 SDValue N1 = N->getOperand(1); 5507 5508 // We only do this to handle cases where it's profitable when there are 5509 // multiple uses of the add, so defer to the standard combine. 5510 if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) || 5511 N0->hasOneUse()) 5512 return SDValue(); 5513 5514 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1); 5515 if (!CN1) 5516 return SDValue(); 5517 5518 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 5519 if (!CAdd) 5520 return SDValue(); 5521 5522 // If the resulting offset is too large, we can't fold it into the addressing 5523 // mode offset. 5524 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); 5525 Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext()); 5526 5527 AddrMode AM; 5528 AM.HasBaseReg = true; 5529 AM.BaseOffs = Offset.getSExtValue(); 5530 if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace)) 5531 return SDValue(); 5532 5533 SelectionDAG &DAG = DCI.DAG; 5534 SDLoc SL(N); 5535 EVT VT = N->getValueType(0); 5536 5537 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); 5538 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32); 5539 5540 SDNodeFlags Flags; 5541 Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() && 5542 (N0.getOpcode() == ISD::OR || 5543 N0->getFlags().hasNoUnsignedWrap())); 5544 5545 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags); 5546 } 5547 5548 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N, 5549 DAGCombinerInfo &DCI) const { 5550 SDValue Ptr = N->getBasePtr(); 5551 SelectionDAG &DAG = DCI.DAG; 5552 SDLoc SL(N); 5553 5554 // TODO: We could also do this for multiplies. 
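// A small, hypothetical example of the combine performed below: a load
// addressed by (shl (add x, 16), 2) can be rewritten to use
// (add (shl x, 2), 64), and the +64 may then fold into the memory
// instruction's immediate offset, provided isLegalAddressingMode accepts
// a 64-byte offset for this address space.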
5555 if (Ptr.getOpcode() == ISD::SHL) {
5556 SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(),
5557 N->getMemoryVT(), DCI);
5558 if (NewPtr) {
5559 SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
5560
5561 NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
5562 return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
5563 }
5564 }
5565
5566 return SDValue();
5567 }
5568
5569 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
5570 return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
5571 (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
5572 (Opc == ISD::XOR && Val == 0);
5573 }
5574
5575 // Break up a 64-bit bit operation with a constant into two 32-bit and/or/xor.
5576 // This will typically happen anyway for a VALU 64-bit and. This exposes other
5577 // 32-bit integer combine opportunities since most 64-bit operations are
5578 // decomposed this way. TODO: We won't want this for SALU especially if it is
5579 // an inline immediate.
5580 SDValue SITargetLowering::splitBinaryBitConstantOp(
5581 DAGCombinerInfo &DCI,
5582 const SDLoc &SL,
5583 unsigned Opc, SDValue LHS,
5584 const ConstantSDNode *CRHS) const {
5585 uint64_t Val = CRHS->getZExtValue();
5586 uint32_t ValLo = Lo_32(Val);
5587 uint32_t ValHi = Hi_32(Val);
5588 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
5589
5590 if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
5591 bitOpWithConstantIsReducible(Opc, ValHi)) ||
5592 (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
5593 // If we need to materialize a 64-bit immediate, it will be split up later
5594 // anyway. Avoid creating the harder to understand 64-bit immediate
5595 // materialization.
5596 return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
5597 }
5598
5599 return SDValue();
5600 }
5601
5602 // Returns true if the argument is a boolean value which is not serialized
5603 // into memory or as an argument and does not require v_cndmask_b32 to be
5604 // deserialized.
static bool isBoolSGPR(SDValue V) {
5605 if (V.getValueType() != MVT::i1)
5606 return false;
5607 switch (V.getOpcode()) {
5608 default: break;
5609 case ISD::SETCC:
5610 case ISD::AND:
5611 case ISD::OR:
5612 case ISD::XOR:
5613 case AMDGPUISD::FP_CLASS:
5614 return true;
5615 }
5616 return false;
5617 }
5618
5619 SDValue SITargetLowering::performAndCombine(SDNode *N,
5620 DAGCombinerInfo &DCI) const {
5621 if (DCI.isBeforeLegalize())
5622 return SDValue();
5623
5624 SelectionDAG &DAG = DCI.DAG;
5625 EVT VT = N->getValueType(0);
5626 SDValue LHS = N->getOperand(0);
5627 SDValue RHS = N->getOperand(1);
5628
5629
5630 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
5631 if (VT == MVT::i64 && CRHS) {
5632 if (SDValue Split
5633 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
5634 return Split;
5635 }
5636
5637 if (CRHS && VT == MVT::i32) {
5638 // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
5639 // nb = number of trailing zeroes in mask
5640 // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
5641 // given that we are selecting 8 or 16 bit fields starting at a byte boundary.
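// Worked example: for (and (srl x, 8), 0xff00) we get Bits = 8, Shift = 8
// and NB = 8, so Offset = 16 and the result is
// (shl (AssertZext<i8> (bfe_u32 x, 16, 8)), 8), i.e. bits [23:16] of x
// extracted and repositioned at bits [15:8], exactly what the original
// srl/and pair computed.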
5642 uint64_t Mask = CRHS->getZExtValue(); 5643 unsigned Bits = countPopulation(Mask); 5644 if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL && 5645 (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) { 5646 if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) { 5647 unsigned Shift = CShift->getZExtValue(); 5648 unsigned NB = CRHS->getAPIntValue().countTrailingZeros(); 5649 unsigned Offset = NB + Shift; 5650 if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary. 5651 SDLoc SL(N); 5652 SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32, 5653 LHS->getOperand(0), 5654 DAG.getConstant(Offset, SL, MVT::i32), 5655 DAG.getConstant(Bits, SL, MVT::i32)); 5656 EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits); 5657 SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE, 5658 DAG.getValueType(NarrowVT)); 5659 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext, 5660 DAG.getConstant(NB, SDLoc(CRHS), MVT::i32)); 5661 return Shl; 5662 } 5663 } 5664 } 5665 } 5666 5667 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> 5668 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) 5669 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) { 5670 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 5671 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); 5672 5673 SDValue X = LHS.getOperand(0); 5674 SDValue Y = RHS.getOperand(0); 5675 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) 5676 return SDValue(); 5677 5678 if (LCC == ISD::SETO) { 5679 if (X != LHS.getOperand(1)) 5680 return SDValue(); 5681 5682 if (RCC == ISD::SETUNE) { 5683 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); 5684 if (!C1 || !C1->isInfinity() || C1->isNegative()) 5685 return SDValue(); 5686 5687 const uint32_t Mask = SIInstrFlags::N_NORMAL | 5688 SIInstrFlags::N_SUBNORMAL | 5689 SIInstrFlags::N_ZERO | 5690 SIInstrFlags::P_ZERO | 5691 SIInstrFlags::P_SUBNORMAL | 5692 SIInstrFlags::P_NORMAL; 5693 5694 static_assert(((~(SIInstrFlags::S_NAN | 5695 SIInstrFlags::Q_NAN | 5696 SIInstrFlags::N_INFINITY | 5697 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, 5698 "mask not equal"); 5699 5700 SDLoc DL(N); 5701 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 5702 X, DAG.getConstant(Mask, DL, MVT::i32)); 5703 } 5704 } 5705 } 5706 5707 if (VT == MVT::i32 && 5708 (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) { 5709 // and x, (sext cc from i1) => select cc, x, 0 5710 if (RHS.getOpcode() != ISD::SIGN_EXTEND) 5711 std::swap(LHS, RHS); 5712 if (isBoolSGPR(RHS.getOperand(0))) 5713 return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0), 5714 LHS, DAG.getConstant(0, SDLoc(N), MVT::i32)); 5715 } 5716 5717 return SDValue(); 5718 } 5719 5720 SDValue SITargetLowering::performOrCombine(SDNode *N, 5721 DAGCombinerInfo &DCI) const { 5722 SelectionDAG &DAG = DCI.DAG; 5723 SDValue LHS = N->getOperand(0); 5724 SDValue RHS = N->getOperand(1); 5725 5726 EVT VT = N->getValueType(0); 5727 if (VT == MVT::i1) { 5728 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) 5729 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && 5730 RHS.getOpcode() == AMDGPUISD::FP_CLASS) { 5731 SDValue Src = LHS.getOperand(0); 5732 if (Src != RHS.getOperand(0)) 5733 return SDValue(); 5734 5735 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); 5736 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 5737 if (!CLHS || !CRHS) 5738 return 
SDValue(); 5739 5740 // Only 10 bits are used. 5741 static const uint32_t MaxMask = 0x3ff; 5742 5743 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; 5744 SDLoc DL(N); 5745 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 5746 Src, DAG.getConstant(NewMask, DL, MVT::i32)); 5747 } 5748 5749 return SDValue(); 5750 } 5751 5752 if (VT != MVT::i64) 5753 return SDValue(); 5754 5755 // TODO: This could be a generic combine with a predicate for extracting the 5756 // high half of an integer being free. 5757 5758 // (or i64:x, (zero_extend i32:y)) -> 5759 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x))) 5760 if (LHS.getOpcode() == ISD::ZERO_EXTEND && 5761 RHS.getOpcode() != ISD::ZERO_EXTEND) 5762 std::swap(LHS, RHS); 5763 5764 if (RHS.getOpcode() == ISD::ZERO_EXTEND) { 5765 SDValue ExtSrc = RHS.getOperand(0); 5766 EVT SrcVT = ExtSrc.getValueType(); 5767 if (SrcVT == MVT::i32) { 5768 SDLoc SL(N); 5769 SDValue LowLHS, HiBits; 5770 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG); 5771 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc); 5772 5773 DCI.AddToWorklist(LowOr.getNode()); 5774 DCI.AddToWorklist(HiBits.getNode()); 5775 5776 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 5777 LowOr, HiBits); 5778 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 5779 } 5780 } 5781 5782 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 5783 if (CRHS) { 5784 if (SDValue Split 5785 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS)) 5786 return Split; 5787 } 5788 5789 return SDValue(); 5790 } 5791 5792 SDValue SITargetLowering::performXorCombine(SDNode *N, 5793 DAGCombinerInfo &DCI) const { 5794 EVT VT = N->getValueType(0); 5795 if (VT != MVT::i64) 5796 return SDValue(); 5797 5798 SDValue LHS = N->getOperand(0); 5799 SDValue RHS = N->getOperand(1); 5800 5801 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); 5802 if (CRHS) { 5803 if (SDValue Split 5804 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS)) 5805 return Split; 5806 } 5807 5808 return SDValue(); 5809 } 5810 5811 // Instructions that will be lowered with a final instruction that zeros the 5812 // high result bits. 5813 // XXX - probably only need to list legal operations. 5814 static bool fp16SrcZerosHighBits(unsigned Opc) { 5815 switch (Opc) { 5816 case ISD::FADD: 5817 case ISD::FSUB: 5818 case ISD::FMUL: 5819 case ISD::FDIV: 5820 case ISD::FREM: 5821 case ISD::FMA: 5822 case ISD::FMAD: 5823 case ISD::FCANONICALIZE: 5824 case ISD::FP_ROUND: 5825 case ISD::UINT_TO_FP: 5826 case ISD::SINT_TO_FP: 5827 case ISD::FABS: 5828 // Fabs is lowered to a bit operation, but it's an and which will clear the 5829 // high bits anyway. 
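// (For instance, fabs on an f16 value held in a 32-bit register can be a
// v_and_b32 with 0x00007fff, which clears bit 15 and, incidentally, bits
// 31:16 as well.)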
5830 case ISD::FSQRT: 5831 case ISD::FSIN: 5832 case ISD::FCOS: 5833 case ISD::FPOWI: 5834 case ISD::FPOW: 5835 case ISD::FLOG: 5836 case ISD::FLOG2: 5837 case ISD::FLOG10: 5838 case ISD::FEXP: 5839 case ISD::FEXP2: 5840 case ISD::FCEIL: 5841 case ISD::FTRUNC: 5842 case ISD::FRINT: 5843 case ISD::FNEARBYINT: 5844 case ISD::FROUND: 5845 case ISD::FFLOOR: 5846 case ISD::FMINNUM: 5847 case ISD::FMAXNUM: 5848 case AMDGPUISD::FRACT: 5849 case AMDGPUISD::CLAMP: 5850 case AMDGPUISD::COS_HW: 5851 case AMDGPUISD::SIN_HW: 5852 case AMDGPUISD::FMIN3: 5853 case AMDGPUISD::FMAX3: 5854 case AMDGPUISD::FMED3: 5855 case AMDGPUISD::FMAD_FTZ: 5856 case AMDGPUISD::RCP: 5857 case AMDGPUISD::RSQ: 5858 case AMDGPUISD::LDEXP: 5859 return true; 5860 default: 5861 // fcopysign, select and others may be lowered to 32-bit bit operations 5862 // which don't zero the high bits. 5863 return false; 5864 } 5865 } 5866 5867 SDValue SITargetLowering::performZeroExtendCombine(SDNode *N, 5868 DAGCombinerInfo &DCI) const { 5869 if (!Subtarget->has16BitInsts() || 5870 DCI.getDAGCombineLevel() < AfterLegalizeDAG) 5871 return SDValue(); 5872 5873 EVT VT = N->getValueType(0); 5874 if (VT != MVT::i32) 5875 return SDValue(); 5876 5877 SDValue Src = N->getOperand(0); 5878 if (Src.getValueType() != MVT::i16) 5879 return SDValue(); 5880 5881 // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src 5882 // FIXME: It is not universally true that the high bits are zeroed on gfx9. 5883 if (Src.getOpcode() == ISD::BITCAST) { 5884 SDValue BCSrc = Src.getOperand(0); 5885 if (BCSrc.getValueType() == MVT::f16 && 5886 fp16SrcZerosHighBits(BCSrc.getOpcode())) 5887 return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc); 5888 } 5889 5890 return SDValue(); 5891 } 5892 5893 SDValue SITargetLowering::performClassCombine(SDNode *N, 5894 DAGCombinerInfo &DCI) const { 5895 SelectionDAG &DAG = DCI.DAG; 5896 SDValue Mask = N->getOperand(1); 5897 5898 // fp_class x, 0 -> false 5899 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { 5900 if (CMask->isNullValue()) 5901 return DAG.getConstant(0, SDLoc(N), MVT::i1); 5902 } 5903 5904 if (N->getOperand(0).isUndef()) 5905 return DAG.getUNDEF(MVT::i1); 5906 5907 return SDValue(); 5908 } 5909 5910 static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) { 5911 if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions()) 5912 return true; 5913 5914 return DAG.isKnownNeverNaN(Op); 5915 } 5916 5917 static bool isCanonicalized(SelectionDAG &DAG, SDValue Op, 5918 const SISubtarget *ST, unsigned MaxDepth=5) { 5919 // If source is a result of another standard FP operation it is already in 5920 // canonical form. 5921 5922 switch (Op.getOpcode()) { 5923 default: 5924 break; 5925 5926 // These will flush denorms if required. 5927 case ISD::FADD: 5928 case ISD::FSUB: 5929 case ISD::FMUL: 5930 case ISD::FSQRT: 5931 case ISD::FCEIL: 5932 case ISD::FFLOOR: 5933 case ISD::FMA: 5934 case ISD::FMAD: 5935 5936 case ISD::FCANONICALIZE: 5937 return true; 5938 5939 case ISD::FP_ROUND: 5940 return Op.getValueType().getScalarType() != MVT::f16 || 5941 ST->hasFP16Denormals(); 5942 5943 case ISD::FP_EXTEND: 5944 return Op.getOperand(0).getValueType().getScalarType() != MVT::f16 || 5945 ST->hasFP16Denormals(); 5946 5947 case ISD::FP16_TO_FP: 5948 case ISD::FP_TO_FP16: 5949 return ST->hasFP16Denormals(); 5950 5951 // It can/will be lowered or combined as a bit operation. 5952 // Need to check their input recursively to handle. 
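// (fneg and fabs become xor/and on the sign bit; neither quiets a
// signaling NaN nor flushes a denormal, so whether the result is
// canonical depends entirely on the source operand.)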
5953 case ISD::FNEG:
5954 case ISD::FABS:
5955 return (MaxDepth > 0) &&
5956 isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1);
5957
5958 case ISD::FSIN:
5959 case ISD::FCOS:
5960 case ISD::FSINCOS:
5961 return Op.getValueType().getScalarType() != MVT::f16;
5962
5963 // On pre-GFX9 targets V_MIN_F32 and others do not flush denorms.
5964 // For such targets we need to check their inputs recursively.
5965 case ISD::FMINNUM:
5966 case ISD::FMAXNUM:
5967 case ISD::FMINNAN:
5968 case ISD::FMAXNAN:
5969
5970 if (ST->supportsMinMaxDenormModes() &&
5971 DAG.isKnownNeverNaN(Op.getOperand(0)) &&
5972 DAG.isKnownNeverNaN(Op.getOperand(1)))
5973 return true;
5974
5975 return (MaxDepth > 0) &&
5976 isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1) &&
5977 isCanonicalized(DAG, Op.getOperand(1), ST, MaxDepth - 1);
5978
5979 case ISD::ConstantFP: {
5980 auto F = cast<ConstantFPSDNode>(Op)->getValueAPF();
5981 return !F.isDenormal() && !(F.isNaN() && F.isSignaling());
5982 }
5983 }
5984 return false;
5985 }
5986
5987 // Constant fold canonicalize.
5988 SDValue SITargetLowering::performFCanonicalizeCombine(
5989 SDNode *N,
5990 DAGCombinerInfo &DCI) const {
5991 SelectionDAG &DAG = DCI.DAG;
5992 ConstantFPSDNode *CFP = isConstOrConstSplatFP(N->getOperand(0));
5993
5994 if (!CFP) {
5995 SDValue N0 = N->getOperand(0);
5996 EVT VT = N0.getValueType().getScalarType();
5997 auto ST = getSubtarget();
5998
5999 if (((VT == MVT::f32 && ST->hasFP32Denormals()) ||
6000 (VT == MVT::f64 && ST->hasFP64Denormals()) ||
6001 (VT == MVT::f16 && ST->hasFP16Denormals())) &&
6002 DAG.isKnownNeverNaN(N0))
6003 return N0;
6004
6005 bool IsIEEEMode = Subtarget->enableIEEEBit(DAG.getMachineFunction());
6006
6007 if ((IsIEEEMode || isKnownNeverSNan(DAG, N0)) &&
6008 isCanonicalized(DAG, N0, ST))
6009 return N0;
6010
6011 return SDValue();
6012 }
6013
6014 const APFloat &C = CFP->getValueAPF();
6015
6016 // Flush denormals to 0 if not enabled.
6017 if (C.isDenormal()) {
6018 EVT VT = N->getValueType(0);
6019 EVT SVT = VT.getScalarType();
6020 if (SVT == MVT::f32 && !Subtarget->hasFP32Denormals())
6021 return DAG.getConstantFP(0.0, SDLoc(N), VT);
6022
6023 if (SVT == MVT::f64 && !Subtarget->hasFP64Denormals())
6024 return DAG.getConstantFP(0.0, SDLoc(N), VT);
6025
6026 if (SVT == MVT::f16 && !Subtarget->hasFP16Denormals())
6027 return DAG.getConstantFP(0.0, SDLoc(N), VT);
6028 }
6029
6030 if (C.isNaN()) {
6031 EVT VT = N->getValueType(0);
6032 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
6033 if (C.isSignaling()) {
6034 // Quiet a signaling NaN.
6035 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
6036 }
6037
6038 // Make sure it is the canonical NaN bitpattern.
6039 //
6040 // TODO: Can we use -1 as the canonical NaN value since it's an inline
6041 // immediate?
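// For reference, APFloat::getQNaN produces the default quiet NaN with an
// empty payload, e.g. 0x7fc00000 for f32; the check below rewrites any
// other NaN encoding to that canonical pattern.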
6042 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt()) 6043 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT); 6044 } 6045 6046 return N->getOperand(0); 6047 } 6048 6049 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { 6050 switch (Opc) { 6051 case ISD::FMAXNUM: 6052 return AMDGPUISD::FMAX3; 6053 case ISD::SMAX: 6054 return AMDGPUISD::SMAX3; 6055 case ISD::UMAX: 6056 return AMDGPUISD::UMAX3; 6057 case ISD::FMINNUM: 6058 return AMDGPUISD::FMIN3; 6059 case ISD::SMIN: 6060 return AMDGPUISD::SMIN3; 6061 case ISD::UMIN: 6062 return AMDGPUISD::UMIN3; 6063 default: 6064 llvm_unreachable("Not a min/max opcode"); 6065 } 6066 } 6067 6068 SDValue SITargetLowering::performIntMed3ImmCombine( 6069 SelectionDAG &DAG, const SDLoc &SL, 6070 SDValue Op0, SDValue Op1, bool Signed) const { 6071 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1); 6072 if (!K1) 6073 return SDValue(); 6074 6075 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); 6076 if (!K0) 6077 return SDValue(); 6078 6079 if (Signed) { 6080 if (K0->getAPIntValue().sge(K1->getAPIntValue())) 6081 return SDValue(); 6082 } else { 6083 if (K0->getAPIntValue().uge(K1->getAPIntValue())) 6084 return SDValue(); 6085 } 6086 6087 EVT VT = K0->getValueType(0); 6088 unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3; 6089 if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) { 6090 return DAG.getNode(Med3Opc, SL, VT, 6091 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0)); 6092 } 6093 6094 // If there isn't a 16-bit med3 operation, convert to 32-bit. 6095 MVT NVT = MVT::i32; 6096 unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 6097 6098 SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0)); 6099 SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1)); 6100 SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1); 6101 6102 SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3); 6103 return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3); 6104 } 6105 6106 static ConstantFPSDNode *getSplatConstantFP(SDValue Op) { 6107 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) 6108 return C; 6109 6110 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) { 6111 if (ConstantFPSDNode *C = BV->getConstantFPSplatNode()) 6112 return C; 6113 } 6114 6115 return nullptr; 6116 } 6117 6118 SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG, 6119 const SDLoc &SL, 6120 SDValue Op0, 6121 SDValue Op1) const { 6122 ConstantFPSDNode *K1 = getSplatConstantFP(Op1); 6123 if (!K1) 6124 return SDValue(); 6125 6126 ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1)); 6127 if (!K0) 6128 return SDValue(); 6129 6130 // Ordered >= (although NaN inputs should have folded away by now). 6131 APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF()); 6132 if (Cmp == APFloat::cmpGreaterThan) 6133 return SDValue(); 6134 6135 // TODO: Check IEEE bit enabled? 6136 EVT VT = Op0.getValueType(); 6137 if (Subtarget->enableDX10Clamp()) { 6138 // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the 6139 // hardware fmed3 behavior converting to a min. 6140 // FIXME: Should this be allowing -0.0? 6141 if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0)) 6142 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0)); 6143 } 6144 6145 // med3 for f16 is only available on gfx9+, and not available for v2f16. 
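// As an illustration, fminnum(fmaxnum(x, 2.0), 4.0) with x known not to
// be a signaling NaN becomes fmed3(x, 2.0, 4.0) here: the median of the
// three operands clamps x to [2.0, 4.0] just as the min/max pair did.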
6146 if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
6147 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
6148 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
6149 // then give the other result, which is different from med3 with a NaN
6150 // input.
6151 SDValue Var = Op0.getOperand(0);
6152 if (!isKnownNeverSNan(DAG, Var))
6153 return SDValue();
6154
6155 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
6156 Var, SDValue(K0, 0), SDValue(K1, 0));
6157 }
6158
6159 return SDValue();
6160 }
6161
6162 SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
6163 DAGCombinerInfo &DCI) const {
6164 SelectionDAG &DAG = DCI.DAG;
6165
6166 EVT VT = N->getValueType(0);
6167 unsigned Opc = N->getOpcode();
6168 SDValue Op0 = N->getOperand(0);
6169 SDValue Op1 = N->getOperand(1);
6170
6171 // Only do this if the inner op has one use since this will just increase
6172 // register pressure for no benefit.
6173
6174
6175 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
6176 VT != MVT::f64 &&
6177 ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) {
6178 // max(max(a, b), c) -> max3(a, b, c)
6179 // min(min(a, b), c) -> min3(a, b, c)
6180 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
6181 SDLoc DL(N);
6182 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
6183 DL,
6184 N->getValueType(0),
6185 Op0.getOperand(0),
6186 Op0.getOperand(1),
6187 Op1);
6188 }
6189
6190 // Try commuted.
6191 // max(a, max(b, c)) -> max3(a, b, c)
6192 // min(a, min(b, c)) -> min3(a, b, c)
6193 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
6194 SDLoc DL(N);
6195 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
6196 DL,
6197 N->getValueType(0),
6198 Op0,
6199 Op1.getOperand(0),
6200 Op1.getOperand(1));
6201 }
6202 }
6203
6204 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
6205 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
6206 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
6207 return Med3;
6208 }
6209
6210 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
6211 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
6212 return Med3;
6213 }
6214
6215 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
6216 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
6217 (Opc == AMDGPUISD::FMIN_LEGACY &&
6218 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
6219 (VT == MVT::f32 || VT == MVT::f64 ||
6220 (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
6221 (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
6222 Op0.hasOneUse()) {
6223 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
6224 return Res;
6225 }
6226
6227 return SDValue();
6228 }
6229
6230 static bool isClampZeroToOne(SDValue A, SDValue B) {
6231 if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
6232 if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
6233 // FIXME: Should this be allowing -0.0?
6234 return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
6235 (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
6236 }
6237 }
6238
6239 return false;
6240 }
6241
6242 // FIXME: Should only worry about snans for version with chain.
6243 SDValue SITargetLowering::performFMed3Combine(SDNode *N,
6244 DAGCombinerInfo &DCI) const {
6245 EVT VT = N->getValueType(0);
6246 // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
6247 // NaNs. With a NaN input, the order of the operands may change the result.
6248
6249 SelectionDAG &DAG = DCI.DAG;
6250 SDLoc SL(N);
6251
6252 SDValue Src0 = N->getOperand(0);
6253 SDValue Src1 = N->getOperand(1);
6254 SDValue Src2 = N->getOperand(2);
6255
6256 if (isClampZeroToOne(Src0, Src1)) {
6257 // const_a, const_b, x -> clamp is safe in all cases including signaling
6258 // NaNs.
6259 // FIXME: Should this be allowing -0.0?
6260 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
6261 }
6262
6263 // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
6264 // handling the no-dx10-clamp case?
6265 if (Subtarget->enableDX10Clamp()) {
6266 // If NaN is clamped to 0, we are free to reorder the inputs.
6267
6268 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
6269 std::swap(Src0, Src1);
6270
6271 if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
6272 std::swap(Src1, Src2);
6273
6274 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
6275 std::swap(Src0, Src1);
6276
6277 if (isClampZeroToOne(Src1, Src2))
6278 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
6279 }
6280
6281 return SDValue();
6282 }
6283
6284 SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
6285 DAGCombinerInfo &DCI) const {
6286 SDValue Src0 = N->getOperand(0);
6287 SDValue Src1 = N->getOperand(1);
6288 if (Src0.isUndef() && Src1.isUndef())
6289 return DCI.DAG.getUNDEF(N->getValueType(0));
6290 return SDValue();
6291 }
6292
6293 SDValue SITargetLowering::performExtractVectorEltCombine(
6294 SDNode *N, DAGCombinerInfo &DCI) const {
6295 SDValue Vec = N->getOperand(0);
6296
6297 SelectionDAG &DAG = DCI.DAG;
6298 if (Vec.getOpcode() == ISD::FNEG && allUsesHaveSourceMods(N)) {
6299 SDLoc SL(N);
6300 EVT EltVT = N->getValueType(0);
6301 SDValue Idx = N->getOperand(1);
6302 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
6303 Vec.getOperand(0), Idx);
6304 return DAG.getNode(ISD::FNEG, SL, EltVT, Elt);
6305 }
6306
6307 return SDValue();
6308 }
6309
6310 static bool convertBuildVectorCastElt(SelectionDAG &DAG,
6311 SDValue &Lo, SDValue &Hi) {
6312 if (Hi.getOpcode() == ISD::BITCAST &&
6313 Hi.getOperand(0).getValueType() == MVT::f16 &&
6314 (isa<ConstantSDNode>(Lo) || Lo.isUndef())) {
6315 Lo = DAG.getNode(ISD::BITCAST, SDLoc(Lo), MVT::f16, Lo);
6316 Hi = Hi.getOperand(0);
6317 return true;
6318 }
6319
6320 return false;
6321 }
6322
6323 SDValue SITargetLowering::performBuildVectorCombine(
6324 SDNode *N, DAGCombinerInfo &DCI) const {
6325 SDLoc SL(N);
6326
6327 if (!isTypeLegal(MVT::v2i16))
6328 return SDValue();
6329 SelectionDAG &DAG = DCI.DAG;
6330 EVT VT = N->getValueType(0);
6331
6332 if (VT == MVT::v2i16) {
6333 SDValue Lo = N->getOperand(0);
6334 SDValue Hi = N->getOperand(1);
6335
6336 // v2i16 build_vector (const|undef), (bitcast f16:$x)
6337 // -> bitcast (v2f16 build_vector const|undef, $x)
6338 if (convertBuildVectorCastElt(DAG, Lo, Hi)) {
6339 SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Lo, Hi });
6340 return DAG.getNode(ISD::BITCAST, SL, VT, NewVec);
6341 }
6342
6343 if (convertBuildVectorCastElt(DAG, Hi, Lo)) {
6344 SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Hi, Lo });
6345 return DAG.getNode(ISD::BITCAST, SL, VT, NewVec);
6346 }
6347 }
6348
6349 return SDValue();
6350 }
6351
6352 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
6353 const SDNode *N0,
6354 const SDNode *N1) const {
6355 EVT VT = N0->getValueType(0);
6356
6357 // Only do this if we are not trying to support denormals.
unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
                                          const SDNode *N0,
                                          const SDNode *N1) const {
  EVT VT = N0->getValueType(0);

  // Only do this if we are not trying to support denormals. v_mad_f32 does
  // not support denormals ever.
  if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
      (VT == MVT::f16 && !Subtarget->hasFP16Denormals()))
    return ISD::FMAD;

  const TargetOptions &Options = DAG.getTarget().Options;
  if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
       (N0->getFlags().hasUnsafeAlgebra() &&
        N1->getFlags().hasUnsafeAlgebra())) &&
      isFMAFasterThanFMulAndFAdd(VT)) {
    return ISD::FMA;
  }

  return 0;
}

static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
                           EVT VT,
                           SDValue N0, SDValue N1, SDValue N2,
                           bool Signed) {
  unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
  SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
  return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
}
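
// The MAD_I64_I32 / MAD_U64_U32 nodes built here model the target's
// v_mad_u64_u32-style multiply-add:
//   i64 result = ext(a:i32) * ext(b:i32) + c:i64
// plus a second i1 result for the carry. getMad64_32 keeps only the i64
// value and truncates it back to the caller's original type, which the
// caller has checked is between 33 and 64 bits wide.
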
SDValue SITargetLowering::performAddCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDLoc SL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
      && Subtarget->hasMad64_32() &&
      !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
      VT.getScalarSizeInBits() <= 64) {
    if (LHS.getOpcode() != ISD::MUL)
      std::swap(LHS, RHS);

    SDValue MulLHS = LHS.getOperand(0);
    SDValue MulRHS = LHS.getOperand(1);
    SDValue AddRHS = RHS;

    // TODO: Maybe restrict if SGPR inputs.
    if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
        numBitsUnsigned(MulRHS, DAG) <= 32) {
      MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
      MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
      AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
      return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
    }

    if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
      MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
      MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
      AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
      return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
    }

    return SDValue();
  }

  if (VT != MVT::i32)
    return SDValue();

  // add x, zext (setcc) => addcarry x, 0, setcc
  // add x, sext (setcc) => subcarry x, 0, setcc
  unsigned Opc = LHS.getOpcode();
  if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
      Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
    std::swap(RHS, LHS);

  Opc = RHS.getOpcode();
  switch (Opc) {
  default: break;
  case ISD::ZERO_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::ANY_EXTEND: {
    auto Cond = RHS.getOperand(0);
    if (!isBoolSGPR(Cond))
      break;
    SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
    SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
    Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
    return DAG.getNode(Opc, SL, VTList, Args);
  }
  case ISD::ADDCARRY: {
    // add x, (addcarry y, 0, cc) => addcarry x, y, cc
    auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
    if (!C || C->getZExtValue() != 0) break;
    SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
    return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
  }
  }
  return SDValue();
}
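
// For example, for i32:
//   (add x, (zext (setcc a, b, cc)))  =>  (addcarry x, 0, (setcc a, b, cc))
// moves the boolean into the carry-in operand rather than materializing the
// zero-extended value, which is profitable when the condition is already a
// lane-mask-style i1 value, as isBoolSGPR checks.
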
SDValue SITargetLowering::performSubCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

  if (VT != MVT::i32)
    return SDValue();

  SDLoc SL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  unsigned Opc = LHS.getOpcode();
  if (Opc != ISD::SUBCARRY)
    std::swap(RHS, LHS);

  if (LHS.getOpcode() == ISD::SUBCARRY) {
    // sub (subcarry x, 0, cc), y => subcarry x, y, cc
    auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
    if (!C || C->getZExtValue() != 0)
      return SDValue();
    SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
    return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
  }
  return SDValue();
}

SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
  DAGCombinerInfo &DCI) const {
  if (N->getValueType(0) != MVT::i32)
    return SDValue();

  auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C || C->getZExtValue() != 0)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDValue LHS = N->getOperand(0);

  // addcarry (add x, y), 0, cc => addcarry x, y, cc
  // subcarry (sub x, y), 0, cc => subcarry x, y, cc
  unsigned LHSOpc = LHS.getOpcode();
  unsigned Opc = N->getOpcode();
  if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
      (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
    SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
    return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
  }
  return SDValue();
}

SDValue SITargetLowering::performFAddCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

  SDLoc SL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // These should really be instruction patterns, but writing patterns with
  // source modifiers is a pain.

  // fadd (fadd (a, a), b) -> mad 2.0, a, b
  if (LHS.getOpcode() == ISD::FADD) {
    SDValue A = LHS.getOperand(0);
    if (A == LHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
      }
    }
  }

  // fadd (b, fadd (a, a)) -> mad 2.0, a, b
  if (RHS.getOpcode() == ISD::FADD) {
    SDValue A = RHS.getOperand(0);
    if (A == RHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
      }
    }
  }

  return SDValue();
}
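
// For example, with f32 denormals disabled (so getFusedOpcode returns
// ISD::FMAD), the first pattern above turns
//   (fadd (fadd a, a), b)
// into (fmad a, 2.0, b), folding the doubling into the multiply operand of
// what typically selects to a single v_mad_f32.
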
SDValue SITargetLowering::performFSubCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  EVT VT = N->getValueType(0);
  assert(!VT.isVector());

  // Try to get the fneg to fold into the source modifier. This undoes generic
  // DAG combines and folds them into the mad.
  //
  // Only do this if we are not trying to support denormals. v_mad_f32 does
  // not support denormals ever.
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  if (LHS.getOpcode() == ISD::FADD) {
    // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
    SDValue A = LHS.getOperand(0);
    if (A == LHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);

        return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
      }
    }
  }

  if (RHS.getOpcode() == ISD::FADD) {
    // (fsub c, (fadd a, a)) -> mad -2.0, a, c

    SDValue A = RHS.getOperand(0);
    if (A == RHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
      if (FusedOp != 0) {
        const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
      }
    }
  }

  return SDValue();
}

SDValue SITargetLowering::performSetCCCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  EVT VT = LHS.getValueType();
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();

  auto CRHS = dyn_cast<ConstantSDNode>(RHS);
  if (!CRHS) {
    CRHS = dyn_cast<ConstantSDNode>(LHS);
    if (CRHS) {
      std::swap(LHS, RHS);
      CC = getSetCCSwappedOperands(CC);
    }
  }

  if (CRHS && VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
      isBoolSGPR(LHS.getOperand(0))) {
    // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
    // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
    // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1
    // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc
    if ((CRHS->isAllOnesValue() &&
         (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
        (CRHS->isNullValue() &&
         (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
      return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
                         DAG.getConstant(-1, SL, MVT::i1));
    if ((CRHS->isAllOnesValue() &&
         (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
        (CRHS->isNullValue() &&
         (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
      return LHS.getOperand(0);
  }

  if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
                                           VT != MVT::f16))
    return SDValue();

  // Match isinf pattern
  // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
  if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) {
    const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
    if (!CRHS)
      return SDValue();

    const APFloat &APF = CRHS->getValueAPF();
    if (APF.isInfinity() && !APF.isNegative()) {
      unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
      return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
                         DAG.getConstant(Mask, SL, MVT::i32));
    }
  }

  return SDValue();
}
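
// For example, the isinf pattern above converts
//   (setcc (fabs x), +infinity, setoeq)
// into (fp_class x, P_INFINITY | N_INFINITY), which typically selects to a
// single v_cmp_class instruction instead of a floating-point compare
// against an infinity constant.
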
SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
                                                     DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;

  SDValue Src = N->getOperand(0);
  SDValue Srl = N->getOperand(0);
  if (Srl.getOpcode() == ISD::ZERO_EXTEND)
    Srl = Srl.getOperand(0);

  // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
  if (Srl.getOpcode() == ISD::SRL) {
    // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
    // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
    // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x

    if (const ConstantSDNode *C =
        dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
      Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
                               EVT(MVT::i32));

      unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
      if (SrcOffset < 32 && SrcOffset % 8 == 0) {
        return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
                           MVT::f32, Srl);
      }
    }
  }

  APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);

  KnownBits Known;
  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                        !DCI.isBeforeLegalizeOps());
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.ShrinkDemandedConstant(Src, Demanded, TLO) ||
      TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
    DCI.CommitTargetLoweringOpt(TLO);
  }

  return SDValue();
}

SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
  case ISD::ADD:
    return performAddCombine(N, DCI);
  case ISD::SUB:
    return performSubCombine(N, DCI);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:
    return performAddCarrySubCarryCombine(N, DCI);
  case ISD::FADD:
    return performFAddCombine(N, DCI);
  case ISD::FSUB:
    return performFSubCombine(N, DCI);
  case ISD::SETCC:
    return performSetCCCombine(N, DCI);
  case ISD::FMAXNUM:
  case ISD::FMINNUM:
  case ISD::SMAX:
  case ISD::SMIN:
  case ISD::UMAX:
  case ISD::UMIN:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY: {
    if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
        getTargetMachine().getOptLevel() > CodeGenOpt::None)
      return performMinMaxCombine(N, DCI);
    break;
  }
  case ISD::LOAD:
  case ISD::STORE:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case AMDGPUISD::ATOMIC_INC:
  case AMDGPUISD::ATOMIC_DEC: // TODO: Target mem intrinsics.
    if (DCI.isBeforeLegalize())
      break;
    return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
  case ISD::AND:
    return performAndCombine(N, DCI);
  case ISD::OR:
    return performOrCombine(N, DCI);
  case ISD::XOR:
    return performXorCombine(N, DCI);
  case ISD::ZERO_EXTEND:
    return performZeroExtendCombine(N, DCI);
  case AMDGPUISD::FP_CLASS:
    return performClassCombine(N, DCI);
  case ISD::FCANONICALIZE:
    return performFCanonicalizeCombine(N, DCI);
  case AMDGPUISD::FRACT:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RSQ_LEGACY:
  case AMDGPUISD::RSQ_CLAMP:
  case AMDGPUISD::LDEXP: {
    SDValue Src = N->getOperand(0);
    if (Src.isUndef())
      return Src;
    break;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return performUCharToFloatCombine(N, DCI);
  case AMDGPUISD::CVT_F32_UBYTE0:
  case AMDGPUISD::CVT_F32_UBYTE1:
  case AMDGPUISD::CVT_F32_UBYTE2:
  case AMDGPUISD::CVT_F32_UBYTE3:
    return performCvtF32UByteNCombine(N, DCI);
  case AMDGPUISD::FMED3:
    return performFMed3Combine(N, DCI);
  case AMDGPUISD::CVT_PKRTZ_F16_F32:
    return performCvtPkRTZCombine(N, DCI);
  case ISD::SCALAR_TO_VECTOR: {
    SelectionDAG &DAG = DCI.DAG;
    EVT VT = N->getValueType(0);

    // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
    if (VT == MVT::v2i16 || VT == MVT::v2f16) {
      SDLoc SL(N);
      SDValue Src = N->getOperand(0);
      EVT EltVT = Src.getValueType();
      if (EltVT == MVT::f16)
        Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);

      SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
      return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
    }

    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    return performExtractVectorEltCombine(N, DCI);
  case ISD::BUILD_VECTOR:
    return performBuildVectorCombine(N, DCI);
  }
  return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}

/// \brief Helper function for adjustWritemask
static unsigned SubIdx2Lane(unsigned Idx) {
  switch (Idx) {
  default: return 0;
  case AMDGPU::sub0: return 0;
  case AMDGPU::sub1: return 1;
  case AMDGPU::sub2: return 2;
  case AMDGPU::sub3: return 3;
  }
}

/// \brief Adjust the writemask of MIMG instructions
SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
                                          SelectionDAG &DAG) const {
  SDNode *Users[4] = { nullptr };
  unsigned Lane = 0;
  unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 2 : 3;
  unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
  unsigned NewDmask = 0;
  bool HasChain = Node->getNumValues() > 1;

  if (OldDmask == 0) {
    // These are folded out, but on the chance it happens don't assert.
    return Node;
  }

  // Try to figure out the used register components
  for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
       I != E; ++I) {

    // Don't look at users of the chain.
    if (I.getUse().getResNo() != 0)
      continue;

    // Abort if we can't understand the usage
    if (!I->isMachineOpcode() ||
        I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
      return Node;

    // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
    // Note that subregs are packed, i.e. Lane==0 is the first bit set
    // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
    // set, etc.
    Lane = SubIdx2Lane(I->getConstantOperandVal(1));

    // Set which texture component corresponds to the lane.
    unsigned Comp;
    for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
      Comp = countTrailingZeros(Dmask);
      Dmask &= ~(1 << Comp);
    }

    // Abort if we have more than one user per component
    if (Users[Lane])
      return Node;

    Users[Lane] = *I;
    NewDmask |= 1 << Comp;
  }

  // Abort if there's no change
  if (NewDmask == OldDmask)
    return Node;

  unsigned BitsSet = countPopulation(NewDmask);

  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  int NewOpcode = AMDGPU::getMaskedMIMGOp(*TII,
                                          Node->getMachineOpcode(), BitsSet);
  assert(NewOpcode != -1 &&
         NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
         "failed to find equivalent MIMG op");

  // Adjust the writemask in the node
  SmallVector<SDValue, 12> Ops;
  Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
  Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
  Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());

  MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();

  MVT ResultVT = BitsSet == 1 ?
    SVT : MVT::getVectorVT(SVT, BitsSet == 3 ? 4 : BitsSet);
  SDVTList NewVTList = HasChain ?
    DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);

  MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
                                              NewVTList, Ops);

  if (HasChain) {
    // Update chain.
    NewNode->setMemRefs(Node->memoperands_begin(), Node->memoperands_end());
    DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
  }

  if (BitsSet == 1) {
    assert(Node->hasNUsesOfValue(1, 0));
    SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
                                      SDLoc(Node), Users[Lane]->getValueType(0),
                                      SDValue(NewNode, 0));
    DAG.ReplaceAllUsesWith(Users[Lane], Copy);
    return nullptr;
  }

  // Update the users of the node with the new indices
  for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
    SDNode *User = Users[i];
    if (!User)
      continue;

    SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
    DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);

    switch (Idx) {
    default: break;
    case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
    case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
    case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
    }
  }

  DAG.RemoveDeadNode(Node);
  return nullptr;
}
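
// For example (an illustrative case, not from the original source): if an
// image sample with dmask = 0xf is only used through extract_subreg of sub0
// and sub2, the loop above records two users, NewDmask becomes 0b0101, the
// opcode is switched to the two-component MIMG variant, and the users are
// rewritten to sub0 and sub1 of the narrower result.
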
static bool isFrameIndexOp(SDValue Op) {
  if (Op.getOpcode() == ISD::AssertZext)
    Op = Op.getOperand(0);

  return isa<FrameIndexSDNode>(Op);
}

/// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
/// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers.
SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
                                                        SelectionDAG &DAG) const {
  if (Node->getOpcode() == ISD::CopyToReg) {
    RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
    SDValue SrcVal = Node->getOperand(2);

    // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
    // to try understanding copies to physical registers.
    if (SrcVal.getValueType() == MVT::i1 &&
        TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
      SDLoc SL(Node);
      MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
      SDValue VReg = DAG.getRegister(
        MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);

      SDNode *Glued = Node->getGluedNode();
      SDValue ToVReg
        = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
                           SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
      SDValue ToResultReg
        = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
                           VReg, ToVReg.getValue(1));
      DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
      DAG.RemoveDeadNode(Node);
      return ToResultReg.getNode();
    }
  }

  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
    if (!isFrameIndexOp(Node->getOperand(i))) {
      Ops.push_back(Node->getOperand(i));
      continue;
    }

    SDLoc DL(Node);
    Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
                                             Node->getOperand(i).getValueType(),
                                             Node->getOperand(i)), 0));
  }

  return DAG.UpdateNodeOperands(Node, Ops);
}
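
// For example (illustrative MIR-style syntax), a selected node such as
//   REG_SEQUENCE ..., frameindex:%stack.0, sub0, ...
// is rewritten so the frame index operand is first materialized by an
// S_MOV_B32, since the generic instructions handled here expect register
// inputs.
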
/// \brief Fold the instructions after selecting them.
/// Returns null if users were already updated.
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
                                          SelectionDAG &DAG) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  unsigned Opcode = Node->getMachineOpcode();

  if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
      !TII->isGather4(Opcode)) {
    return adjustWritemask(Node, DAG);
  }

  if (Opcode == AMDGPU::INSERT_SUBREG ||
      Opcode == AMDGPU::REG_SEQUENCE) {
    legalizeTargetIndependentNode(Node, DAG);
    return Node;
  }

  switch (Opcode) {
  case AMDGPU::V_DIV_SCALE_F32:
  case AMDGPU::V_DIV_SCALE_F64: {
    // Satisfy the operand register constraint when one of the inputs is
    // undefined. Ordinarily each undef value will have its own implicit_def of
    // a vreg, so force these to use a single register.
    SDValue Src0 = Node->getOperand(0);
    SDValue Src1 = Node->getOperand(1);
    SDValue Src2 = Node->getOperand(2);

    if ((Src0.isMachineOpcode() &&
         Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
        (Src0 == Src1 || Src0 == Src2))
      break;

    MVT VT = Src0.getValueType().getSimpleVT();
    const TargetRegisterClass *RC = getRegClassFor(VT);

    MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);

    SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
                                      UndefReg, Src0, SDValue());

    // src0 must be the same register as src1 or src2, even if the value is
    // undefined, so make sure we don't violate this constraint.
    if (Src0.isMachineOpcode() &&
        Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
      if (Src1.isMachineOpcode() &&
          Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
        Src0 = Src1;
      else if (Src2.isMachineOpcode() &&
               Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
        Src0 = Src2;
      else {
        assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
        Src0 = UndefReg;
        Src1 = UndefReg;
      }
    } else
      break;

    SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
    for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
      Ops.push_back(Node->getOperand(I));

    Ops.push_back(ImpDef.getValue(1));
    return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
  }
  default:
    break;
  }

  return Node;
}
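
// For example (illustrative): given
//   V_DIV_SCALE_F32 src0:implicit_def, src1:%a, src2:%b
// the case above rewrites src0 to reuse %a (or, when every input is
// undefined, a single shared undef register), so the constraint that src0
// match src1 or src2 holds even for undefined inputs.
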
/// \brief Assign the register class depending on the number of
/// bits set in the writemask
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                     SDNode *Node) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  if (TII->isVOP3(MI.getOpcode())) {
    // Make sure constant bus requirements are respected.
    TII->legalizeOperandsVOP3(MRI, MI);
    return;
  }

  // Replace unused atomics with the no return version.
  int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
  if (NoRetAtomicOp != -1) {
    if (!Node->hasAnyUseOfValue(0)) {
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);
      return;
    }

    // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
    // instruction, because the return type of these instructions is a vec2 of
    // the memory type, so it can be tied to the input operand.
    // This means these instructions always have a use, so we need to add a
    // special case to check if the atomic has only one extract_subreg use,
    // which itself has no uses.
    if ((Node->hasNUsesOfValue(1, 0) &&
         Node->use_begin()->isMachineOpcode() &&
         Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
         !Node->use_begin()->hasAnyUseOfValue(0))) {
      unsigned Def = MI.getOperand(0).getReg();

      // Change this into a noret atomic.
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);

      // If we only remove the def operand from the atomic instruction, the
      // extract_subreg will be left with a use of a vreg without a def.
      // So we need to insert an implicit_def to avoid machine verifier
      // errors.
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
              TII->get(AMDGPU::IMPLICIT_DEF), Def);
    }
    return;
  }
}

static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
                              uint64_t Val) {
  SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
  return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
}

MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
                                                const SDLoc &DL,
                                                SDValue Ptr) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  // Build the half of the subregister with the constants before building the
  // full 128-bit register. If we are building multiple resource descriptors,
  // this will allow CSEing of the 2-component register.
  const SDValue Ops0[] = {
    DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
    buildSMovImm32(DAG, DL, 0),
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
  };

  SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                                MVT::v2i32, Ops0), 0);

  // Combine the constants and the pointer.
  const SDValue Ops1[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    Ptr,
    DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
    SubRegHi,
    DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
}
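
// The descriptor produced above is laid out roughly as:
//   dwords 0-1: the 64-bit base pointer
//   dword  2:   0
//   dword  3:   the high half of getDefaultRsrcDataFormat()
// Keeping the constant half in its own REG_SEQUENCE lets CSE share it
// between multiple descriptors, as the comment in the function notes.
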
/// \brief Return a resource descriptor with the 'Add TID' bit enabled
/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
/// of the resource descriptor) to create an offset, which is added to
/// the resource pointer.
MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
                                           SDValue Ptr, uint32_t RsrcDword1,
                                           uint64_t RsrcDword2And3) const {
  SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
  SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
  if (RsrcDword1) {
    PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
                                       DAG.getConstant(RsrcDword1, DL, MVT::i32)),
                    0);
  }

  SDValue DataLo = buildSMovImm32(DAG, DL,
                                  RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
  SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);

  const SDValue Ops[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    PtrLo,
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    PtrHi,
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
    DataLo,
    DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
    DataHi,
    DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
}

//===----------------------------------------------------------------------===//
// SI Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass *>
SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                               StringRef Constraint,
                                               MVT VT) const {
  if (!isTypeLegal(VT))
    return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 's':
    case 'r':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        return std::make_pair(0U, &AMDGPU::SReg_32_XM0RegClass);
      case 64:
        return std::make_pair(0U, &AMDGPU::SGPR_64RegClass);
      case 128:
        return std::make_pair(0U, &AMDGPU::SReg_128RegClass);
      case 256:
        return std::make_pair(0U, &AMDGPU::SReg_256RegClass);
      case 512:
        return std::make_pair(0U, &AMDGPU::SReg_512RegClass);
      }

    case 'v':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        return std::make_pair(0U, &AMDGPU::VGPR_32RegClass);
      case 64:
        return std::make_pair(0U, &AMDGPU::VReg_64RegClass);
      case 96:
        return std::make_pair(0U, &AMDGPU::VReg_96RegClass);
      case 128:
        return std::make_pair(0U, &AMDGPU::VReg_128RegClass);
      case 256:
        return std::make_pair(0U, &AMDGPU::VReg_256RegClass);
      case 512:
        return std::make_pair(0U, &AMDGPU::VReg_512RegClass);
      }
    }
  }

  if (Constraint.size() > 1) {
    const TargetRegisterClass *RC = nullptr;
    if (Constraint[1] == 'v') {
      RC = &AMDGPU::VGPR_32RegClass;
    } else if (Constraint[1] == 's') {
      RC = &AMDGPU::SGPR_32RegClass;
    }

    if (RC) {
      uint32_t Idx;
      bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
      if (!Failed && Idx < RC->getNumRegs())
        return std::make_pair(RC->getRegister(Idx), RC);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

SITargetLowering::ConstraintType
SITargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 's':
    case 'v':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
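
// As a usage sketch (hypothetical user code, not part of this file), these
// constraints appear in inline assembly as:
//   asm("v_add_f32 %0, %1, %2" : "=v"(dst) : "v"(a), "s"(b));
// where 'v' requests a VGPR class and 's' an SGPR class of the matching
// width; multi-character constraints that name a specific register take the
// second path above.
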
// Figure out which registers should be reserved for stack access. Only after
// the function is legalized do we know all of the non-spill stack objects or if
// calls are present.
void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  if (Info->isEntryFunction()) {
    // Callable functions have fixed registers used for stack access.
    reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
  }

  // We have to assume the SP is needed in case there are calls in the function
  // during lowering. Calls are only detected after the function is
  // lowered. We're about to reserve registers, so don't bother using it if we
  // aren't really going to use it.
  bool NeedSP = !Info->isEntryFunction() ||
                MFI.hasVarSizedObjects() ||
                MFI.hasCalls();

  if (NeedSP) {
    unsigned ReservedStackPtrOffsetReg = TRI->reservedStackPtrOffsetReg(MF);
    Info->setStackPtrOffsetReg(ReservedStackPtrOffsetReg);

    assert(Info->getStackPtrOffsetReg() != Info->getFrameOffsetReg());
    assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
                               Info->getStackPtrOffsetReg()));
    MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
  }

  MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
  MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
  MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
                     Info->getScratchWaveOffsetReg());

  TargetLoweringBase::finalizeLowering(MF);
}

void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
                                                     KnownBits &Known,
                                                     const APInt &DemandedElts,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {
  TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
                                                DAG, Depth);

  if (getSubtarget()->enableHugePrivateBuffer())
    return;

  // Technically it may be possible to have a dispatch with a single workitem
  // that uses the full private memory size, but that's not really useful. We
  // can't use vaddr in MUBUF instructions if we don't know the address
  // calculation won't overflow, so assume the sign bit is never set.
  Known.Zero.setHighBits(AssumeFrameIndexHighZeroBits);
}