//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#ifdef _MSC_VER
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "si-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

static cl::opt<unsigned> AssumeFrameIndexHighZeroBits(
  "amdgpu-frame-index-zero-bits",
  cl::desc("High bits of frame index assumed to be zero"),
  cl::init(5),
  cl::ReallyHidden);

static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const SISubtarget &STI)
    : AMDGPUTargetLowering(TM, STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
  }

  if (Subtarget->hasVOP3PInsts()) {
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
  }

  computeRegisterProperties(STI.getRegisterInfo());

  // We need to custom lower vector stores from local memory
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

#if 0
  setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
#endif

  //setOperationAction(ISD::ADDC, MVT::i64, Expand);
  //setOperationAction(ISD::SUBC, MVT::i64, Expand);

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                 MVT::v2i64, MVT::v2f64}) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
  // and output demarshalling.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value,
  // let LLVM add the comparison.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (getSubtarget()->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // This is s_memtime on SI and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);

  if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);
  }

  if (Subtarget->hasVOP3PInsts()) {
    for (MVT VT : {MVT::v2i16, MVT::v2f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Legal);

    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns)
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
  } else {
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::BUILD_VECTOR);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);

  setSchedulingPreference(Sched::RegPressure);
}

const SISubtarget *SITargetLowering::getSubtarget() const {
  return static_cast<const SISubtarget *>(Subtarget);
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          MachineFunction &MF,
                                          unsigned IntrID) const {
  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_atomic_fadd:
  case Intrinsic::amdgcn_atomic_fmin:
  case Intrinsic::amdgcn_atomic_fmax: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
    if (!Vol || !Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }

  // Image load.
  case Intrinsic::amdgcn_image_load:
  case Intrinsic::amdgcn_image_load_mip:

  // Sample.
  case Intrinsic::amdgcn_image_sample:
  case Intrinsic::amdgcn_image_sample_cl:
  case Intrinsic::amdgcn_image_sample_d:
  case Intrinsic::amdgcn_image_sample_d_cl:
  case Intrinsic::amdgcn_image_sample_l:
  case Intrinsic::amdgcn_image_sample_b:
  case Intrinsic::amdgcn_image_sample_b_cl:
  case Intrinsic::amdgcn_image_sample_lz:
  case Intrinsic::amdgcn_image_sample_cd:
  case Intrinsic::amdgcn_image_sample_cd_cl:

  // Sample with comparison.
  case Intrinsic::amdgcn_image_sample_c:
  case Intrinsic::amdgcn_image_sample_c_cl:
  case Intrinsic::amdgcn_image_sample_c_d:
  case Intrinsic::amdgcn_image_sample_c_d_cl:
  case Intrinsic::amdgcn_image_sample_c_l:
  case Intrinsic::amdgcn_image_sample_c_b:
  case Intrinsic::amdgcn_image_sample_c_b_cl:
  case Intrinsic::amdgcn_image_sample_c_lz:
  case Intrinsic::amdgcn_image_sample_c_cd:
  case Intrinsic::amdgcn_image_sample_c_cd_cl:

  // Sample with offsets.
  case Intrinsic::amdgcn_image_sample_o:
  case Intrinsic::amdgcn_image_sample_cl_o:
  case Intrinsic::amdgcn_image_sample_d_o:
  case Intrinsic::amdgcn_image_sample_d_cl_o:
  case Intrinsic::amdgcn_image_sample_l_o:
  case Intrinsic::amdgcn_image_sample_b_o:
  case Intrinsic::amdgcn_image_sample_b_cl_o:
  case Intrinsic::amdgcn_image_sample_lz_o:
  case Intrinsic::amdgcn_image_sample_cd_o:
  case Intrinsic::amdgcn_image_sample_cd_cl_o:

  // Sample with comparison and offsets.
  case Intrinsic::amdgcn_image_sample_c_o:
  case Intrinsic::amdgcn_image_sample_c_cl_o:
  case Intrinsic::amdgcn_image_sample_c_d_o:
  case Intrinsic::amdgcn_image_sample_c_d_cl_o:
  case Intrinsic::amdgcn_image_sample_c_l_o:
  case Intrinsic::amdgcn_image_sample_c_b_o:
  case Intrinsic::amdgcn_image_sample_c_b_cl_o:
  case Intrinsic::amdgcn_image_sample_c_lz_o:
  case Intrinsic::amdgcn_image_sample_c_cd_o:
  case Intrinsic::amdgcn_image_sample_c_cd_cl_o:

  // Basic gather4
  case Intrinsic::amdgcn_image_gather4:
  case Intrinsic::amdgcn_image_gather4_cl:
  case Intrinsic::amdgcn_image_gather4_l:
  case Intrinsic::amdgcn_image_gather4_b:
  case Intrinsic::amdgcn_image_gather4_b_cl:
  case Intrinsic::amdgcn_image_gather4_lz:

  // Gather4 with comparison
  case Intrinsic::amdgcn_image_gather4_c:
  case Intrinsic::amdgcn_image_gather4_c_cl:
  case Intrinsic::amdgcn_image_gather4_c_l:
  case Intrinsic::amdgcn_image_gather4_c_b:
  case Intrinsic::amdgcn_image_gather4_c_b_cl:
  case Intrinsic::amdgcn_image_gather4_c_lz:

  // Gather4 with offsets
  case Intrinsic::amdgcn_image_gather4_o:
  case Intrinsic::amdgcn_image_gather4_cl_o:
  case Intrinsic::amdgcn_image_gather4_l_o:
  case Intrinsic::amdgcn_image_gather4_b_o:
  case Intrinsic::amdgcn_image_gather4_b_cl_o:
  case Intrinsic::amdgcn_image_gather4_lz_o:

  // Gather4 with comparison and offsets
  case Intrinsic::amdgcn_image_gather4_c_o:
  case Intrinsic::amdgcn_image_gather4_c_cl_o:
  case Intrinsic::amdgcn_image_gather4_c_l_o:
  case Intrinsic::amdgcn_image_gather4_c_b_o:
  case Intrinsic::amdgcn_image_gather4_c_b_cl_o:
  case Intrinsic::amdgcn_image_gather4_c_lz_o: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = MFI->getImagePSV(
      *MF.getSubtarget<SISubtarget>().getInstrInfo(),
      CI.getArgOperand(1));
    Info.align = 0;
    Info.flags = MachineMemOperand::MOLoad |
                 MachineMemOperand::MODereferenceable;
    return true;
  }
  case Intrinsic::amdgcn_image_store:
  case Intrinsic::amdgcn_image_store_mip: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
    Info.ptrVal = MFI->getImagePSV(
      *MF.getSubtarget<SISubtarget>().getInstrInfo(),
      CI.getArgOperand(2));
    Info.flags = MachineMemOperand::MOStore |
                 MachineMemOperand::MODereferenceable;
    Info.align = 0;
    return true;
  }
  case Intrinsic::amdgcn_image_atomic_swap:
  case Intrinsic::amdgcn_image_atomic_add:
  case Intrinsic::amdgcn_image_atomic_sub:
  case Intrinsic::amdgcn_image_atomic_smin:
  case Intrinsic::amdgcn_image_atomic_umin:
  case Intrinsic::amdgcn_image_atomic_smax:
  case Intrinsic::amdgcn_image_atomic_umax:
  case Intrinsic::amdgcn_image_atomic_and:
  case Intrinsic::amdgcn_image_atomic_or:
  case Intrinsic::amdgcn_image_atomic_xor:
  case Intrinsic::amdgcn_image_atomic_inc:
  case Intrinsic::amdgcn_image_atomic_dec: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = MFI->getImagePSV(
      *MF.getSubtarget<SISubtarget>().getInstrInfo(),
      CI.getArgOperand(2));

    Info.flags = MachineMemOperand::MOLoad |
                 MachineMemOperand::MOStore |
                 MachineMemOperand::MODereferenceable;

    // XXX - Should this be volatile without known ordering?
    Info.flags |= MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::amdgcn_image_atomic_cmpswap: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = MFI->getImagePSV(
      *MF.getSubtarget<SISubtarget>().getInstrInfo(),
      CI.getArgOperand(3));

    Info.flags = MachineMemOperand::MOLoad |
                 MachineMemOperand::MOStore |
                 MachineMemOperand::MODereferenceable;

    // XXX - Should this be volatile without known ordering?
    Info.flags |= MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::amdgcn_tbuffer_load:
  case Intrinsic::amdgcn_buffer_load:
  case Intrinsic::amdgcn_buffer_load_format: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = MFI->getBufferPSV(
      *MF.getSubtarget<SISubtarget>().getInstrInfo(),
      CI.getArgOperand(0));
    Info.memVT = MVT::getVT(CI.getType());
    Info.flags = MachineMemOperand::MOLoad |
                 MachineMemOperand::MODereferenceable;

    // There is a constant offset component, but there are additional register
    // offsets which could break AA if we set the offset to anything non-0.
    return true;
  }
  case Intrinsic::amdgcn_tbuffer_store:
  case Intrinsic::amdgcn_buffer_store:
  case Intrinsic::amdgcn_buffer_store_format: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.opc = ISD::INTRINSIC_VOID;
    Info.ptrVal = MFI->getBufferPSV(
      *MF.getSubtarget<SISubtarget>().getInstrInfo(),
      CI.getArgOperand(1));
    Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
    Info.flags = MachineMemOperand::MOStore |
                 MachineMemOperand::MODereferenceable;
    return true;
  }
  case Intrinsic::amdgcn_buffer_atomic_swap:
  case Intrinsic::amdgcn_buffer_atomic_add:
  case Intrinsic::amdgcn_buffer_atomic_sub:
  case Intrinsic::amdgcn_buffer_atomic_smin:
  case Intrinsic::amdgcn_buffer_atomic_umin:
  case Intrinsic::amdgcn_buffer_atomic_smax:
  case Intrinsic::amdgcn_buffer_atomic_umax:
  case Intrinsic::amdgcn_buffer_atomic_and:
  case Intrinsic::amdgcn_buffer_atomic_or:
  case Intrinsic::amdgcn_buffer_atomic_xor: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = MFI->getBufferPSV(
      *MF.getSubtarget<SISubtarget>().getInstrInfo(),
      CI.getArgOperand(1));
    Info.memVT = MVT::getVT(CI.getType());
    Info.flags = MachineMemOperand::MOLoad |
                 MachineMemOperand::MOStore |
                 MachineMemOperand::MODereferenceable |
                 MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = MFI->getBufferPSV(
      *MF.getSubtarget<SISubtarget>().getInstrInfo(),
      CI.getArgOperand(2));
    Info.memVT = MVT::getVT(CI.getType());
    Info.flags = MachineMemOperand::MOLoad |
                 MachineMemOperand::MOStore |
                 MachineMemOperand::MODereferenceable |
                 MachineMemOperand::MOVolatile;
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_atomic_fadd:
  case Intrinsic::amdgcn_atomic_fmin:
  case Intrinsic::amdgcn_atomic_fmax: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and is treated as a 12-bit unsigned offset.

  // Just r + i
  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
}

bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
  if (Subtarget->hasFlatGlobalInsts())
    return isInt<13>(AM.BaseOffs) && AM.Scale == 0;

  if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
    // Assume that we will use FLAT for all global memory accesses
    // on VI.
    // FIXME: This assumption is currently wrong. On VI we still use
    // MUBUF instructions for the r + i addressing mode. As currently
    // implemented, the MUBUF instructions only work on buffers < 4GB.
    // It may be possible to support > 4GB buffers with MUBUF instructions,
    // by setting the stride value in the resource descriptor which would
    // increase the size limit to (stride * 4GB). However, this is risky,
    // because it has never been validated.
    return isLegalFlatAddressingMode(AM);
  }

  return isLegalMUBUFAddressingMode(AM);
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so slightly
  // different than the normal addr64.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r.
    // Or  2 * r + i is allowed as r + r + i.
    return true;
  default: // Don't allow n * r
    return false;
  }
}

bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS, Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  if (AS == AMDGPUASI.GLOBAL_ADDRESS)
    return isLegalGlobalAddressingMode(AM);

  if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (DL.getTypeStoreSize(Ty) < 4)
      return isLegalGlobalAddressingMode(AM);

    if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8 bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is 20-bit in bytes.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;

  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS ||
             AS == AMDGPUASI.REGION_ADDRESS) {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUASI.FLAT_ADDRESS ||
             AS == AMDGPUASI.UNKNOWN_ADDRESS_SPACE) {
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);
  } else {
    llvm_unreachable("unhandled address space");
  }
}

bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
                                        const SelectionDAG &DAG) const {
  if (AS == AMDGPUASI.GLOBAL_ADDRESS || AS == AMDGPUASI.FLAT_ADDRESS) {
    return (MemVT.getSizeInBits() <= 4 * 32);
  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
    return (MemVT.getSizeInBits() <= MaxPrivateBits);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
    return (MemVT.getSizeInBits() <= 2 * 32);
  }
  return true;
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUASI.LOCAL_ADDRESS ||
      AddrSpace == AMDGPUASI.REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch. If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUASI.PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUASI.FLAT_ADDRESS)) {
    return false;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have a uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS) ?
        (Align % 4 == 0) : true;
    }

    return true;
  }

  // Smaller-than-dword values must be aligned.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                          unsigned SrcAlign, bool IsMemset,
                                          bool ZeroMemset,
                                          bool MemcpyStrSrc,
                                          MachineFunction &MF) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type to
  // use. Make sure we switch these to 64-bit accesses.

  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
    return MVT::v4i32;

  if (Size >= 8 && DstAlign >= 4)
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}

static bool isFlatGlobalAddrSpace(unsigned AS, AMDGPUAS AMDGPUASI) {
  return AS == AMDGPUASI.GLOBAL_ADDRESS ||
         AS == AMDGPUASI.FLAT_ADDRESS ||
         AS == AMDGPUASI.CONSTANT_ADDRESS;
}

bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  return isFlatGlobalAddrSpace(SrcAS, AMDGPUASI) &&
         isFlatGlobalAddrSpace(DestAS, AMDGPUASI);
}

bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();
  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.noclobber");
}

bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
                                            unsigned DestAS) const {
  // Flat -> private/local is a simple truncate.
  // Flat -> global is no-op.
  if (SrcAS == AMDGPUASI.FLAT_ADDRESS)
    return true;

  return isNoopAddrSpaceCast(SrcAS, DestAS);
}

bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);

  return AMDGPU::isUniformMMO(MemNode->getMemOperand());
}

TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(EVT VT) const {
  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
    return TypeSplitVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                         Type *Ty) const {
  // FIXME: Could be smarter if called for vector constants.
  return true;
}

bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
  if (Subtarget->has16BitInsts() && VT == MVT::i16) {
    switch (Op) {
    case ISD::LOAD:
    case ISD::STORE:

    // These operations are done with 32-bit instructions anyway.
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SELECT:
      // TODO: Extensions?
      return true;
    default:
      return false;
    }
  }

  // SimplifySetCC uses this function to determine whether or not it should
  // create setcc with i1 operands. We don't have instructions for i1 setcc.
  if (VT == MVT::i1 && Op == ISD::SETCC)
    return false;

  return TargetLowering::isTypeDesirableForOp(Op, VT);
}

SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
                                                   const SDLoc &SL,
                                                   SDValue Chain,
                                                   uint64_t Offset) const {
  const DataLayout &DL = DAG.getDataLayout();
  MachineFunction &MF = DAG.getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  const ArgDescriptor *InputPtrReg;
  const TargetRegisterClass *RC;

  std::tie(InputPtrReg, RC)
    = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);

  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  MVT PtrVT = getPointerTy(DL, AMDGPUASI.CONSTANT_ADDRESS);
  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
    MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);

  return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                     DAG.getConstant(Offset, SL, PtrVT));
}

SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
                                            const SDLoc &SL) const {
  auto MFI = DAG.getMachineFunction().getInfo<SIMachineFunctionInfo>();
  uint64_t Offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
  return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
}

SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                         const SDLoc &SL, SDValue Val,
                                         bool Signed,
                                         const ISD::InputArg *Arg) const {
  if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
      VT.bitsLT(MemVT)) {
    unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
    Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
  }

  if (MemVT.isFloatingPoint())
    Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
  else if (Signed)
    Val = DAG.getSExtOrTrunc(Val, SL, VT);
  else
    Val = DAG.getZExtOrTrunc(Val, SL, VT);

  return Val;
}

SDValue SITargetLowering::lowerKernargMemParameter(
  SelectionDAG &DAG, EVT VT, EVT MemVT,
  const SDLoc &SL, SDValue Chain,
  uint64_t Offset, bool Signed,
  const ISD::InputArg *Arg) const {
  const DataLayout &DL = DAG.getDataLayout();
  Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  unsigned Align = DL.getABITypeAlignment(Ty);

  SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
  SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
                             MachineMemOperand::MONonTemporal |
                             MachineMemOperand::MODereferenceable |
                             MachineMemOperand::MOInvariant);

  SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
  return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
}

SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
                                              const SDLoc &SL, SDValue Chain,
                                              const ISD::InputArg &Arg) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (Arg.Flags.isByVal()) {
    unsigned Size = Arg.Flags.getByValSize();
    int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
    return DAG.getFrameIndex(FrameIdx, MVT::i32);
  }

  unsigned ArgOffset = VA.getLocMemOffset();
  unsigned ArgSize = VA.getValVT().getStoreSize();

  int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);

  // Create load nodes to retrieve arguments from the stack.
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  SDValue ArgValue;

  // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
  ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
  MVT MemVT = VA.getValVT();

  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::BCvt:
    MemVT = VA.getLocVT();
    break;
  case CCValAssign::SExt:
    ExtType = ISD::SEXTLOAD;
    break;
  case CCValAssign::ZExt:
    ExtType = ISD::ZEXTLOAD;
    break;
  case CCValAssign::AExt:
    ExtType = ISD::EXTLOAD;
    break;
  }

  ArgValue = DAG.getExtLoad(
    ExtType, SL, VA.getLocVT(), Chain, FIN,
    MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
    MemVT);
  return ArgValue;
}

SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
  const SIMachineFunctionInfo &MFI,
  EVT VT,
  AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
  const ArgDescriptor *Reg;
  const TargetRegisterClass *RC;

  std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
  return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
}

static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
                                   CallingConv::ID CallConv,
                                   ArrayRef<ISD::InputArg> Ins,
                                   BitVector &Skipped,
                                   FunctionType *FType,
                                   SIMachineFunctionInfo *Info) {
  for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
    const ISD::InputArg &Arg = Ins[I];

    // First check if it's a PS input addr.
    if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
        !Arg.Flags.isByVal() && PSInputNum <= 15) {

      if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
        // We can safely skip PS inputs.
        Skipped.set(I);
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (Arg.Used)
        Info->markPSInputEnabled(PSInputNum);

      ++PSInputNum;
    }

    // Second, split vertices into their elements.
    if (Arg.VT.isVector()) {
      ISD::InputArg NewArg = Arg;
      NewArg.Flags.setSplit();
      NewArg.VT = Arg.VT.getVectorElementType();

      // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
      // three or five element vertex only needs three or five registers,
      // NOT four or eight.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      for (unsigned J = 0; J != NumElements; ++J) {
        Splits.push_back(NewArg);
        NewArg.PartOffset += NewArg.VT.getStoreSize();
      }
    } else {
      Splits.push_back(Arg);
    }
  }
}

// Allocate special inputs passed in VGPRs.
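// For kernel entry points, the hardware provides the workitem IDs in the
// first VGPRs (VGPR0..VGPR2 below); we record those fixed registers in the
// function info so later lowering can read them directly.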
static void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
                                           MachineFunction &MF,
                                           const SIRegisterInfo &TRI,
                                           SIMachineFunctionInfo &Info) {
  if (Info.hasWorkItemIDX()) {
    unsigned Reg = AMDGPU::VGPR0;
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);

    CCInfo.AllocateReg(Reg);
    Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
  }

  if (Info.hasWorkItemIDY()) {
    unsigned Reg = AMDGPU::VGPR1;
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);

    CCInfo.AllocateReg(Reg);
    Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
  }

  if (Info.hasWorkItemIDZ()) {
    unsigned Reg = AMDGPU::VGPR2;
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);

    CCInfo.AllocateReg(Reg);
    Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
  }
}

// Try to allocate a VGPR at the end of the argument list, or if no argument
// VGPRs are left, allocate a stack slot instead.
static ArgDescriptor allocateVGPR32Input(CCState &CCInfo) {
  ArrayRef<MCPhysReg> ArgVGPRs
    = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
  unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
  if (RegIdx == ArgVGPRs.size()) {
    // Spill to stack required.
    int64_t Offset = CCInfo.AllocateStack(4, 4);

    return ArgDescriptor::createStack(Offset);
  }

  unsigned Reg = ArgVGPRs[RegIdx];
  Reg = CCInfo.AllocateReg(Reg);
  assert(Reg != AMDGPU::NoRegister);

  MachineFunction &MF = CCInfo.getMachineFunction();
  MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
  return ArgDescriptor::createRegister(Reg);
}

static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
                                             const TargetRegisterClass *RC,
                                             unsigned NumArgRegs) {
  ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
  unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
  if (RegIdx == ArgSGPRs.size())
    report_fatal_error("ran out of SGPRs for arguments");

  unsigned Reg = ArgSGPRs[RegIdx];
  Reg = CCInfo.AllocateReg(Reg);
  assert(Reg != AMDGPU::NoRegister);

  MachineFunction &MF = CCInfo.getMachineFunction();
  MF.addLiveIn(Reg, RC);
  return ArgDescriptor::createRegister(Reg);
}

static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
  return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
}

static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
  return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
}

static void allocateSpecialInputVGPRs(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) {
  if (Info.hasWorkItemIDX())
    Info.setWorkItemIDX(allocateVGPR32Input(CCInfo));

  if (Info.hasWorkItemIDY())
    Info.setWorkItemIDY(allocateVGPR32Input(CCInfo));

  if (Info.hasWorkItemIDZ())
    Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo));
}

static void allocateSpecialInputSGPRs(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) {
  auto &ArgInfo = Info.getArgInfo();

  // TODO: Unify handling with private memory pointers.
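  // Each of these special SGPR inputs is allocated only if the function
  // actually requires it, so unused inputs don't consume argument registers.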

  if (Info.hasDispatchPtr())
    ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);

  if (Info.hasQueuePtr())
    ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);

  if (Info.hasKernargSegmentPtr())
    ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);

  if (Info.hasDispatchID())
    ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);

  // flat_scratch_init is not applicable for non-kernel functions.

  if (Info.hasWorkGroupIDX())
    ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);

  if (Info.hasWorkGroupIDY())
    ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);

  if (Info.hasWorkGroupIDZ())
    ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);

  if (Info.hasImplicitArgPtr())
    ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  if (Info.hasImplicitBufferPtr()) {
    unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    unsigned QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
    MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    unsigned DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.
}

// Allocate special input registers that are initialized per-wave.
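// These system SGPRs (workgroup IDs, workgroup info, and the private segment
// wave byte offset) are set up by the hardware and follow the user SGPRs
// allocated above in the kernel's input SGPR layout.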
static void allocateSystemSGPRs(CCState &CCInfo,
                                MachineFunction &MF,
                                SIMachineFunctionInfo &Info,
                                CallingConv::ID CallConv,
                                bool IsShader) {
  if (Info.hasWorkGroupIDX()) {
    unsigned Reg = Info.addWorkGroupIDX();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDY()) {
    unsigned Reg = Info.addWorkGroupIDY();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDZ()) {
    unsigned Reg = Info.addWorkGroupIDZ();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupInfo()) {
    unsigned Reg = Info.addWorkGroupInfo();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg;

    if (IsShader) {
      PrivateSegmentWaveByteOffsetReg =
        Info.getPrivateSegmentWaveByteOffsetSystemSGPR();

      // This is true if the scratch wave byte offset doesn't have a fixed
      // location.
      if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
        PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
        Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
      }
    } else
      PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }
}

static void reservePrivateMemoryRegs(const TargetMachine &TM,
                                     MachineFunction &MF,
                                     const SIRegisterInfo &TRI,
                                     SIMachineFunctionInfo &Info) {
  // Now that we've figured out where the scratch register inputs are, see if
  // we should reserve the arguments and use them directly.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool HasStackObjects = MFI.hasStackObjects();

  // Record that we know we have non-spill stack objects so we don't need to
  // check all stack objects later.
  if (HasStackObjects)
    Info.setHasNonSpillStackObjects(true);

  // Everything live out of a block is spilled with fast regalloc, so it's
  // almost certain that spilling will be required.
  if (TM.getOptLevel() == CodeGenOpt::None)
    HasStackObjects = true;

  // For now assume stack access is needed in any callee functions, so we need
  // the scratch registers to pass in.
  bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  if (ST.isAmdCodeObjectV2(MF)) {
    if (RequiresStackAccess) {
      // If we have stack objects, we unquestionably need the private buffer
      // resource. For the Code Object V2 ABI, this will be the first 4 user
      // SGPR inputs. We can reserve those and use them directly.

      unsigned PrivateSegmentBufferReg = Info.getPreloadedReg(
        AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
      Info.setScratchRSrcReg(PrivateSegmentBufferReg);

      if (MFI.hasCalls()) {
        // If we have calls, we need to keep the frame register in a register
        // that won't be clobbered by a call, so ensure it is copied somewhere.

        // This is not a problem for the scratch wave offset, because the same
        // registers are reserved in all functions.
1591 1592 // FIXME: Nothing is really ensuring this is a call preserved register, 1593 // it's just selected from the end so it happens to be. 1594 unsigned ReservedOffsetReg 1595 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF); 1596 Info.setScratchWaveOffsetReg(ReservedOffsetReg); 1597 } else { 1598 unsigned PrivateSegmentWaveByteOffsetReg = Info.getPreloadedReg( 1599 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); 1600 Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg); 1601 } 1602 } else { 1603 unsigned ReservedBufferReg 1604 = TRI.reservedPrivateSegmentBufferReg(MF); 1605 unsigned ReservedOffsetReg 1606 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF); 1607 1608 // We tentatively reserve the last registers (skipping the last two 1609 // which may contain VCC). After register allocation, we'll replace 1610 // these with the ones immediately after those which were really 1611 // allocated. In the prologue copies will be inserted from the argument 1612 // to these reserved registers. 1613 Info.setScratchRSrcReg(ReservedBufferReg); 1614 Info.setScratchWaveOffsetReg(ReservedOffsetReg); 1615 } 1616 } else { 1617 unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF); 1618 1619 // Without HSA, relocations are used for the scratch pointer and the 1620 // buffer resource setup is always inserted in the prologue. Scratch wave 1621 // offset is still in an input SGPR. 1622 Info.setScratchRSrcReg(ReservedBufferReg); 1623 1624 if (HasStackObjects && !MFI.hasCalls()) { 1625 unsigned ScratchWaveOffsetReg = Info.getPreloadedReg( 1626 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); 1627 Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg); 1628 } else { 1629 unsigned ReservedOffsetReg 1630 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF); 1631 Info.setScratchWaveOffsetReg(ReservedOffsetReg); 1632 } 1633 } 1634 } 1635 1636 bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const { 1637 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 1638 return !Info->isEntryFunction(); 1639 } 1640 1641 void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { 1642 1643 } 1644 1645 void SITargetLowering::insertCopiesSplitCSR( 1646 MachineBasicBlock *Entry, 1647 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 1648 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 1649 1650 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); 1651 if (!IStart) 1652 return; 1653 1654 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 1655 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); 1656 MachineBasicBlock::iterator MBBI = Entry->begin(); 1657 for (const MCPhysReg *I = IStart; *I; ++I) { 1658 const TargetRegisterClass *RC = nullptr; 1659 if (AMDGPU::SReg_64RegClass.contains(*I)) 1660 RC = &AMDGPU::SGPR_64RegClass; 1661 else if (AMDGPU::SReg_32RegClass.contains(*I)) 1662 RC = &AMDGPU::SGPR_32RegClass; 1663 else 1664 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 1665 1666 unsigned NewVR = MRI->createVirtualRegister(RC); 1667 // Create copy from CSR to a virtual register. 1668 Entry->addLiveIn(*I); 1669 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) 1670 .addReg(*I); 1671 1672 // Insert the copy-back instructions right before the terminator. 
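// For example, for a 64-bit CSR reported by getCalleeSavedRegsViaCopy this
// emits one %vreg = COPY $csr in the entry block and a matching
// $csr = COPY %vreg in front of each exit block's terminator.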
1673 for (auto *Exit : Exits)
1674 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
1675 TII->get(TargetOpcode::COPY), *I)
1676 .addReg(NewVR);
1677 }
1678 }
1679
1680 SDValue SITargetLowering::LowerFormalArguments(
1681 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1682 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1683 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1684 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1685
1686 MachineFunction &MF = DAG.getMachineFunction();
1687 FunctionType *FType = MF.getFunction().getFunctionType();
1688 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1689 const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
1690
1691 if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
1692 const Function &Fn = MF.getFunction();
1693 DiagnosticInfoUnsupported NoGraphicsHSA(
1694 Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
1695 DAG.getContext()->diagnose(NoGraphicsHSA);
1696 return DAG.getEntryNode();
1697 }
1698
1699 // Create stack objects that are used for emitting debugger prologue if
1700 // "amdgpu-debugger-emit-prologue" attribute was specified.
1701 if (ST.debuggerEmitPrologue())
1702 createDebuggerPrologueStackObjects(MF);
1703
1704 SmallVector<ISD::InputArg, 16> Splits;
1705 SmallVector<CCValAssign, 16> ArgLocs;
1706 BitVector Skipped(Ins.size());
1707 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1708 *DAG.getContext());
1709
1710 bool IsShader = AMDGPU::isShader(CallConv);
1711 bool IsKernel = AMDGPU::isKernel(CallConv);
1712 bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
1713
1714 if (!IsEntryFunc) {
1715 // 4 bytes are reserved at offset 0 for the emergency stack slot. Skip over
1716 // this when allocating argument fixed offsets.
1717 CCInfo.AllocateStack(4, 4);
1718 }
1719
1720 if (IsShader) {
1721 processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
1722
1723 // At least one interpolation mode must be enabled or else the GPU will
1724 // hang.
1725 //
1726 // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
1727 // set PSInputAddr, the user wants to enable some bits after the compilation
1728 // based on run-time states. Since we can't know what the final PSInputEna
1729 // will look like, we shouldn't do anything here and the user should take
1730 // responsibility for the correct programming.
1731 //
1732 // Otherwise, the following restrictions apply:
1733 // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
1734 // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
1735 // enabled too.
1736 if (CallConv == CallingConv::AMDGPU_PS) {
1737 if ((Info->getPSInputAddr() & 0x7F) == 0 ||
1738 ((Info->getPSInputAddr() & 0xF) == 0 &&
1739 Info->isPSInputAllocated(11))) {
1740 CCInfo.AllocateReg(AMDGPU::VGPR0);
1741 CCInfo.AllocateReg(AMDGPU::VGPR1);
1742 Info->markPSInputAllocated(0);
1743 Info->markPSInputEnabled(0);
1744 }
1745 if (Subtarget->isAmdPalOS()) {
1746 // For isAmdPalOS, the user does not enable some bits after compilation
1747 // based on run-time states; the register values being generated here are
1748 // the final ones set in hardware. Therefore we need to apply the
1749 // workaround to PSInputAddr and PSInputEnable together.
(The case where 1750 // a bit is set in PSInputAddr but not PSInputEnable is where the 1751 // frontend set up an input arg for a particular interpolation mode, but 1752 // nothing uses that input arg. Really we should have an earlier pass 1753 // that removes such an arg.) 1754 unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable(); 1755 if ((PsInputBits & 0x7F) == 0 || 1756 ((PsInputBits & 0xF) == 0 && 1757 (PsInputBits >> 11 & 1))) 1758 Info->markPSInputEnabled( 1759 countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined)); 1760 } 1761 } 1762 1763 assert(!Info->hasDispatchPtr() && 1764 !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() && 1765 !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() && 1766 !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() && 1767 !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() && 1768 !Info->hasWorkItemIDZ()); 1769 } else if (IsKernel) { 1770 assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX()); 1771 } else { 1772 Splits.append(Ins.begin(), Ins.end()); 1773 } 1774 1775 if (IsEntryFunc) { 1776 allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info); 1777 allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info); 1778 } 1779 1780 if (IsKernel) { 1781 analyzeFormalArgumentsCompute(CCInfo, Ins); 1782 } else { 1783 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg); 1784 CCInfo.AnalyzeFormalArguments(Splits, AssignFn); 1785 } 1786 1787 SmallVector<SDValue, 16> Chains; 1788 1789 for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) { 1790 const ISD::InputArg &Arg = Ins[i]; 1791 if (Skipped[i]) { 1792 InVals.push_back(DAG.getUNDEF(Arg.VT)); 1793 continue; 1794 } 1795 1796 CCValAssign &VA = ArgLocs[ArgIdx++]; 1797 MVT VT = VA.getLocVT(); 1798 1799 if (IsEntryFunc && VA.isMemLoc()) { 1800 VT = Ins[i].VT; 1801 EVT MemVT = VA.getLocVT(); 1802 1803 const uint64_t Offset = Subtarget->getExplicitKernelArgOffset(MF) + 1804 VA.getLocMemOffset(); 1805 Info->setABIArgOffset(Offset + MemVT.getStoreSize()); 1806 1807 // The first 36 bytes of the input buffer contains information about 1808 // thread group and global sizes. 1809 SDValue Arg = lowerKernargMemParameter( 1810 DAG, VT, MemVT, DL, Chain, Offset, Ins[i].Flags.isSExt(), &Ins[i]); 1811 Chains.push_back(Arg.getValue(1)); 1812 1813 auto *ParamTy = 1814 dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex())); 1815 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS && 1816 ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { 1817 // On SI local pointers are just offsets into LDS, so they are always 1818 // less than 16-bits. On CI and newer they could potentially be 1819 // real pointers, so we can't guarantee their size. 
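// Attaching an AssertZext to i16 records that guarantee in the DAG, so a
// later zero-extend or mask of the low 16 bits can typically be folded away.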
1820 Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg, 1821 DAG.getValueType(MVT::i16)); 1822 } 1823 1824 InVals.push_back(Arg); 1825 continue; 1826 } else if (!IsEntryFunc && VA.isMemLoc()) { 1827 SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg); 1828 InVals.push_back(Val); 1829 if (!Arg.Flags.isByVal()) 1830 Chains.push_back(Val.getValue(1)); 1831 continue; 1832 } 1833 1834 assert(VA.isRegLoc() && "Parameter must be in a register!"); 1835 1836 unsigned Reg = VA.getLocReg(); 1837 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); 1838 EVT ValVT = VA.getValVT(); 1839 1840 Reg = MF.addLiveIn(Reg, RC); 1841 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT); 1842 1843 if (Arg.Flags.isSRet() && !getSubtarget()->enableHugePrivateBuffer()) { 1844 // The return object should be reasonably addressable. 1845 1846 // FIXME: This helps when the return is a real sret. If it is a 1847 // automatically inserted sret (i.e. CanLowerReturn returns false), an 1848 // extra copy is inserted in SelectionDAGBuilder which obscures this. 1849 unsigned NumBits = 32 - AssumeFrameIndexHighZeroBits; 1850 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, 1851 DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits))); 1852 } 1853 1854 // If this is an 8 or 16-bit value, it is really passed promoted 1855 // to 32 bits. Insert an assert[sz]ext to capture this, then 1856 // truncate to the right size. 1857 switch (VA.getLocInfo()) { 1858 case CCValAssign::Full: 1859 break; 1860 case CCValAssign::BCvt: 1861 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val); 1862 break; 1863 case CCValAssign::SExt: 1864 Val = DAG.getNode(ISD::AssertSext, DL, VT, Val, 1865 DAG.getValueType(ValVT)); 1866 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); 1867 break; 1868 case CCValAssign::ZExt: 1869 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, 1870 DAG.getValueType(ValVT)); 1871 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); 1872 break; 1873 case CCValAssign::AExt: 1874 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); 1875 break; 1876 default: 1877 llvm_unreachable("Unknown loc info!"); 1878 } 1879 1880 if (IsShader && Arg.VT.isVector()) { 1881 // Build a vector from the registers 1882 Type *ParamType = FType->getParamType(Arg.getOrigArgIndex()); 1883 unsigned NumElements = ParamType->getVectorNumElements(); 1884 1885 SmallVector<SDValue, 4> Regs; 1886 Regs.push_back(Val); 1887 for (unsigned j = 1; j != NumElements; ++j) { 1888 Reg = ArgLocs[ArgIdx++].getLocReg(); 1889 Reg = MF.addLiveIn(Reg, RC); 1890 1891 SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT); 1892 Regs.push_back(Copy); 1893 } 1894 1895 // Fill up the missing vector elements 1896 NumElements = Arg.VT.getVectorNumElements() - NumElements; 1897 Regs.append(NumElements, DAG.getUNDEF(VT)); 1898 1899 InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs)); 1900 continue; 1901 } 1902 1903 InVals.push_back(Val); 1904 } 1905 1906 if (!IsEntryFunc) { 1907 // Special inputs come after user arguments. 1908 allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info); 1909 } 1910 1911 // Start adding system SGPRs. 
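// Entry functions receive the per-wave system SGPRs here. Callable functions
// instead mark the scratch rsrc, scratch wave offset and frame offset
// registers as allocated, which keeps the special input allocation below from
// handing them out again.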
1912 if (IsEntryFunc) { 1913 allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader); 1914 } else { 1915 CCInfo.AllocateReg(Info->getScratchRSrcReg()); 1916 CCInfo.AllocateReg(Info->getScratchWaveOffsetReg()); 1917 CCInfo.AllocateReg(Info->getFrameOffsetReg()); 1918 allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info); 1919 } 1920 1921 auto &ArgUsageInfo = 1922 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); 1923 ArgUsageInfo.setFuncArgInfo(MF.getFunction(), Info->getArgInfo()); 1924 1925 unsigned StackArgSize = CCInfo.getNextStackOffset(); 1926 Info->setBytesInStackArgArea(StackArgSize); 1927 1928 return Chains.empty() ? Chain : 1929 DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 1930 } 1931 1932 // TODO: If return values can't fit in registers, we should return as many as 1933 // possible in registers before passing on stack. 1934 bool SITargetLowering::CanLowerReturn( 1935 CallingConv::ID CallConv, 1936 MachineFunction &MF, bool IsVarArg, 1937 const SmallVectorImpl<ISD::OutputArg> &Outs, 1938 LLVMContext &Context) const { 1939 // Replacing returns with sret/stack usage doesn't make sense for shaders. 1940 // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn 1941 // for shaders. Vector types should be explicitly handled by CC. 1942 if (AMDGPU::isEntryFunctionCC(CallConv)) 1943 return true; 1944 1945 SmallVector<CCValAssign, 16> RVLocs; 1946 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); 1947 return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg)); 1948 } 1949 1950 SDValue 1951 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 1952 bool isVarArg, 1953 const SmallVectorImpl<ISD::OutputArg> &Outs, 1954 const SmallVectorImpl<SDValue> &OutVals, 1955 const SDLoc &DL, SelectionDAG &DAG) const { 1956 MachineFunction &MF = DAG.getMachineFunction(); 1957 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 1958 1959 if (AMDGPU::isKernel(CallConv)) { 1960 return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs, 1961 OutVals, DL, DAG); 1962 } 1963 1964 bool IsShader = AMDGPU::isShader(CallConv); 1965 1966 Info->setIfReturnsVoid(Outs.size() == 0); 1967 bool IsWaveEnd = Info->returnsVoid() && IsShader; 1968 1969 SmallVector<ISD::OutputArg, 48> Splits; 1970 SmallVector<SDValue, 48> SplitVals; 1971 1972 // Split vectors into their elements. 1973 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 1974 const ISD::OutputArg &Out = Outs[i]; 1975 1976 if (IsShader && Out.VT.isVector()) { 1977 MVT VT = Out.VT.getVectorElementType(); 1978 ISD::OutputArg NewOut = Out; 1979 NewOut.Flags.setSplit(); 1980 NewOut.VT = VT; 1981 1982 // We want the original number of vector elements here, e.g. 1983 // three or five, not four or eight. 1984 unsigned NumElements = Out.ArgVT.getVectorNumElements(); 1985 1986 for (unsigned j = 0; j != NumElements; ++j) { 1987 SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i], 1988 DAG.getConstant(j, DL, MVT::i32)); 1989 SplitVals.push_back(Elem); 1990 Splits.push_back(NewOut); 1991 NewOut.PartOffset += NewOut.VT.getStoreSize(); 1992 } 1993 } else { 1994 SplitVals.push_back(OutVals[i]); 1995 Splits.push_back(Out); 1996 } 1997 } 1998 1999 // CCValAssign - represent the assignment of the return value to a location. 2000 SmallVector<CCValAssign, 48> RVLocs; 2001 2002 // CCState - Info about the registers and stack slots. 
2003 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2004 *DAG.getContext());
2005
2006 // Analyze outgoing return values.
2007 CCInfo.AnalyzeReturn(Splits, CCAssignFnForReturn(CallConv, isVarArg));
2008
2009 SDValue Flag;
2010 SmallVector<SDValue, 48> RetOps;
2011 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2012
2013 // Add return address for callable functions.
2014 if (!Info->isEntryFunction()) {
2015 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2016 SDValue ReturnAddrReg = CreateLiveInRegister(
2017 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2018
2019 // FIXME: Should be able to use a vreg here, but need a way to prevent it
2020 // from being allocated to a CSR.
2021
2022 SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2023 MVT::i64);
2024
2025 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
2026 Flag = Chain.getValue(1);
2027
2028 RetOps.push_back(PhysReturnAddrReg);
2029 }
2030
2031 // Copy the result values into the output registers.
2032 for (unsigned i = 0, realRVLocIdx = 0;
2033 i != RVLocs.size();
2034 ++i, ++realRVLocIdx) {
2035 CCValAssign &VA = RVLocs[i];
2036 assert(VA.isRegLoc() && "Can only return in registers!");
2037 // TODO: Partially return in registers if return values don't fit.
2038
2039 SDValue Arg = SplitVals[realRVLocIdx];
2040
2041 // Copied from other backends.
2042 switch (VA.getLocInfo()) {
2043 case CCValAssign::Full:
2044 break;
2045 case CCValAssign::BCvt:
2046 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2047 break;
2048 case CCValAssign::SExt:
2049 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2050 break;
2051 case CCValAssign::ZExt:
2052 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2053 break;
2054 case CCValAssign::AExt:
2055 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2056 break;
2057 default:
2058 llvm_unreachable("Unknown loc info!");
2059 }
2060
2061 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2062 Flag = Chain.getValue(1);
2063 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2064 }
2065
2066 // FIXME: Does sret work properly?
2067 if (!Info->isEntryFunction()) {
2068 const SIRegisterInfo *TRI
2069 = static_cast<const SISubtarget *>(Subtarget)->getRegisterInfo();
2070 const MCPhysReg *I =
2071 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2072 if (I) {
2073 for (; *I; ++I) {
2074 if (AMDGPU::SReg_64RegClass.contains(*I))
2075 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2076 else if (AMDGPU::SReg_32RegClass.contains(*I))
2077 RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2078 else
2079 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2080 }
2081 }
2082 }
2083
2084 // Update chain and glue.
2085 RetOps[0] = Chain;
2086 if (Flag.getNode())
2087 RetOps.push_back(Flag);
2088
2089 unsigned Opc = AMDGPUISD::ENDPGM;
2090 if (!IsWaveEnd)
2091 Opc = IsShader ?
AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG; 2092 return DAG.getNode(Opc, DL, MVT::Other, RetOps); 2093 } 2094 2095 SDValue SITargetLowering::LowerCallResult( 2096 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg, 2097 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 2098 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn, 2099 SDValue ThisVal) const { 2100 CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg); 2101 2102 // Assign locations to each value returned by this call. 2103 SmallVector<CCValAssign, 16> RVLocs; 2104 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 2105 *DAG.getContext()); 2106 CCInfo.AnalyzeCallResult(Ins, RetCC); 2107 2108 // Copy all of the result registers out of their specified physreg. 2109 for (unsigned i = 0; i != RVLocs.size(); ++i) { 2110 CCValAssign VA = RVLocs[i]; 2111 SDValue Val; 2112 2113 if (VA.isRegLoc()) { 2114 Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag); 2115 Chain = Val.getValue(1); 2116 InFlag = Val.getValue(2); 2117 } else if (VA.isMemLoc()) { 2118 report_fatal_error("TODO: return values in memory"); 2119 } else 2120 llvm_unreachable("unknown argument location type"); 2121 2122 switch (VA.getLocInfo()) { 2123 case CCValAssign::Full: 2124 break; 2125 case CCValAssign::BCvt: 2126 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); 2127 break; 2128 case CCValAssign::ZExt: 2129 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val, 2130 DAG.getValueType(VA.getValVT())); 2131 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2132 break; 2133 case CCValAssign::SExt: 2134 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val, 2135 DAG.getValueType(VA.getValVT())); 2136 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2137 break; 2138 case CCValAssign::AExt: 2139 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2140 break; 2141 default: 2142 llvm_unreachable("Unknown loc info!"); 2143 } 2144 2145 InVals.push_back(Val); 2146 } 2147 2148 return Chain; 2149 } 2150 2151 // Add code to pass special inputs required depending on used features separate 2152 // from the explicit user arguments present in the IR. 2153 void SITargetLowering::passSpecialInputs( 2154 CallLoweringInfo &CLI, 2155 const SIMachineFunctionInfo &Info, 2156 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 2157 SmallVectorImpl<SDValue> &MemOpChains, 2158 SDValue Chain, 2159 SDValue StackPtr) const { 2160 // If we don't have a call site, this was a call inserted by 2161 // legalization. These can never use special inputs. 2162 if (!CLI.CS) 2163 return; 2164 2165 const Function *CalleeFunc = CLI.CS.getCalledFunction(); 2166 assert(CalleeFunc); 2167 2168 SelectionDAG &DAG = CLI.DAG; 2169 const SDLoc &DL = CLI.DL; 2170 2171 const SISubtarget *ST = getSubtarget(); 2172 const SIRegisterInfo *TRI = ST->getRegisterInfo(); 2173 2174 auto &ArgUsageInfo = 2175 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); 2176 const AMDGPUFunctionArgInfo &CalleeArgInfo 2177 = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc); 2178 2179 const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo(); 2180 2181 // TODO: Unify with private memory register handling. This is complicated by 2182 // the fact that at least in kernels, the input argument is not necessarily 2183 // in the same location as the input. 
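// Each value below is forwarded only if the callee actually uses it. The
// caller's copy may live somewhere else (or, as with the implicit arg pointer
// in kernels, not exist as an input at all), so the value is reloaded here and
// then copied to wherever the callee expects it, either a register or a stack
// slot.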
2184 AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = { 2185 AMDGPUFunctionArgInfo::DISPATCH_PTR, 2186 AMDGPUFunctionArgInfo::QUEUE_PTR, 2187 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR, 2188 AMDGPUFunctionArgInfo::DISPATCH_ID, 2189 AMDGPUFunctionArgInfo::WORKGROUP_ID_X, 2190 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y, 2191 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z, 2192 AMDGPUFunctionArgInfo::WORKITEM_ID_X, 2193 AMDGPUFunctionArgInfo::WORKITEM_ID_Y, 2194 AMDGPUFunctionArgInfo::WORKITEM_ID_Z, 2195 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR 2196 }; 2197 2198 for (auto InputID : InputRegs) { 2199 const ArgDescriptor *OutgoingArg; 2200 const TargetRegisterClass *ArgRC; 2201 2202 std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID); 2203 if (!OutgoingArg) 2204 continue; 2205 2206 const ArgDescriptor *IncomingArg; 2207 const TargetRegisterClass *IncomingArgRC; 2208 std::tie(IncomingArg, IncomingArgRC) 2209 = CallerArgInfo.getPreloadedValue(InputID); 2210 assert(IncomingArgRC == ArgRC); 2211 2212 // All special arguments are ints for now. 2213 EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32; 2214 SDValue InputReg; 2215 2216 if (IncomingArg) { 2217 InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg); 2218 } else { 2219 // The implicit arg ptr is special because it doesn't have a corresponding 2220 // input for kernels, and is computed from the kernarg segment pointer. 2221 assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); 2222 InputReg = getImplicitArgPtr(DAG, DL); 2223 } 2224 2225 if (OutgoingArg->isRegister()) { 2226 RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg); 2227 } else { 2228 SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, StackPtr, 2229 InputReg, 2230 OutgoingArg->getStackOffset()); 2231 MemOpChains.push_back(ArgStore); 2232 } 2233 } 2234 } 2235 2236 static bool canGuaranteeTCO(CallingConv::ID CC) { 2237 return CC == CallingConv::Fast; 2238 } 2239 2240 /// Return true if we might ever do TCO for calls with this calling convention. 2241 static bool mayTailCallThisCC(CallingConv::ID CC) { 2242 switch (CC) { 2243 case CallingConv::C: 2244 return true; 2245 default: 2246 return canGuaranteeTCO(CC); 2247 } 2248 } 2249 2250 bool SITargetLowering::isEligibleForTailCallOptimization( 2251 SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg, 2252 const SmallVectorImpl<ISD::OutputArg> &Outs, 2253 const SmallVectorImpl<SDValue> &OutVals, 2254 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const { 2255 if (!mayTailCallThisCC(CalleeCC)) 2256 return false; 2257 2258 MachineFunction &MF = DAG.getMachineFunction(); 2259 const Function &CallerF = MF.getFunction(); 2260 CallingConv::ID CallerCC = CallerF.getCallingConv(); 2261 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2262 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); 2263 2264 // Kernels aren't callable, and don't have a live in return address so it 2265 // doesn't make sense to do a tail call with entry functions. 2266 if (!CallerPreserved) 2267 return false; 2268 2269 bool CCMatch = CallerCC == CalleeCC; 2270 2271 if (DAG.getTarget().Options.GuaranteedTailCallOpt) { 2272 if (canGuaranteeTCO(CalleeCC) && CCMatch) 2273 return true; 2274 return false; 2275 } 2276 2277 // TODO: Can we handle var args? 
2278 if (IsVarArg) 2279 return false; 2280 2281 for (const Argument &Arg : CallerF.args()) { 2282 if (Arg.hasByValAttr()) 2283 return false; 2284 } 2285 2286 LLVMContext &Ctx = *DAG.getContext(); 2287 2288 // Check that the call results are passed in the same way. 2289 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins, 2290 CCAssignFnForCall(CalleeCC, IsVarArg), 2291 CCAssignFnForCall(CallerCC, IsVarArg))) 2292 return false; 2293 2294 // The callee has to preserve all registers the caller needs to preserve. 2295 if (!CCMatch) { 2296 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); 2297 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) 2298 return false; 2299 } 2300 2301 // Nothing more to check if the callee is taking no arguments. 2302 if (Outs.empty()) 2303 return true; 2304 2305 SmallVector<CCValAssign, 16> ArgLocs; 2306 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx); 2307 2308 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg)); 2309 2310 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 2311 // If the stack arguments for this call do not fit into our own save area then 2312 // the call cannot be made tail. 2313 // TODO: Is this really necessary? 2314 if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) 2315 return false; 2316 2317 const MachineRegisterInfo &MRI = MF.getRegInfo(); 2318 return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals); 2319 } 2320 2321 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 2322 if (!CI->isTailCall()) 2323 return false; 2324 2325 const Function *ParentFn = CI->getParent()->getParent(); 2326 if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv())) 2327 return false; 2328 2329 auto Attr = ParentFn->getFnAttribute("disable-tail-calls"); 2330 return (Attr.getValueAsString() != "true"); 2331 } 2332 2333 // The wave scratch offset register is used as the global base pointer. 2334 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI, 2335 SmallVectorImpl<SDValue> &InVals) const { 2336 SelectionDAG &DAG = CLI.DAG; 2337 const SDLoc &DL = CLI.DL; 2338 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; 2339 SmallVector<SDValue, 32> &OutVals = CLI.OutVals; 2340 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; 2341 SDValue Chain = CLI.Chain; 2342 SDValue Callee = CLI.Callee; 2343 bool &IsTailCall = CLI.IsTailCall; 2344 CallingConv::ID CallConv = CLI.CallConv; 2345 bool IsVarArg = CLI.IsVarArg; 2346 bool IsSibCall = false; 2347 bool IsThisReturn = false; 2348 MachineFunction &MF = DAG.getMachineFunction(); 2349 2350 if (IsVarArg) { 2351 return lowerUnhandledCall(CLI, InVals, 2352 "unsupported call to variadic function "); 2353 } 2354 2355 if (!CLI.CS.getCalledFunction()) { 2356 return lowerUnhandledCall(CLI, InVals, 2357 "unsupported indirect call to function "); 2358 } 2359 2360 if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) { 2361 return lowerUnhandledCall(CLI, InVals, 2362 "unsupported required tail call to function "); 2363 } 2364 2365 // The first 4 bytes are reserved for the callee's emergency stack slot. 
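// For example, an outgoing argument assigned LocMemOffset 0 by the calling
// convention ends up stored at byte 4 of the callee's argument area.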
2366 const unsigned CalleeUsableStackOffset = 4; 2367 2368 if (IsTailCall) { 2369 IsTailCall = isEligibleForTailCallOptimization( 2370 Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG); 2371 if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) { 2372 report_fatal_error("failed to perform tail call elimination on a call " 2373 "site marked musttail"); 2374 } 2375 2376 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; 2377 2378 // A sibling call is one where we're under the usual C ABI and not planning 2379 // to change that but can still do a tail call: 2380 if (!TailCallOpt && IsTailCall) 2381 IsSibCall = true; 2382 2383 if (IsTailCall) 2384 ++NumTailCalls; 2385 } 2386 2387 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee)) { 2388 // FIXME: Remove this hack for function pointer types after removing 2389 // support of old address space mapping. In the new address space 2390 // mapping the pointer in default address space is 64 bit, therefore 2391 // does not need this hack. 2392 if (Callee.getValueType() == MVT::i32) { 2393 const GlobalValue *GV = GA->getGlobal(); 2394 Callee = DAG.getGlobalAddress(GV, DL, MVT::i64, GA->getOffset(), false, 2395 GA->getTargetFlags()); 2396 } 2397 } 2398 assert(Callee.getValueType() == MVT::i64); 2399 2400 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 2401 2402 // Analyze operands of the call, assigning locations to each operand. 2403 SmallVector<CCValAssign, 16> ArgLocs; 2404 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); 2405 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg); 2406 CCInfo.AnalyzeCallOperands(Outs, AssignFn); 2407 2408 // Get a count of how many bytes are to be pushed on the stack. 2409 unsigned NumBytes = CCInfo.getNextStackOffset(); 2410 2411 if (IsSibCall) { 2412 // Since we're not changing the ABI to make this a tail call, the memory 2413 // operands are already available in the caller's incoming argument space. 2414 NumBytes = 0; 2415 } 2416 2417 // FPDiff is the byte offset of the call's argument area from the callee's. 2418 // Stores to callee stack arguments will be placed in FixedStackSlots offset 2419 // by this amount for a tail call. In a sibling call it must be 0 because the 2420 // caller will deallocate the entire stack and the callee still expects its 2421 // arguments to begin at SP+0. Completely unused for non-tail calls. 2422 int32_t FPDiff = 0; 2423 MachineFrameInfo &MFI = MF.getFrameInfo(); 2424 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 2425 2426 SDValue CallerSavedFP; 2427 2428 // Adjust the stack pointer for the new arguments... 2429 // These operations are automatically eliminated by the prolog/epilog pass 2430 if (!IsSibCall) { 2431 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL); 2432 2433 unsigned OffsetReg = Info->getScratchWaveOffsetReg(); 2434 2435 // In the HSA case, this should be an identity copy. 2436 SDValue ScratchRSrcReg 2437 = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32); 2438 RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg); 2439 2440 // TODO: Don't hardcode these registers and get from the callee function. 2441 SDValue ScratchWaveOffsetReg 2442 = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32); 2443 RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg); 2444 2445 if (!Info->isEntryFunction()) { 2446 // Avoid clobbering this function's FP value. 
In the current convention 2447 // callee will overwrite this, so do save/restore around the call site. 2448 CallerSavedFP = DAG.getCopyFromReg(Chain, DL, 2449 Info->getFrameOffsetReg(), MVT::i32); 2450 } 2451 } 2452 2453 // Stack pointer relative accesses are done by changing the offset SGPR. This 2454 // is just the VGPR offset component. 2455 SDValue StackPtr = DAG.getConstant(CalleeUsableStackOffset, DL, MVT::i32); 2456 2457 SmallVector<SDValue, 8> MemOpChains; 2458 MVT PtrVT = MVT::i32; 2459 2460 // Walk the register/memloc assignments, inserting copies/loads. 2461 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e; 2462 ++i, ++realArgIdx) { 2463 CCValAssign &VA = ArgLocs[i]; 2464 SDValue Arg = OutVals[realArgIdx]; 2465 2466 // Promote the value if needed. 2467 switch (VA.getLocInfo()) { 2468 case CCValAssign::Full: 2469 break; 2470 case CCValAssign::BCvt: 2471 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); 2472 break; 2473 case CCValAssign::ZExt: 2474 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); 2475 break; 2476 case CCValAssign::SExt: 2477 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); 2478 break; 2479 case CCValAssign::AExt: 2480 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); 2481 break; 2482 case CCValAssign::FPExt: 2483 Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg); 2484 break; 2485 default: 2486 llvm_unreachable("Unknown loc info!"); 2487 } 2488 2489 if (VA.isRegLoc()) { 2490 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 2491 } else { 2492 assert(VA.isMemLoc()); 2493 2494 SDValue DstAddr; 2495 MachinePointerInfo DstInfo; 2496 2497 unsigned LocMemOffset = VA.getLocMemOffset(); 2498 int32_t Offset = LocMemOffset; 2499 2500 SDValue PtrOff = DAG.getObjectPtrOffset(DL, StackPtr, Offset); 2501 2502 if (IsTailCall) { 2503 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 2504 unsigned OpSize = Flags.isByVal() ? 2505 Flags.getByValSize() : VA.getValVT().getStoreSize(); 2506 2507 Offset = Offset + FPDiff; 2508 int FI = MFI.CreateFixedObject(OpSize, Offset, true); 2509 2510 DstAddr = DAG.getObjectPtrOffset(DL, DAG.getFrameIndex(FI, PtrVT), 2511 StackPtr); 2512 DstInfo = MachinePointerInfo::getFixedStack(MF, FI); 2513 2514 // Make sure any stack arguments overlapping with where we're storing 2515 // are loaded before this eventual operation. Otherwise they'll be 2516 // clobbered. 2517 2518 // FIXME: Why is this really necessary? This seems to just result in a 2519 // lot of code to copy the stack and write them back to the same 2520 // locations, which are supposed to be immutable? 2521 Chain = addTokenForArgument(Chain, DAG, MFI, FI); 2522 } else { 2523 DstAddr = PtrOff; 2524 DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset); 2525 } 2526 2527 if (Outs[i].Flags.isByVal()) { 2528 SDValue SizeNode = 2529 DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32); 2530 SDValue Cpy = DAG.getMemcpy( 2531 Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(), 2532 /*isVol = */ false, /*AlwaysInline = */ true, 2533 /*isTailCall = */ false, DstInfo, 2534 MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy( 2535 *DAG.getContext(), AMDGPUASI.PRIVATE_ADDRESS)))); 2536 2537 MemOpChains.push_back(Cpy); 2538 } else { 2539 SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo); 2540 MemOpChains.push_back(Store); 2541 } 2542 } 2543 } 2544 2545 // Copy special input registers after user input arguments. 
2546 passSpecialInputs(CLI, *Info, RegsToPass, MemOpChains, Chain, StackPtr);
2547
2548 if (!MemOpChains.empty())
2549 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2550
2551 // Build a sequence of copy-to-reg nodes chained together with token chain
2552 // and flag operands which copy the outgoing args into the appropriate regs.
2553 SDValue InFlag;
2554 for (auto &RegToPass : RegsToPass) {
2555 Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2556 RegToPass.second, InFlag);
2557 InFlag = Chain.getValue(1);
2558 }
2559
2560
2561 SDValue PhysReturnAddrReg;
2562 if (IsTailCall) {
2563 // Since the return is being combined with the call, we need to pass on the
2564 // return address.
2565
2566 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2567 SDValue ReturnAddrReg = CreateLiveInRegister(
2568 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2569
2570 PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2571 MVT::i64);
2572 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2573 InFlag = Chain.getValue(1);
2574 }
2575
2576 // We don't usually want to end the call-sequence here because we would tidy
2577 // the frame up *after* the call, however in the ABI-changing tail-call case
2578 // we've carefully laid out the parameters so that when sp is reset they'll be
2579 // in the correct location.
2580 if (IsTailCall && !IsSibCall) {
2581 Chain = DAG.getCALLSEQ_END(Chain,
2582 DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2583 DAG.getTargetConstant(0, DL, MVT::i32),
2584 InFlag, DL);
2585 InFlag = Chain.getValue(1);
2586 }
2587
2588 std::vector<SDValue> Ops;
2589 Ops.push_back(Chain);
2590 Ops.push_back(Callee);
2591
2592 if (IsTailCall) {
2593 // Each tail call may have to adjust the stack by a different amount, so
2594 // this information must travel along with the operation for eventual
2595 // consumption by emitEpilogue.
2596 Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
2597
2598 Ops.push_back(PhysReturnAddrReg);
2599 }
2600
2601 // Add argument registers to the end of the list so that they are known live
2602 // into the call.
2603 for (auto &RegToPass : RegsToPass) {
2604 Ops.push_back(DAG.getRegister(RegToPass.first,
2605 RegToPass.second.getValueType()));
2606 }
2607
2608 // Add a register mask operand representing the call-preserved registers.
2609
2610 const AMDGPURegisterInfo *TRI = Subtarget->getRegisterInfo();
2611 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2612 assert(Mask && "Missing call preserved mask for calling convention");
2613 Ops.push_back(DAG.getRegisterMask(Mask));
2614
2615 if (InFlag.getNode())
2616 Ops.push_back(InFlag);
2617
2618 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2619
2620 // If we're doing a tail call, use a TC_RETURN here rather than an
2621 // actual call instruction.
2622 if (IsTailCall) {
2623 MFI.setHasTailCall();
2624 return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
2625 }
2626
2627 // Returns a chain and a flag for retval copy to use.
2628 SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops); 2629 Chain = Call.getValue(0); 2630 InFlag = Call.getValue(1); 2631 2632 if (CallerSavedFP) { 2633 SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32); 2634 Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag); 2635 InFlag = Chain.getValue(1); 2636 } 2637 2638 uint64_t CalleePopBytes = NumBytes; 2639 Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32), 2640 DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32), 2641 InFlag, DL); 2642 if (!Ins.empty()) 2643 InFlag = Chain.getValue(1); 2644 2645 // Handle result values, copying them out of physregs into vregs that we 2646 // return. 2647 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG, 2648 InVals, IsThisReturn, 2649 IsThisReturn ? OutVals[0] : SDValue()); 2650 } 2651 2652 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT, 2653 SelectionDAG &DAG) const { 2654 unsigned Reg = StringSwitch<unsigned>(RegName) 2655 .Case("m0", AMDGPU::M0) 2656 .Case("exec", AMDGPU::EXEC) 2657 .Case("exec_lo", AMDGPU::EXEC_LO) 2658 .Case("exec_hi", AMDGPU::EXEC_HI) 2659 .Case("flat_scratch", AMDGPU::FLAT_SCR) 2660 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO) 2661 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI) 2662 .Default(AMDGPU::NoRegister); 2663 2664 if (Reg == AMDGPU::NoRegister) { 2665 report_fatal_error(Twine("invalid register name \"" 2666 + StringRef(RegName) + "\".")); 2667 2668 } 2669 2670 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS && 2671 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) { 2672 report_fatal_error(Twine("invalid register \"" 2673 + StringRef(RegName) + "\" for subtarget.")); 2674 } 2675 2676 switch (Reg) { 2677 case AMDGPU::M0: 2678 case AMDGPU::EXEC_LO: 2679 case AMDGPU::EXEC_HI: 2680 case AMDGPU::FLAT_SCR_LO: 2681 case AMDGPU::FLAT_SCR_HI: 2682 if (VT.getSizeInBits() == 32) 2683 return Reg; 2684 break; 2685 case AMDGPU::EXEC: 2686 case AMDGPU::FLAT_SCR: 2687 if (VT.getSizeInBits() == 64) 2688 return Reg; 2689 break; 2690 default: 2691 llvm_unreachable("missing register type checking"); 2692 } 2693 2694 report_fatal_error(Twine("invalid type for register \"" 2695 + StringRef(RegName) + "\".")); 2696 } 2697 2698 // If kill is not the last instruction, split the block so kill is always a 2699 // proper terminator. 2700 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI, 2701 MachineBasicBlock *BB) const { 2702 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 2703 2704 MachineBasicBlock::iterator SplitPoint(&MI); 2705 ++SplitPoint; 2706 2707 if (SplitPoint == BB->end()) { 2708 // Don't bother with a new block. 2709 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); 2710 return BB; 2711 } 2712 2713 MachineFunction *MF = BB->getParent(); 2714 MachineBasicBlock *SplitBB 2715 = MF->CreateMachineBasicBlock(BB->getBasicBlock()); 2716 2717 MF->insert(++MachineFunction::iterator(BB), SplitBB); 2718 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end()); 2719 2720 SplitBB->transferSuccessorsAndUpdatePHIs(BB); 2721 BB->addSuccessor(SplitBB); 2722 2723 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); 2724 return SplitBB; 2725 } 2726 2727 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the 2728 // wavefront. If the value is uniform and just happens to be in a VGPR, this 2729 // will only do one iteration. In the worst case, this will loop 64 times. 
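// Roughly, each iteration of the loop built below emits:
//   s_idx = V_READFIRSTLANE_B32 v_idx      ; pick one remaining lane's index
//   cond  = V_CMP_EQ_U32 s_idx, v_idx      ; all lanes holding that index
//   m0    = s_idx (+ offset), or S_SET_GPR_IDX_IDX in GPR indexing mode
//   saved = S_AND_SAVEEXEC_B64 cond        ; saved = exec, exec &= cond
//   <the indirect move is inserted at the returned point>
//   exec  = S_XOR_B64 exec, saved          ; i.e. the not-yet-handled lanes
//   S_CBRANCH_EXECNZ loop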
2730 // 2731 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value. 2732 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop( 2733 const SIInstrInfo *TII, 2734 MachineRegisterInfo &MRI, 2735 MachineBasicBlock &OrigBB, 2736 MachineBasicBlock &LoopBB, 2737 const DebugLoc &DL, 2738 const MachineOperand &IdxReg, 2739 unsigned InitReg, 2740 unsigned ResultReg, 2741 unsigned PhiReg, 2742 unsigned InitSaveExecReg, 2743 int Offset, 2744 bool UseGPRIdxMode) { 2745 MachineBasicBlock::iterator I = LoopBB.begin(); 2746 2747 unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2748 unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2749 unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 2750 unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2751 2752 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) 2753 .addReg(InitReg) 2754 .addMBB(&OrigBB) 2755 .addReg(ResultReg) 2756 .addMBB(&LoopBB); 2757 2758 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec) 2759 .addReg(InitSaveExecReg) 2760 .addMBB(&OrigBB) 2761 .addReg(NewExec) 2762 .addMBB(&LoopBB); 2763 2764 // Read the next variant <- also loop target. 2765 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg) 2766 .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef())); 2767 2768 // Compare the just read M0 value to all possible Idx values. 2769 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg) 2770 .addReg(CurrentIdxReg) 2771 .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg()); 2772 2773 if (UseGPRIdxMode) { 2774 unsigned IdxReg; 2775 if (Offset == 0) { 2776 IdxReg = CurrentIdxReg; 2777 } else { 2778 IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 2779 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg) 2780 .addReg(CurrentIdxReg, RegState::Kill) 2781 .addImm(Offset); 2782 } 2783 2784 MachineInstr *SetIdx = 2785 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_IDX)) 2786 .addReg(IdxReg, RegState::Kill); 2787 SetIdx->getOperand(2).setIsUndef(); 2788 } else { 2789 // Move index from VCC into M0 2790 if (Offset == 0) { 2791 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 2792 .addReg(CurrentIdxReg, RegState::Kill); 2793 } else { 2794 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 2795 .addReg(CurrentIdxReg, RegState::Kill) 2796 .addImm(Offset); 2797 } 2798 } 2799 2800 // Update EXEC, save the original EXEC value to VCC. 2801 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec) 2802 .addReg(CondReg, RegState::Kill); 2803 2804 MRI.setSimpleHint(NewExec, CondReg); 2805 2806 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 2807 MachineInstr *InsertPt = 2808 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC) 2809 .addReg(AMDGPU::EXEC) 2810 .addReg(NewExec); 2811 2812 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use 2813 // s_cbranch_scc0? 2814 2815 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover. 2816 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) 2817 .addMBB(&LoopBB); 2818 2819 return InsertPt->getIterator(); 2820 } 2821 2822 // This has slightly sub-optimal regalloc when the source vector is killed by 2823 // the read. 
The register allocator does not understand that the kill is 2824 // per-workitem, so is kept alive for the whole loop so we end up not re-using a 2825 // subregister from it, using 1 more VGPR than necessary. This was saved when 2826 // this was expanded after register allocation. 2827 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII, 2828 MachineBasicBlock &MBB, 2829 MachineInstr &MI, 2830 unsigned InitResultReg, 2831 unsigned PhiReg, 2832 int Offset, 2833 bool UseGPRIdxMode) { 2834 MachineFunction *MF = MBB.getParent(); 2835 MachineRegisterInfo &MRI = MF->getRegInfo(); 2836 const DebugLoc &DL = MI.getDebugLoc(); 2837 MachineBasicBlock::iterator I(&MI); 2838 2839 unsigned DstReg = MI.getOperand(0).getReg(); 2840 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 2841 unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 2842 2843 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec); 2844 2845 // Save the EXEC mask 2846 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec) 2847 .addReg(AMDGPU::EXEC); 2848 2849 // To insert the loop we need to split the block. Move everything after this 2850 // point to a new block, and insert a new empty block between the two. 2851 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock(); 2852 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock(); 2853 MachineFunction::iterator MBBI(MBB); 2854 ++MBBI; 2855 2856 MF->insert(MBBI, LoopBB); 2857 MF->insert(MBBI, RemainderBB); 2858 2859 LoopBB->addSuccessor(LoopBB); 2860 LoopBB->addSuccessor(RemainderBB); 2861 2862 // Move the rest of the block into a new block. 2863 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 2864 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); 2865 2866 MBB.addSuccessor(LoopBB); 2867 2868 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 2869 2870 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx, 2871 InitResultReg, DstReg, PhiReg, TmpExec, 2872 Offset, UseGPRIdxMode); 2873 2874 MachineBasicBlock::iterator First = RemainderBB->begin(); 2875 BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) 2876 .addReg(SaveExec); 2877 2878 return InsPt; 2879 } 2880 2881 // Returns subreg index, offset 2882 static std::pair<unsigned, int> 2883 computeIndirectRegAndOffset(const SIRegisterInfo &TRI, 2884 const TargetRegisterClass *SuperRC, 2885 unsigned VecReg, 2886 int Offset) { 2887 int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32; 2888 2889 // Skip out of bounds offsets, or else we would end up using an undefined 2890 // register. 2891 if (Offset >= NumElts || Offset < 0) 2892 return std::make_pair(AMDGPU::sub0, Offset); 2893 2894 return std::make_pair(AMDGPU::sub0 + Offset, 0); 2895 } 2896 2897 // Return true if the index is an SGPR and was set. 2898 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII, 2899 MachineRegisterInfo &MRI, 2900 MachineInstr &MI, 2901 int Offset, 2902 bool UseGPRIdxMode, 2903 bool IsIndirectSrc) { 2904 MachineBasicBlock *MBB = MI.getParent(); 2905 const DebugLoc &DL = MI.getDebugLoc(); 2906 MachineBasicBlock::iterator I(&MI); 2907 2908 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 2909 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); 2910 2911 assert(Idx->getReg() != AMDGPU::NoRegister); 2912 2913 if (!TII->getRegisterInfo().isSGPRClass(IdxRC)) 2914 return false; 2915 2916 if (UseGPRIdxMode) { 2917 unsigned IdxMode = IsIndirectSrc ? 
2918 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE; 2919 if (Offset == 0) { 2920 MachineInstr *SetOn = 2921 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 2922 .add(*Idx) 2923 .addImm(IdxMode); 2924 2925 SetOn->getOperand(3).setIsUndef(); 2926 } else { 2927 unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 2928 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) 2929 .add(*Idx) 2930 .addImm(Offset); 2931 MachineInstr *SetOn = 2932 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 2933 .addReg(Tmp, RegState::Kill) 2934 .addImm(IdxMode); 2935 2936 SetOn->getOperand(3).setIsUndef(); 2937 } 2938 2939 return true; 2940 } 2941 2942 if (Offset == 0) { 2943 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 2944 .add(*Idx); 2945 } else { 2946 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 2947 .add(*Idx) 2948 .addImm(Offset); 2949 } 2950 2951 return true; 2952 } 2953 2954 // Control flow needs to be inserted if indexing with a VGPR. 2955 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI, 2956 MachineBasicBlock &MBB, 2957 const SISubtarget &ST) { 2958 const SIInstrInfo *TII = ST.getInstrInfo(); 2959 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 2960 MachineFunction *MF = MBB.getParent(); 2961 MachineRegisterInfo &MRI = MF->getRegInfo(); 2962 2963 unsigned Dst = MI.getOperand(0).getReg(); 2964 unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg(); 2965 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 2966 2967 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg); 2968 2969 unsigned SubReg; 2970 std::tie(SubReg, Offset) 2971 = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset); 2972 2973 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode); 2974 2975 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) { 2976 MachineBasicBlock::iterator I(&MI); 2977 const DebugLoc &DL = MI.getDebugLoc(); 2978 2979 if (UseGPRIdxMode) { 2980 // TODO: Look at the uses to avoid the copy. This may require rescheduling 2981 // to avoid interfering with other uses, so probably requires a new 2982 // optimization pass. 2983 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) 2984 .addReg(SrcReg, RegState::Undef, SubReg) 2985 .addReg(SrcReg, RegState::Implicit) 2986 .addReg(AMDGPU::M0, RegState::Implicit); 2987 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 2988 } else { 2989 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 2990 .addReg(SrcReg, RegState::Undef, SubReg) 2991 .addReg(SrcReg, RegState::Implicit); 2992 } 2993 2994 MI.eraseFromParent(); 2995 2996 return &MBB; 2997 } 2998 2999 const DebugLoc &DL = MI.getDebugLoc(); 3000 MachineBasicBlock::iterator I(&MI); 3001 3002 unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3003 unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3004 3005 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg); 3006 3007 if (UseGPRIdxMode) { 3008 MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 3009 .addImm(0) // Reset inside loop. 3010 .addImm(VGPRIndexMode::SRC0_ENABLE); 3011 SetOn->getOperand(3).setIsUndef(); 3012 3013 // Disable again after the loop. 
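// Inserting at std::next(I) places the S_SET_GPR_IDX_OFF after MI, so once
// loadM0FromVGPR splits the block it ends up in the remainder block, i.e.
// after the waterfall loop.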
3014 BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 3015 } 3016 3017 auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, UseGPRIdxMode); 3018 MachineBasicBlock *LoopBB = InsPt->getParent(); 3019 3020 if (UseGPRIdxMode) { 3021 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) 3022 .addReg(SrcReg, RegState::Undef, SubReg) 3023 .addReg(SrcReg, RegState::Implicit) 3024 .addReg(AMDGPU::M0, RegState::Implicit); 3025 } else { 3026 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 3027 .addReg(SrcReg, RegState::Undef, SubReg) 3028 .addReg(SrcReg, RegState::Implicit); 3029 } 3030 3031 MI.eraseFromParent(); 3032 3033 return LoopBB; 3034 } 3035 3036 static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI, 3037 const TargetRegisterClass *VecRC) { 3038 switch (TRI.getRegSizeInBits(*VecRC)) { 3039 case 32: // 4 bytes 3040 return AMDGPU::V_MOVRELD_B32_V1; 3041 case 64: // 8 bytes 3042 return AMDGPU::V_MOVRELD_B32_V2; 3043 case 128: // 16 bytes 3044 return AMDGPU::V_MOVRELD_B32_V4; 3045 case 256: // 32 bytes 3046 return AMDGPU::V_MOVRELD_B32_V8; 3047 case 512: // 64 bytes 3048 return AMDGPU::V_MOVRELD_B32_V16; 3049 default: 3050 llvm_unreachable("unsupported size for MOVRELD pseudos"); 3051 } 3052 } 3053 3054 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, 3055 MachineBasicBlock &MBB, 3056 const SISubtarget &ST) { 3057 const SIInstrInfo *TII = ST.getInstrInfo(); 3058 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 3059 MachineFunction *MF = MBB.getParent(); 3060 MachineRegisterInfo &MRI = MF->getRegInfo(); 3061 3062 unsigned Dst = MI.getOperand(0).getReg(); 3063 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); 3064 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 3065 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); 3066 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 3067 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg()); 3068 3069 // This can be an immediate, but will be folded later. 
3070 assert(Val->getReg()); 3071 3072 unsigned SubReg; 3073 std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC, 3074 SrcVec->getReg(), 3075 Offset); 3076 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode); 3077 3078 if (Idx->getReg() == AMDGPU::NoRegister) { 3079 MachineBasicBlock::iterator I(&MI); 3080 const DebugLoc &DL = MI.getDebugLoc(); 3081 3082 assert(Offset == 0); 3083 3084 BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst) 3085 .add(*SrcVec) 3086 .add(*Val) 3087 .addImm(SubReg); 3088 3089 MI.eraseFromParent(); 3090 return &MBB; 3091 } 3092 3093 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) { 3094 MachineBasicBlock::iterator I(&MI); 3095 const DebugLoc &DL = MI.getDebugLoc(); 3096 3097 if (UseGPRIdxMode) { 3098 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) 3099 .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst 3100 .add(*Val) 3101 .addReg(Dst, RegState::ImplicitDefine) 3102 .addReg(SrcVec->getReg(), RegState::Implicit) 3103 .addReg(AMDGPU::M0, RegState::Implicit); 3104 3105 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 3106 } else { 3107 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC)); 3108 3109 BuildMI(MBB, I, DL, MovRelDesc) 3110 .addReg(Dst, RegState::Define) 3111 .addReg(SrcVec->getReg()) 3112 .add(*Val) 3113 .addImm(SubReg - AMDGPU::sub0); 3114 } 3115 3116 MI.eraseFromParent(); 3117 return &MBB; 3118 } 3119 3120 if (Val->isReg()) 3121 MRI.clearKillFlags(Val->getReg()); 3122 3123 const DebugLoc &DL = MI.getDebugLoc(); 3124 3125 if (UseGPRIdxMode) { 3126 MachineBasicBlock::iterator I(&MI); 3127 3128 MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 3129 .addImm(0) // Reset inside loop. 3130 .addImm(VGPRIndexMode::DST_ENABLE); 3131 SetOn->getOperand(3).setIsUndef(); 3132 3133 // Disable again after the loop. 3134 BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 3135 } 3136 3137 unsigned PhiReg = MRI.createVirtualRegister(VecRC); 3138 3139 auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, 3140 Offset, UseGPRIdxMode); 3141 MachineBasicBlock *LoopBB = InsPt->getParent(); 3142 3143 if (UseGPRIdxMode) { 3144 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) 3145 .addReg(PhiReg, RegState::Undef, SubReg) // vdst 3146 .add(*Val) // src0 3147 .addReg(Dst, RegState::ImplicitDefine) 3148 .addReg(PhiReg, RegState::Implicit) 3149 .addReg(AMDGPU::M0, RegState::Implicit); 3150 } else { 3151 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC)); 3152 3153 BuildMI(*LoopBB, InsPt, DL, MovRelDesc) 3154 .addReg(Dst, RegState::Define) 3155 .addReg(PhiReg) 3156 .add(*Val) 3157 .addImm(SubReg - AMDGPU::sub0); 3158 } 3159 3160 MI.eraseFromParent(); 3161 3162 return LoopBB; 3163 } 3164 3165 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( 3166 MachineInstr &MI, MachineBasicBlock *BB) const { 3167 3168 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3169 MachineFunction *MF = BB->getParent(); 3170 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 3171 3172 if (TII->isMIMG(MI)) { 3173 if (MI.memoperands_empty() && MI.mayLoadOrStore()) { 3174 report_fatal_error("missing mem operand from MIMG instruction"); 3175 } 3176 // Add a memoperand for mimg instructions so that they aren't assumed to 3177 // be ordered memory instuctions. 
3178 3179 return BB; 3180 } 3181 3182 switch (MI.getOpcode()) { 3183 case AMDGPU::S_ADD_U64_PSEUDO: 3184 case AMDGPU::S_SUB_U64_PSEUDO: { 3185 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 3186 const DebugLoc &DL = MI.getDebugLoc(); 3187 3188 MachineOperand &Dest = MI.getOperand(0); 3189 MachineOperand &Src0 = MI.getOperand(1); 3190 MachineOperand &Src1 = MI.getOperand(2); 3191 3192 unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3193 unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3194 3195 MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, 3196 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0, 3197 &AMDGPU::SReg_32_XM0RegClass); 3198 MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI, 3199 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1, 3200 &AMDGPU::SReg_32_XM0RegClass); 3201 3202 MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, 3203 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0, 3204 &AMDGPU::SReg_32_XM0RegClass); 3205 MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI, 3206 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1, 3207 &AMDGPU::SReg_32_XM0RegClass); 3208 3209 bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 3210 3211 unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32; 3212 unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32; 3213 BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0) 3214 .add(Src0Sub0) 3215 .add(Src1Sub0); 3216 BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1) 3217 .add(Src0Sub1) 3218 .add(Src1Sub1); 3219 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg()) 3220 .addReg(DestSub0) 3221 .addImm(AMDGPU::sub0) 3222 .addReg(DestSub1) 3223 .addImm(AMDGPU::sub1); 3224 MI.eraseFromParent(); 3225 return BB; 3226 } 3227 case AMDGPU::SI_INIT_M0: { 3228 BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(), 3229 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 3230 .add(MI.getOperand(0)); 3231 MI.eraseFromParent(); 3232 return BB; 3233 } 3234 case AMDGPU::SI_INIT_EXEC: 3235 // This should be before all vector instructions. 3236 BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64), 3237 AMDGPU::EXEC) 3238 .addImm(MI.getOperand(0).getImm()); 3239 MI.eraseFromParent(); 3240 return BB; 3241 3242 case AMDGPU::SI_INIT_EXEC_FROM_INPUT: { 3243 // Extract the thread count from an SGPR input and set EXEC accordingly. 3244 // Since BFM can't shift by 64, handle that case with CMP + CMOV. 3245 // 3246 // S_BFE_U32 count, input, {shift, 7} 3247 // S_BFM_B64 exec, count, 0 3248 // S_CMP_EQ_U32 count, 64 3249 // S_CMOV_B64 exec, -1 3250 MachineInstr *FirstMI = &*BB->begin(); 3251 MachineRegisterInfo &MRI = MF->getRegInfo(); 3252 unsigned InputReg = MI.getOperand(0).getReg(); 3253 unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3254 bool Found = false; 3255 3256 // Move the COPY of the input reg to the beginning, so that we can use it. 3257 for (auto I = BB->begin(); I != &MI; I++) { 3258 if (I->getOpcode() != TargetOpcode::COPY || 3259 I->getOperand(0).getReg() != InputReg) 3260 continue; 3261 3262 if (I == FirstMI) { 3263 FirstMI = &*++BB->begin(); 3264 } else { 3265 I->removeFromParent(); 3266 BB->insert(FirstMI, &*I); 3267 } 3268 Found = true; 3269 break; 3270 } 3271 assert(Found); 3272 (void)Found; 3273 3274 // This should be before all vector instructions. 
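      // For example, for a shift operand of 8 the sequence built below (using
      // the names from the comment above) is roughly:
      //   S_BFE_U32 count, input, 0x70008  ; width 7, offset 8 -> bits [14:8]
      //   S_BFM_B64 exec, count, 0
      //   S_CMP_EQ_U32 count, 64
      //   S_CMOV_B64 exec, -1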
3275 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg) 3276 .addReg(InputReg) 3277 .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000); 3278 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64), 3279 AMDGPU::EXEC) 3280 .addReg(CountReg) 3281 .addImm(0); 3282 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32)) 3283 .addReg(CountReg, RegState::Kill) 3284 .addImm(64); 3285 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64), 3286 AMDGPU::EXEC) 3287 .addImm(-1); 3288 MI.eraseFromParent(); 3289 return BB; 3290 } 3291 3292 case AMDGPU::GET_GROUPSTATICSIZE: { 3293 DebugLoc DL = MI.getDebugLoc(); 3294 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32)) 3295 .add(MI.getOperand(0)) 3296 .addImm(MFI->getLDSSize()); 3297 MI.eraseFromParent(); 3298 return BB; 3299 } 3300 case AMDGPU::SI_INDIRECT_SRC_V1: 3301 case AMDGPU::SI_INDIRECT_SRC_V2: 3302 case AMDGPU::SI_INDIRECT_SRC_V4: 3303 case AMDGPU::SI_INDIRECT_SRC_V8: 3304 case AMDGPU::SI_INDIRECT_SRC_V16: 3305 return emitIndirectSrc(MI, *BB, *getSubtarget()); 3306 case AMDGPU::SI_INDIRECT_DST_V1: 3307 case AMDGPU::SI_INDIRECT_DST_V2: 3308 case AMDGPU::SI_INDIRECT_DST_V4: 3309 case AMDGPU::SI_INDIRECT_DST_V8: 3310 case AMDGPU::SI_INDIRECT_DST_V16: 3311 return emitIndirectDst(MI, *BB, *getSubtarget()); 3312 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: 3313 case AMDGPU::SI_KILL_I1_PSEUDO: 3314 return splitKillBlock(MI, BB); 3315 case AMDGPU::V_CNDMASK_B64_PSEUDO: { 3316 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 3317 3318 unsigned Dst = MI.getOperand(0).getReg(); 3319 unsigned Src0 = MI.getOperand(1).getReg(); 3320 unsigned Src1 = MI.getOperand(2).getReg(); 3321 const DebugLoc &DL = MI.getDebugLoc(); 3322 unsigned SrcCond = MI.getOperand(3).getReg(); 3323 3324 unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3325 unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3326 unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 3327 3328 BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy) 3329 .addReg(SrcCond); 3330 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo) 3331 .addReg(Src0, 0, AMDGPU::sub0) 3332 .addReg(Src1, 0, AMDGPU::sub0) 3333 .addReg(SrcCondCopy); 3334 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi) 3335 .addReg(Src0, 0, AMDGPU::sub1) 3336 .addReg(Src1, 0, AMDGPU::sub1) 3337 .addReg(SrcCondCopy); 3338 3339 BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst) 3340 .addReg(DstLo) 3341 .addImm(AMDGPU::sub0) 3342 .addReg(DstHi) 3343 .addImm(AMDGPU::sub1); 3344 MI.eraseFromParent(); 3345 return BB; 3346 } 3347 case AMDGPU::SI_BR_UNDEF: { 3348 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3349 const DebugLoc &DL = MI.getDebugLoc(); 3350 MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) 3351 .add(MI.getOperand(0)); 3352 Br->getOperand(1).setIsUndef(true); // read undef SCC 3353 MI.eraseFromParent(); 3354 return BB; 3355 } 3356 case AMDGPU::ADJCALLSTACKUP: 3357 case AMDGPU::ADJCALLSTACKDOWN: { 3358 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 3359 MachineInstrBuilder MIB(*MF, &MI); 3360 MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine) 3361 .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit); 3362 return BB; 3363 } 3364 case AMDGPU::SI_CALL_ISEL: 3365 case AMDGPU::SI_TCRETURN_ISEL: { 3366 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3367 const DebugLoc &DL = MI.getDebugLoc(); 3368 
unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF); 3369 3370 MachineRegisterInfo &MRI = MF->getRegInfo(); 3371 unsigned GlobalAddrReg = MI.getOperand(0).getReg(); 3372 MachineInstr *PCRel = MRI.getVRegDef(GlobalAddrReg); 3373 assert(PCRel->getOpcode() == AMDGPU::SI_PC_ADD_REL_OFFSET); 3374 3375 const GlobalValue *G = PCRel->getOperand(1).getGlobal(); 3376 3377 MachineInstrBuilder MIB; 3378 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) { 3379 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg) 3380 .add(MI.getOperand(0)) 3381 .addGlobalAddress(G); 3382 } else { 3383 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_TCRETURN)) 3384 .add(MI.getOperand(0)) 3385 .addGlobalAddress(G); 3386 3387 // There is an additional imm operand for tcreturn, but it should be in the 3388 // right place already. 3389 } 3390 3391 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) 3392 MIB.add(MI.getOperand(I)); 3393 3394 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); 3395 MI.eraseFromParent(); 3396 return BB; 3397 } 3398 default: 3399 return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); 3400 } 3401 } 3402 3403 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const { 3404 return isTypeLegal(VT.getScalarType()); 3405 } 3406 3407 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const { 3408 // This currently forces unfolding various combinations of fsub into fma with 3409 // free fneg'd operands. As long as we have fast FMA (controlled by 3410 // isFMAFasterThanFMulAndFAdd), we should perform these. 3411 3412 // When fma is quarter rate, for f64 where add / sub are at best half rate, 3413 // most of these combines appear to be cycle neutral but save on instruction 3414 // count / code size. 3415 return true; 3416 } 3417 3418 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, 3419 EVT VT) const { 3420 if (!VT.isVector()) { 3421 return MVT::i1; 3422 } 3423 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements()); 3424 } 3425 3426 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const { 3427 // TODO: Should i16 be used always if legal? For now it would force VALU 3428 // shifts. 3429 return (VT == MVT::i16) ? MVT::i16 : MVT::i32; 3430 } 3431 3432 // Answering this is somewhat tricky and depends on the specific device which 3433 // have different rates for fma or all f64 operations. 3434 // 3435 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other 3436 // regardless of which device (although the number of cycles differs between 3437 // devices), so it is always profitable for f64. 3438 // 3439 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable 3440 // only on full rate devices. Normally, we should prefer selecting v_mad_f32 3441 // which we can always do even without fused FP ops since it returns the same 3442 // result as the separate operations and since it is always full 3443 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32 3444 // however does not support denormals, so we do report fma as faster if we have 3445 // a fast fma device and require denormals. 3446 // 3447 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 3448 VT = VT.getScalarType(); 3449 3450 switch (VT.getSimpleVT().SimpleTy) { 3451 case MVT::f32: 3452 // This is as fast on some subtargets. 
However, we always have full rate f32 3453 // mad available which returns the same result as the separate operations 3454 // which we should prefer over fma. We can't use this if we want to support 3455 // denormals, so only report this in these cases. 3456 return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32(); 3457 case MVT::f64: 3458 return true; 3459 case MVT::f16: 3460 return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals(); 3461 default: 3462 break; 3463 } 3464 3465 return false; 3466 } 3467 3468 //===----------------------------------------------------------------------===// 3469 // Custom DAG Lowering Operations 3470 //===----------------------------------------------------------------------===// 3471 3472 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 3473 switch (Op.getOpcode()) { 3474 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); 3475 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 3476 case ISD::LOAD: { 3477 SDValue Result = LowerLOAD(Op, DAG); 3478 assert((!Result.getNode() || 3479 Result.getNode()->getNumValues() == 2) && 3480 "Load should return a value and a chain"); 3481 return Result; 3482 } 3483 3484 case ISD::FSIN: 3485 case ISD::FCOS: 3486 return LowerTrig(Op, DAG); 3487 case ISD::SELECT: return LowerSELECT(Op, DAG); 3488 case ISD::FDIV: return LowerFDIV(Op, DAG); 3489 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG); 3490 case ISD::STORE: return LowerSTORE(Op, DAG); 3491 case ISD::GlobalAddress: { 3492 MachineFunction &MF = DAG.getMachineFunction(); 3493 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 3494 return LowerGlobalAddress(MFI, Op, DAG); 3495 } 3496 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 3497 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 3498 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); 3499 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG); 3500 case ISD::INSERT_VECTOR_ELT: 3501 return lowerINSERT_VECTOR_ELT(Op, DAG); 3502 case ISD::EXTRACT_VECTOR_ELT: 3503 return lowerEXTRACT_VECTOR_ELT(Op, DAG); 3504 case ISD::FP_ROUND: 3505 return lowerFP_ROUND(Op, DAG); 3506 case ISD::TRAP: 3507 case ISD::DEBUGTRAP: 3508 return lowerTRAP(Op, DAG); 3509 } 3510 return SDValue(); 3511 } 3512 3513 static SDValue adjustLoadValueType(SDValue Result, EVT LoadVT, SDLoc DL, 3514 SelectionDAG &DAG, bool Unpacked) { 3515 if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16. 3516 // Truncate to v2i16/v4i16. 3517 EVT IntLoadVT = LoadVT.changeTypeToInteger(); 3518 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, IntLoadVT, Result); 3519 // Bitcast to original type (v2f16/v4f16). 3520 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Trunc); 3521 } 3522 // Cast back to the original packed type. 3523 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result); 3524 } 3525 3526 // This is to lower INTRINSIC_W_CHAIN with illegal result types. 3527 SDValue SITargetLowering::lowerIntrinsicWChain_IllegalReturnType(SDValue Op, 3528 SDValue &Chain, SelectionDAG &DAG) const { 3529 EVT LoadVT = Op.getValueType(); 3530 // TODO: handle v3f16. 3531 if (LoadVT != MVT::v2f16 && LoadVT != MVT::v4f16) 3532 return SDValue(); 3533 3534 bool Unpacked = Subtarget->hasUnpackedD16VMem(); 3535 EVT UnpackedLoadVT = (LoadVT == MVT::v2f16) ? MVT::v2i32 : MVT::v4i32; 3536 EVT EquivLoadVT = Unpacked ? UnpackedLoadVT : 3537 getEquivalentMemType(*DAG.getContext(), LoadVT); 3538 // Change from v4f16/v2f16 to EquivLoadVT. 
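  // For example, with unpacked D16 a v4f16 result is loaded as v4i32 (one
  // 16-bit element per 32-bit lane) and truncated back in adjustLoadValueType;
  // with packed D16 the result is just bitcast to the equivalent legal type of
  // the same size.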
3539 SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other); 3540 3541 SDValue Res; 3542 SDLoc DL(Op); 3543 MemSDNode *M = cast<MemSDNode>(Op); 3544 unsigned IID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 3545 switch (IID) { 3546 case Intrinsic::amdgcn_tbuffer_load: { 3547 SDValue Ops[] = { 3548 Op.getOperand(0), // Chain 3549 Op.getOperand(2), // rsrc 3550 Op.getOperand(3), // vindex 3551 Op.getOperand(4), // voffset 3552 Op.getOperand(5), // soffset 3553 Op.getOperand(6), // offset 3554 Op.getOperand(7), // dfmt 3555 Op.getOperand(8), // nfmt 3556 Op.getOperand(9), // glc 3557 Op.getOperand(10) // slc 3558 }; 3559 Res = DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, DL, 3560 VTList, Ops, M->getMemoryVT(), 3561 M->getMemOperand()); 3562 Chain = Res.getValue(1); 3563 return adjustLoadValueType(Res, LoadVT, DL, DAG, Unpacked); 3564 } 3565 case Intrinsic::amdgcn_buffer_load_format: { 3566 SDValue Ops[] = { 3567 Op.getOperand(0), // Chain 3568 Op.getOperand(2), // rsrc 3569 Op.getOperand(3), // vindex 3570 Op.getOperand(4), // offset 3571 Op.getOperand(5), // glc 3572 Op.getOperand(6) // slc 3573 }; 3574 Res = DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, 3575 DL, VTList, Ops, M->getMemoryVT(), 3576 M->getMemOperand()); 3577 Chain = Res.getValue(1); 3578 return adjustLoadValueType(Res, LoadVT, DL, DAG, Unpacked); 3579 } 3580 default: 3581 return SDValue(); 3582 } 3583 } 3584 3585 void SITargetLowering::ReplaceNodeResults(SDNode *N, 3586 SmallVectorImpl<SDValue> &Results, 3587 SelectionDAG &DAG) const { 3588 switch (N->getOpcode()) { 3589 case ISD::INSERT_VECTOR_ELT: { 3590 if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG)) 3591 Results.push_back(Res); 3592 return; 3593 } 3594 case ISD::EXTRACT_VECTOR_ELT: { 3595 if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG)) 3596 Results.push_back(Res); 3597 return; 3598 } 3599 case ISD::INTRINSIC_WO_CHAIN: { 3600 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 3601 if (IID == Intrinsic::amdgcn_cvt_pkrtz) { 3602 SDValue Src0 = N->getOperand(1); 3603 SDValue Src1 = N->getOperand(2); 3604 SDLoc SL(N); 3605 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32, 3606 Src0, Src1); 3607 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt)); 3608 return; 3609 } 3610 break; 3611 } 3612 case ISD::INTRINSIC_W_CHAIN: { 3613 SDValue Chain; 3614 if (SDValue Res = lowerIntrinsicWChain_IllegalReturnType(SDValue(N, 0), 3615 Chain, DAG)) { 3616 Results.push_back(Res); 3617 Results.push_back(Chain); 3618 return; 3619 } 3620 break; 3621 } 3622 case ISD::SELECT: { 3623 SDLoc SL(N); 3624 EVT VT = N->getValueType(0); 3625 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); 3626 SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1)); 3627 SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2)); 3628 3629 EVT SelectVT = NewVT; 3630 if (NewVT.bitsLT(MVT::i32)) { 3631 LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS); 3632 RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS); 3633 SelectVT = MVT::i32; 3634 } 3635 3636 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT, 3637 N->getOperand(0), LHS, RHS); 3638 3639 if (NewVT != SelectVT) 3640 NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect); 3641 Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect)); 3642 return; 3643 } 3644 default: 3645 break; 3646 } 3647 } 3648 3649 /// \brief Helper function for LowerBRCOND 3650 static SDNode *findUser(SDValue Value, 
unsigned Opcode) { 3651 3652 SDNode *Parent = Value.getNode(); 3653 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); 3654 I != E; ++I) { 3655 3656 if (I.getUse().get() != Value) 3657 continue; 3658 3659 if (I->getOpcode() == Opcode) 3660 return *I; 3661 } 3662 return nullptr; 3663 } 3664 3665 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { 3666 if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 3667 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) { 3668 case Intrinsic::amdgcn_if: 3669 return AMDGPUISD::IF; 3670 case Intrinsic::amdgcn_else: 3671 return AMDGPUISD::ELSE; 3672 case Intrinsic::amdgcn_loop: 3673 return AMDGPUISD::LOOP; 3674 case Intrinsic::amdgcn_end_cf: 3675 llvm_unreachable("should not occur"); 3676 default: 3677 return 0; 3678 } 3679 } 3680 3681 // break, if_break, else_break are all only used as inputs to loop, not 3682 // directly as branch conditions. 3683 return 0; 3684 } 3685 3686 void SITargetLowering::createDebuggerPrologueStackObjects( 3687 MachineFunction &MF) const { 3688 // Create stack objects that are used for emitting debugger prologue. 3689 // 3690 // Debugger prologue writes work group IDs and work item IDs to scratch memory 3691 // at fixed location in the following format: 3692 // offset 0: work group ID x 3693 // offset 4: work group ID y 3694 // offset 8: work group ID z 3695 // offset 16: work item ID x 3696 // offset 20: work item ID y 3697 // offset 24: work item ID z 3698 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 3699 int ObjectIdx = 0; 3700 3701 // For each dimension: 3702 for (unsigned i = 0; i < 3; ++i) { 3703 // Create fixed stack object for work group ID. 3704 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true); 3705 Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx); 3706 // Create fixed stack object for work item ID. 
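    // (Work item IDs start at offset 16, so there are 4 bytes of padding after
    // the three work group IDs; see the layout comment above.)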
3707 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true); 3708 Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx); 3709 } 3710 } 3711 3712 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const { 3713 const Triple &TT = getTargetMachine().getTargetTriple(); 3714 return GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS && 3715 AMDGPU::shouldEmitConstantsToTextSection(TT); 3716 } 3717 3718 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const { 3719 return (GV->getType()->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS || 3720 GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) && 3721 !shouldEmitFixup(GV) && 3722 !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); 3723 } 3724 3725 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const { 3726 return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV); 3727 } 3728 3729 /// This transforms the control flow intrinsics to get the branch destination as 3730 /// the last parameter, and also switches the branch target with BR if the need arises. 3731 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, 3732 SelectionDAG &DAG) const { 3733 SDLoc DL(BRCOND); 3734 3735 SDNode *Intr = BRCOND.getOperand(1).getNode(); 3736 SDValue Target = BRCOND.getOperand(2); 3737 SDNode *BR = nullptr; 3738 SDNode *SetCC = nullptr; 3739 3740 if (Intr->getOpcode() == ISD::SETCC) { 3741 // As long as we negate the condition everything is fine 3742 SetCC = Intr; 3743 Intr = SetCC->getOperand(0).getNode(); 3744 3745 } else { 3746 // Get the target from BR if we don't negate the condition 3747 BR = findUser(BRCOND, ISD::BR); 3748 Target = BR->getOperand(1); 3749 } 3750 3751 // FIXME: This changes the types of the intrinsics instead of introducing new 3752 // nodes with the correct types. 3753 // e.g. llvm.amdgcn.loop 3754 3755 // e.g.: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3 3756 // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088> 3757 3758 unsigned CFNode = isCFIntrinsic(Intr); 3759 if (CFNode == 0) { 3760 // This is a uniform branch so we don't need to legalize. 3761 return BRCOND; 3762 } 3763 3764 bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID || 3765 Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN; 3766 3767 assert(!SetCC || 3768 (SetCC->getConstantOperandVal(1) == 1 && 3769 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() == 3770 ISD::SETNE)); 3771 3772 // operands of the new intrinsic call 3773 SmallVector<SDValue, 4> Ops; 3774 if (HaveChain) 3775 Ops.push_back(BRCOND.getOperand(0)); 3776 3777 Ops.append(Intr->op_begin() + (HaveChain ?
2 : 1), Intr->op_end()); 3778 Ops.push_back(Target); 3779 3780 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end()); 3781 3782 // build the new intrinsic call 3783 SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode(); 3784 3785 if (!HaveChain) { 3786 SDValue Ops[] = { 3787 SDValue(Result, 0), 3788 BRCOND.getOperand(0) 3789 }; 3790 3791 Result = DAG.getMergeValues(Ops, DL).getNode(); 3792 } 3793 3794 if (BR) { 3795 // Give the branch instruction our target 3796 SDValue Ops[] = { 3797 BR->getOperand(0), 3798 BRCOND.getOperand(2) 3799 }; 3800 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops); 3801 DAG.ReplaceAllUsesWith(BR, NewBR.getNode()); 3802 BR = NewBR.getNode(); 3803 } 3804 3805 SDValue Chain = SDValue(Result, Result->getNumValues() - 1); 3806 3807 // Copy the intrinsic results to registers 3808 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) { 3809 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg); 3810 if (!CopyToReg) 3811 continue; 3812 3813 Chain = DAG.getCopyToReg( 3814 Chain, DL, 3815 CopyToReg->getOperand(1), 3816 SDValue(Result, i - 1), 3817 SDValue()); 3818 3819 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0)); 3820 } 3821 3822 // Remove the old intrinsic from the chain 3823 DAG.ReplaceAllUsesOfValueWith( 3824 SDValue(Intr, Intr->getNumValues() - 1), 3825 Intr->getOperand(0)); 3826 3827 return Chain; 3828 } 3829 3830 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG, 3831 SDValue Op, 3832 const SDLoc &DL, 3833 EVT VT) const { 3834 return Op.getValueType().bitsLE(VT) ? 3835 DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) : 3836 DAG.getNode(ISD::FTRUNC, DL, VT, Op); 3837 } 3838 3839 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { 3840 assert(Op.getValueType() == MVT::f16 && 3841 "Do not know how to custom lower FP_ROUND for non-f16 type"); 3842 3843 SDValue Src = Op.getOperand(0); 3844 EVT SrcVT = Src.getValueType(); 3845 if (SrcVT != MVT::f64) 3846 return Op; 3847 3848 SDLoc DL(Op); 3849 3850 SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src); 3851 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16); 3852 return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc); 3853 } 3854 3855 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const { 3856 SDLoc SL(Op); 3857 MachineFunction &MF = DAG.getMachineFunction(); 3858 SDValue Chain = Op.getOperand(0); 3859 3860 unsigned TrapID = Op.getOpcode() == ISD::DEBUGTRAP ? 
3861 SISubtarget::TrapIDLLVMDebugTrap : SISubtarget::TrapIDLLVMTrap; 3862 3863 if (Subtarget->getTrapHandlerAbi() == SISubtarget::TrapHandlerAbiHsa && 3864 Subtarget->isTrapHandlerEnabled()) { 3865 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 3866 unsigned UserSGPR = Info->getQueuePtrUserSGPR(); 3867 assert(UserSGPR != AMDGPU::NoRegister); 3868 3869 SDValue QueuePtr = CreateLiveInRegister( 3870 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); 3871 3872 SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64); 3873 3874 SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01, 3875 QueuePtr, SDValue()); 3876 3877 SDValue Ops[] = { 3878 ToReg, 3879 DAG.getTargetConstant(TrapID, SL, MVT::i16), 3880 SGPR01, 3881 ToReg.getValue(1) 3882 }; 3883 3884 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); 3885 } 3886 3887 switch (TrapID) { 3888 case SISubtarget::TrapIDLLVMTrap: 3889 return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain); 3890 case SISubtarget::TrapIDLLVMDebugTrap: { 3891 DiagnosticInfoUnsupported NoTrap(MF.getFunction(), 3892 "debugtrap handler not supported", 3893 Op.getDebugLoc(), 3894 DS_Warning); 3895 LLVMContext &Ctx = MF.getFunction().getContext(); 3896 Ctx.diagnose(NoTrap); 3897 return Chain; 3898 } 3899 default: 3900 llvm_unreachable("unsupported trap handler type!"); 3901 } 3902 3903 return Chain; 3904 } 3905 3906 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL, 3907 SelectionDAG &DAG) const { 3908 // FIXME: Use inline constants (src_{shared, private}_base) instead. 3909 if (Subtarget->hasApertureRegs()) { 3910 unsigned Offset = AS == AMDGPUASI.LOCAL_ADDRESS ? 3911 AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE : 3912 AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE; 3913 unsigned WidthM1 = AS == AMDGPUASI.LOCAL_ADDRESS ? 3914 AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE : 3915 AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE; 3916 unsigned Encoding = 3917 AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ | 3918 Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ | 3919 WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_; 3920 3921 SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16); 3922 SDValue ApertureReg = SDValue( 3923 DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0); 3924 SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32); 3925 return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount); 3926 } 3927 3928 MachineFunction &MF = DAG.getMachineFunction(); 3929 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 3930 unsigned UserSGPR = Info->getQueuePtrUserSGPR(); 3931 assert(UserSGPR != AMDGPU::NoRegister); 3932 3933 SDValue QueuePtr = CreateLiveInRegister( 3934 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); 3935 3936 // Offset into amd_queue_t for group_segment_aperture_base_hi / 3937 // private_segment_aperture_base_hi. 3938 uint32_t StructOffset = (AS == AMDGPUASI.LOCAL_ADDRESS) ? 0x40 : 0x44; 3939 3940 SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset); 3941 3942 // TODO: Use custom target PseudoSourceValue. 3943 // TODO: We should use the value from the IR intrinsic call, but it might not 3944 // be available and how do we get it? 
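  // For now, use an undef i8* in the constant address space so the
  // MachinePointerInfo below at least carries an address space and offset for
  // alias analysis; the undef value is only a placeholder base.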
3945 Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()), 3946 AMDGPUASI.CONSTANT_ADDRESS)); 3947 3948 MachinePointerInfo PtrInfo(V, StructOffset); 3949 return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo, 3950 MinAlign(64, StructOffset), 3951 MachineMemOperand::MODereferenceable | 3952 MachineMemOperand::MOInvariant); 3953 } 3954 3955 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, 3956 SelectionDAG &DAG) const { 3957 SDLoc SL(Op); 3958 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op); 3959 3960 SDValue Src = ASC->getOperand(0); 3961 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64); 3962 3963 const AMDGPUTargetMachine &TM = 3964 static_cast<const AMDGPUTargetMachine &>(getTargetMachine()); 3965 3966 // flat -> local/private 3967 if (ASC->getSrcAddressSpace() == AMDGPUASI.FLAT_ADDRESS) { 3968 unsigned DestAS = ASC->getDestAddressSpace(); 3969 3970 if (DestAS == AMDGPUASI.LOCAL_ADDRESS || 3971 DestAS == AMDGPUASI.PRIVATE_ADDRESS) { 3972 unsigned NullVal = TM.getNullPointerValue(DestAS); 3973 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); 3974 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE); 3975 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); 3976 3977 return DAG.getNode(ISD::SELECT, SL, MVT::i32, 3978 NonNull, Ptr, SegmentNullPtr); 3979 } 3980 } 3981 3982 // local/private -> flat 3983 if (ASC->getDestAddressSpace() == AMDGPUASI.FLAT_ADDRESS) { 3984 unsigned SrcAS = ASC->getSrcAddressSpace(); 3985 3986 if (SrcAS == AMDGPUASI.LOCAL_ADDRESS || 3987 SrcAS == AMDGPUASI.PRIVATE_ADDRESS) { 3988 unsigned NullVal = TM.getNullPointerValue(SrcAS); 3989 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); 3990 3991 SDValue NonNull 3992 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE); 3993 3994 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG); 3995 SDValue CvtPtr 3996 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture); 3997 3998 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, 3999 DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr), 4000 FlatNullPtr); 4001 } 4002 } 4003 4004 // global <-> flat are no-ops and never emitted. 4005 4006 const MachineFunction &MF = DAG.getMachineFunction(); 4007 DiagnosticInfoUnsupported InvalidAddrSpaceCast( 4008 MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc()); 4009 DAG.getContext()->diagnose(InvalidAddrSpaceCast); 4010 4011 return DAG.getUNDEF(ASC->getValueType(0)); 4012 } 4013 4014 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, 4015 SelectionDAG &DAG) const { 4016 SDValue Idx = Op.getOperand(2); 4017 if (isa<ConstantSDNode>(Idx)) 4018 return SDValue(); 4019 4020 // Avoid stack access for dynamic indexing. 4021 SDLoc SL(Op); 4022 SDValue Vec = Op.getOperand(0); 4023 SDValue Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Op.getOperand(1)); 4024 4025 // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec 4026 SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Val); 4027 4028 // Convert vector index to bit-index. 
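  // (Each element is 16 bits, so element index i becomes bit index i * 16;
  // e.g. inserting into element 1 shifts the 0xffff mask left by 16 before the
  // BFI-style merge below.)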
4029 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, 4030 DAG.getConstant(16, SL, MVT::i32)); 4031 4032 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec); 4033 4034 SDValue BFM = DAG.getNode(ISD::SHL, SL, MVT::i32, 4035 DAG.getConstant(0xffff, SL, MVT::i32), 4036 ScaledIdx); 4037 4038 SDValue LHS = DAG.getNode(ISD::AND, SL, MVT::i32, BFM, ExtVal); 4039 SDValue RHS = DAG.getNode(ISD::AND, SL, MVT::i32, 4040 DAG.getNOT(SL, BFM, MVT::i32), BCVec); 4041 4042 SDValue BFI = DAG.getNode(ISD::OR, SL, MVT::i32, LHS, RHS); 4043 return DAG.getNode(ISD::BITCAST, SL, Op.getValueType(), BFI); 4044 } 4045 4046 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, 4047 SelectionDAG &DAG) const { 4048 SDLoc SL(Op); 4049 4050 EVT ResultVT = Op.getValueType(); 4051 SDValue Vec = Op.getOperand(0); 4052 SDValue Idx = Op.getOperand(1); 4053 4054 DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr); 4055 4056 // Make sure we do any optimizations that will make it easier to fold 4057 // source modifiers before obscuring it with bit operations. 4058 4059 // XXX - Why doesn't this get called when vector_shuffle is expanded? 4060 if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI)) 4061 return Combined; 4062 4063 if (const ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) { 4064 SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec); 4065 4066 if (CIdx->getZExtValue() == 1) { 4067 Result = DAG.getNode(ISD::SRL, SL, MVT::i32, Result, 4068 DAG.getConstant(16, SL, MVT::i32)); 4069 } else { 4070 assert(CIdx->getZExtValue() == 0); 4071 } 4072 4073 if (ResultVT.bitsLT(MVT::i32)) 4074 Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result); 4075 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result); 4076 } 4077 4078 SDValue Sixteen = DAG.getConstant(16, SL, MVT::i32); 4079 4080 // Convert vector index to bit-index. 4081 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, Sixteen); 4082 4083 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec); 4084 SDValue Elt = DAG.getNode(ISD::SRL, SL, MVT::i32, BC, ScaledIdx); 4085 4086 SDValue Result = Elt; 4087 if (ResultVT.bitsLT(MVT::i32)) 4088 Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result); 4089 4090 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result); 4091 } 4092 4093 bool 4094 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 4095 // We can fold offsets for anything that doesn't require a GOT relocation. 4096 return (GA->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS || 4097 GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) && 4098 !shouldEmitGOTReloc(GA->getGlobal()); 4099 } 4100 4101 static SDValue 4102 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV, 4103 const SDLoc &DL, unsigned Offset, EVT PtrVT, 4104 unsigned GAFlags = SIInstrInfo::MO_NONE) { 4105 // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is 4106 // lowered to the following code sequence: 4107 // 4108 // For constant address space: 4109 // s_getpc_b64 s[0:1] 4110 // s_add_u32 s0, s0, $symbol 4111 // s_addc_u32 s1, s1, 0 4112 // 4113 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 4114 // a fixup or relocation is emitted to replace $symbol with a literal 4115 // constant, which is a pc-relative offset from the encoding of the $symbol 4116 // operand to the global variable.
4117 // 4118 // For global address space: 4119 // s_getpc_b64 s[0:1] 4120 // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo 4121 // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi 4122 // 4123 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 4124 // fixups or relocations are emitted to replace $symbol@*@lo and 4125 // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant, 4126 // which is a 64-bit pc-relative offset from the encoding of the $symbol 4127 // operand to the global variable. 4128 // 4129 // What we want here is an offset from the value returned by s_getpc 4130 // (which is the address of the s_add_u32 instruction) to the global 4131 // variable, but since the encoding of $symbol starts 4 bytes after the start 4132 // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too 4133 // small. This requires us to add 4 to the global variable offset in order to 4134 // compute the correct address. 4135 SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, 4136 GAFlags); 4137 SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, 4138 GAFlags == SIInstrInfo::MO_NONE ? 4139 GAFlags : GAFlags + 1); 4140 return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi); 4141 } 4142 4143 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, 4144 SDValue Op, 4145 SelectionDAG &DAG) const { 4146 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op); 4147 const GlobalValue *GV = GSD->getGlobal(); 4148 4149 if (GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS && 4150 GSD->getAddressSpace() != AMDGPUASI.GLOBAL_ADDRESS && 4151 // FIXME: It isn't correct to rely on the type of the pointer. This should 4152 // be removed when address space 0 is 64-bit. 4153 !GV->getType()->getElementType()->isFunctionTy()) 4154 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG); 4155 4156 SDLoc DL(GSD); 4157 EVT PtrVT = Op.getValueType(); 4158 4159 if (shouldEmitFixup(GV)) 4160 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT); 4161 else if (shouldEmitPCReloc(GV)) 4162 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT, 4163 SIInstrInfo::MO_REL32); 4164 4165 SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT, 4166 SIInstrInfo::MO_GOTPCREL32); 4167 4168 Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext()); 4169 PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS); 4170 const DataLayout &DataLayout = DAG.getDataLayout(); 4171 unsigned Align = DataLayout.getABITypeAlignment(PtrTy); 4172 // FIXME: Use a PseudoSourceValue once those can be assigned an address space. 4173 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); 4174 4175 return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align, 4176 MachineMemOperand::MODereferenceable | 4177 MachineMemOperand::MOInvariant); 4178 } 4179 4180 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, 4181 const SDLoc &DL, SDValue V) const { 4182 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as 4183 // the destination register. 4184 // 4185 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions, 4186 // so we will end up with redundant moves to m0. 4187 // 4188 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result. 4189 4190 // A Null SDValue creates a glue result. 
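  // SI_INIT_M0 is expanded to "s_mov_b32 m0, <value>" by the custom inserter
  // above; result 0 of this node is the chain and result 1 is the glue that
  // callers thread into the instruction that actually reads m0.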
4191 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue, 4192 V, Chain); 4193 return SDValue(M0, 0); 4194 } 4195 4196 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, 4197 SDValue Op, 4198 MVT VT, 4199 unsigned Offset) const { 4200 SDLoc SL(Op); 4201 SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL, 4202 DAG.getEntryNode(), Offset, false); 4203 // The local size values will have the hi 16-bits as zero. 4204 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param, 4205 DAG.getValueType(VT)); 4206 } 4207 4208 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, 4209 EVT VT) { 4210 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), 4211 "non-hsa intrinsic with hsa target", 4212 DL.getDebugLoc()); 4213 DAG.getContext()->diagnose(BadIntrin); 4214 return DAG.getUNDEF(VT); 4215 } 4216 4217 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, 4218 EVT VT) { 4219 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), 4220 "intrinsic not supported on subtarget", 4221 DL.getDebugLoc()); 4222 DAG.getContext()->diagnose(BadIntrin); 4223 return DAG.getUNDEF(VT); 4224 } 4225 4226 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 4227 SelectionDAG &DAG) const { 4228 MachineFunction &MF = DAG.getMachineFunction(); 4229 auto MFI = MF.getInfo<SIMachineFunctionInfo>(); 4230 4231 EVT VT = Op.getValueType(); 4232 SDLoc DL(Op); 4233 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 4234 4235 // TODO: Should this propagate fast-math-flags? 4236 4237 switch (IntrinsicID) { 4238 case Intrinsic::amdgcn_implicit_buffer_ptr: { 4239 if (getSubtarget()->isAmdCodeObjectV2(MF)) 4240 return emitNonHSAIntrinsicError(DAG, DL, VT); 4241 return getPreloadedValue(DAG, *MFI, VT, 4242 AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR); 4243 } 4244 case Intrinsic::amdgcn_dispatch_ptr: 4245 case Intrinsic::amdgcn_queue_ptr: { 4246 if (!Subtarget->isAmdCodeObjectV2(MF)) { 4247 DiagnosticInfoUnsupported BadIntrin( 4248 MF.getFunction(), "unsupported hsa intrinsic without hsa target", 4249 DL.getDebugLoc()); 4250 DAG.getContext()->diagnose(BadIntrin); 4251 return DAG.getUNDEF(VT); 4252 } 4253 4254 auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ? 
4255 AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR; 4256 return getPreloadedValue(DAG, *MFI, VT, RegID); 4257 } 4258 case Intrinsic::amdgcn_implicitarg_ptr: { 4259 if (MFI->isEntryFunction()) 4260 return getImplicitArgPtr(DAG, DL); 4261 return getPreloadedValue(DAG, *MFI, VT, 4262 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); 4263 } 4264 case Intrinsic::amdgcn_kernarg_segment_ptr: { 4265 return getPreloadedValue(DAG, *MFI, VT, 4266 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); 4267 } 4268 case Intrinsic::amdgcn_dispatch_id: { 4269 return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID); 4270 } 4271 case Intrinsic::amdgcn_rcp: 4272 return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1)); 4273 case Intrinsic::amdgcn_rsq: 4274 return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 4275 case Intrinsic::amdgcn_rsq_legacy: 4276 if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) 4277 return emitRemovedIntrinsicError(DAG, DL, VT); 4278 4279 return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1)); 4280 case Intrinsic::amdgcn_rcp_legacy: 4281 if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) 4282 return emitRemovedIntrinsicError(DAG, DL, VT); 4283 return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1)); 4284 case Intrinsic::amdgcn_rsq_clamp: { 4285 if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS) 4286 return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1)); 4287 4288 Type *Type = VT.getTypeForEVT(*DAG.getContext()); 4289 APFloat Max = APFloat::getLargest(Type->getFltSemantics()); 4290 APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true); 4291 4292 SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 4293 SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq, 4294 DAG.getConstantFP(Max, DL, VT)); 4295 return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp, 4296 DAG.getConstantFP(Min, DL, VT)); 4297 } 4298 case Intrinsic::r600_read_ngroups_x: 4299 if (Subtarget->isAmdHsaOS()) 4300 return emitNonHSAIntrinsicError(DAG, DL, VT); 4301 4302 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4303 SI::KernelInputOffsets::NGROUPS_X, false); 4304 case Intrinsic::r600_read_ngroups_y: 4305 if (Subtarget->isAmdHsaOS()) 4306 return emitNonHSAIntrinsicError(DAG, DL, VT); 4307 4308 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4309 SI::KernelInputOffsets::NGROUPS_Y, false); 4310 case Intrinsic::r600_read_ngroups_z: 4311 if (Subtarget->isAmdHsaOS()) 4312 return emitNonHSAIntrinsicError(DAG, DL, VT); 4313 4314 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4315 SI::KernelInputOffsets::NGROUPS_Z, false); 4316 case Intrinsic::r600_read_global_size_x: 4317 if (Subtarget->isAmdHsaOS()) 4318 return emitNonHSAIntrinsicError(DAG, DL, VT); 4319 4320 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4321 SI::KernelInputOffsets::GLOBAL_SIZE_X, false); 4322 case Intrinsic::r600_read_global_size_y: 4323 if (Subtarget->isAmdHsaOS()) 4324 return emitNonHSAIntrinsicError(DAG, DL, VT); 4325 4326 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4327 SI::KernelInputOffsets::GLOBAL_SIZE_Y, false); 4328 case Intrinsic::r600_read_global_size_z: 4329 if (Subtarget->isAmdHsaOS()) 4330 return emitNonHSAIntrinsicError(DAG, DL, VT); 4331 4332 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4333 SI::KernelInputOffsets::GLOBAL_SIZE_Z, false); 4334 case 
Intrinsic::r600_read_local_size_x: 4335 if (Subtarget->isAmdHsaOS()) 4336 return emitNonHSAIntrinsicError(DAG, DL, VT); 4337 4338 return lowerImplicitZextParam(DAG, Op, MVT::i16, 4339 SI::KernelInputOffsets::LOCAL_SIZE_X); 4340 case Intrinsic::r600_read_local_size_y: 4341 if (Subtarget->isAmdHsaOS()) 4342 return emitNonHSAIntrinsicError(DAG, DL, VT); 4343 4344 return lowerImplicitZextParam(DAG, Op, MVT::i16, 4345 SI::KernelInputOffsets::LOCAL_SIZE_Y); 4346 case Intrinsic::r600_read_local_size_z: 4347 if (Subtarget->isAmdHsaOS()) 4348 return emitNonHSAIntrinsicError(DAG, DL, VT); 4349 4350 return lowerImplicitZextParam(DAG, Op, MVT::i16, 4351 SI::KernelInputOffsets::LOCAL_SIZE_Z); 4352 case Intrinsic::amdgcn_workgroup_id_x: 4353 case Intrinsic::r600_read_tgid_x: 4354 return getPreloadedValue(DAG, *MFI, VT, 4355 AMDGPUFunctionArgInfo::WORKGROUP_ID_X); 4356 case Intrinsic::amdgcn_workgroup_id_y: 4357 case Intrinsic::r600_read_tgid_y: 4358 return getPreloadedValue(DAG, *MFI, VT, 4359 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y); 4360 case Intrinsic::amdgcn_workgroup_id_z: 4361 case Intrinsic::r600_read_tgid_z: 4362 return getPreloadedValue(DAG, *MFI, VT, 4363 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z); 4364 case Intrinsic::amdgcn_workitem_id_x: { 4365 case Intrinsic::r600_read_tidig_x: 4366 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, 4367 SDLoc(DAG.getEntryNode()), 4368 MFI->getArgInfo().WorkItemIDX); 4369 } 4370 case Intrinsic::amdgcn_workitem_id_y: 4371 case Intrinsic::r600_read_tidig_y: 4372 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, 4373 SDLoc(DAG.getEntryNode()), 4374 MFI->getArgInfo().WorkItemIDY); 4375 case Intrinsic::amdgcn_workitem_id_z: 4376 case Intrinsic::r600_read_tidig_z: 4377 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, 4378 SDLoc(DAG.getEntryNode()), 4379 MFI->getArgInfo().WorkItemIDZ); 4380 case AMDGPUIntrinsic::SI_load_const: { 4381 SDValue Ops[] = { 4382 Op.getOperand(1), 4383 Op.getOperand(2) 4384 }; 4385 4386 MachineMemOperand *MMO = MF.getMachineMemOperand( 4387 MachinePointerInfo(), 4388 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | 4389 MachineMemOperand::MOInvariant, 4390 VT.getStoreSize(), 4); 4391 return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL, 4392 Op->getVTList(), Ops, VT, MMO); 4393 } 4394 case Intrinsic::amdgcn_fdiv_fast: 4395 return lowerFDIV_FAST(Op, DAG); 4396 case Intrinsic::amdgcn_interp_mov: { 4397 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); 4398 SDValue Glue = M0.getValue(1); 4399 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1), 4400 Op.getOperand(2), Op.getOperand(3), Glue); 4401 } 4402 case Intrinsic::amdgcn_interp_p1: { 4403 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); 4404 SDValue Glue = M0.getValue(1); 4405 return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1), 4406 Op.getOperand(2), Op.getOperand(3), Glue); 4407 } 4408 case Intrinsic::amdgcn_interp_p2: { 4409 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5)); 4410 SDValue Glue = SDValue(M0.getNode(), 1); 4411 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1), 4412 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4), 4413 Glue); 4414 } 4415 case Intrinsic::amdgcn_sin: 4416 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1)); 4417 4418 case Intrinsic::amdgcn_cos: 4419 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1)); 4420 4421 case Intrinsic::amdgcn_log_clamp: { 
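    // The clamped log operation can only be selected directly on pre-VI
    // subtargets; returning SDValue() there keeps the normal selection path.
    // On VI and newer there is nothing to lower this to, so diagnose the use
    // and fold the result to undef.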
4422 if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS) 4423 return SDValue(); 4424 4425 DiagnosticInfoUnsupported BadIntrin( 4426 MF.getFunction(), "intrinsic not supported on subtarget", 4427 DL.getDebugLoc()); 4428 DAG.getContext()->diagnose(BadIntrin); 4429 return DAG.getUNDEF(VT); 4430 } 4431 case Intrinsic::amdgcn_ldexp: 4432 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, 4433 Op.getOperand(1), Op.getOperand(2)); 4434 4435 case Intrinsic::amdgcn_fract: 4436 return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1)); 4437 4438 case Intrinsic::amdgcn_class: 4439 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT, 4440 Op.getOperand(1), Op.getOperand(2)); 4441 case Intrinsic::amdgcn_div_fmas: 4442 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT, 4443 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), 4444 Op.getOperand(4)); 4445 4446 case Intrinsic::amdgcn_div_fixup: 4447 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT, 4448 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 4449 4450 case Intrinsic::amdgcn_trig_preop: 4451 return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT, 4452 Op.getOperand(1), Op.getOperand(2)); 4453 case Intrinsic::amdgcn_div_scale: { 4454 // 3rd parameter required to be a constant. 4455 const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 4456 if (!Param) 4457 return DAG.getMergeValues({ DAG.getUNDEF(VT), DAG.getUNDEF(MVT::i1) }, DL); 4458 4459 // Translate to the operands expected by the machine instruction. The 4460 // first parameter must be the same as the first instruction. 4461 SDValue Numerator = Op.getOperand(1); 4462 SDValue Denominator = Op.getOperand(2); 4463 4464 // Note this order is opposite of the machine instruction's operations, 4465 // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The 4466 // intrinsic has the numerator as the first operand to match a normal 4467 // division operation. 4468 4469 SDValue Src0 = Param->isAllOnesValue() ? 
Numerator : Denominator; 4470 4471 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0, 4472 Denominator, Numerator); 4473 } 4474 case Intrinsic::amdgcn_icmp: { 4475 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 4476 if (!CD) 4477 return DAG.getUNDEF(VT); 4478 4479 int CondCode = CD->getSExtValue(); 4480 if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE || 4481 CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE) 4482 return DAG.getUNDEF(VT); 4483 4484 ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode); 4485 ISD::CondCode CCOpcode = getICmpCondCode(IcInput); 4486 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1), 4487 Op.getOperand(2), DAG.getCondCode(CCOpcode)); 4488 } 4489 case Intrinsic::amdgcn_fcmp: { 4490 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 4491 if (!CD) 4492 return DAG.getUNDEF(VT); 4493 4494 int CondCode = CD->getSExtValue(); 4495 if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE || 4496 CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) 4497 return DAG.getUNDEF(VT); 4498 4499 FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode); 4500 ISD::CondCode CCOpcode = getFCmpCondCode(IcInput); 4501 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1), 4502 Op.getOperand(2), DAG.getCondCode(CCOpcode)); 4503 } 4504 case Intrinsic::amdgcn_fmed3: 4505 return DAG.getNode(AMDGPUISD::FMED3, DL, VT, 4506 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 4507 case Intrinsic::amdgcn_fmul_legacy: 4508 return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT, 4509 Op.getOperand(1), Op.getOperand(2)); 4510 case Intrinsic::amdgcn_sffbh: 4511 return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1)); 4512 case Intrinsic::amdgcn_sbfe: 4513 return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT, 4514 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 4515 case Intrinsic::amdgcn_ubfe: 4516 return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT, 4517 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 4518 case Intrinsic::amdgcn_cvt_pkrtz: { 4519 // FIXME: Stop adding cast if v2f16 legal. 4520 EVT VT = Op.getValueType(); 4521 SDValue Node = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, DL, MVT::i32, 4522 Op.getOperand(1), Op.getOperand(2)); 4523 return DAG.getNode(ISD::BITCAST, DL, VT, Node); 4524 } 4525 case Intrinsic::amdgcn_wqm: { 4526 SDValue Src = Op.getOperand(1); 4527 return SDValue(DAG.getMachineNode(AMDGPU::WQM, DL, Src.getValueType(), Src), 4528 0); 4529 } 4530 case Intrinsic::amdgcn_wwm: { 4531 SDValue Src = Op.getOperand(1); 4532 return SDValue(DAG.getMachineNode(AMDGPU::WWM, DL, Src.getValueType(), Src), 4533 0); 4534 } 4535 case Intrinsic::amdgcn_image_getlod: 4536 case Intrinsic::amdgcn_image_getresinfo: { 4537 unsigned Idx = (IntrinsicID == Intrinsic::amdgcn_image_getresinfo) ? 3 : 4; 4538 4539 // Replace dmask with everything disabled with undef. 
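    // (A dmask of 0 enables no result channels, so the getlod/getresinfo
    // result can be folded straight to undef instead of emitting the image
    // instruction.)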
4540 const ConstantSDNode *DMask = dyn_cast<ConstantSDNode>(Op.getOperand(Idx)); 4541 if (!DMask || DMask->isNullValue()) 4542 return DAG.getUNDEF(Op.getValueType()); 4543 return SDValue(); 4544 } 4545 default: 4546 return Op; 4547 } 4548 } 4549 4550 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, 4551 SelectionDAG &DAG) const { 4552 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 4553 SDLoc DL(Op); 4554 4555 switch (IntrID) { 4556 case Intrinsic::amdgcn_atomic_inc: 4557 case Intrinsic::amdgcn_atomic_dec: 4558 case Intrinsic::amdgcn_atomic_fadd: 4559 case Intrinsic::amdgcn_atomic_fmin: 4560 case Intrinsic::amdgcn_atomic_fmax: { 4561 MemSDNode *M = cast<MemSDNode>(Op); 4562 unsigned Opc; 4563 switch (IntrID) { 4564 case Intrinsic::amdgcn_atomic_inc: 4565 Opc = AMDGPUISD::ATOMIC_INC; 4566 break; 4567 case Intrinsic::amdgcn_atomic_dec: 4568 Opc = AMDGPUISD::ATOMIC_DEC; 4569 break; 4570 case Intrinsic::amdgcn_atomic_fadd: 4571 Opc = AMDGPUISD::ATOMIC_LOAD_FADD; 4572 break; 4573 case Intrinsic::amdgcn_atomic_fmin: 4574 Opc = AMDGPUISD::ATOMIC_LOAD_FMIN; 4575 break; 4576 case Intrinsic::amdgcn_atomic_fmax: 4577 Opc = AMDGPUISD::ATOMIC_LOAD_FMAX; 4578 break; 4579 default: 4580 llvm_unreachable("Unknown intrinsic!"); 4581 } 4582 SDValue Ops[] = { 4583 M->getOperand(0), // Chain 4584 M->getOperand(2), // Ptr 4585 M->getOperand(3) // Value 4586 }; 4587 4588 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops, 4589 M->getMemoryVT(), M->getMemOperand()); 4590 } 4591 case Intrinsic::amdgcn_buffer_load: 4592 case Intrinsic::amdgcn_buffer_load_format: { 4593 SDValue Ops[] = { 4594 Op.getOperand(0), // Chain 4595 Op.getOperand(2), // rsrc 4596 Op.getOperand(3), // vindex 4597 Op.getOperand(4), // offset 4598 Op.getOperand(5), // glc 4599 Op.getOperand(6) // slc 4600 }; 4601 4602 unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ? 
4603 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; 4604 EVT VT = Op.getValueType(); 4605 EVT IntVT = VT.changeTypeToInteger(); 4606 4607 auto *M = cast<MemSDNode>(Op); 4608 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, 4609 M->getMemOperand()); 4610 } 4611 case Intrinsic::amdgcn_tbuffer_load: { 4612 MemSDNode *M = cast<MemSDNode>(Op); 4613 SDValue Ops[] = { 4614 Op.getOperand(0), // Chain 4615 Op.getOperand(2), // rsrc 4616 Op.getOperand(3), // vindex 4617 Op.getOperand(4), // voffset 4618 Op.getOperand(5), // soffset 4619 Op.getOperand(6), // offset 4620 Op.getOperand(7), // dfmt 4621 Op.getOperand(8), // nfmt 4622 Op.getOperand(9), // glc 4623 Op.getOperand(10) // slc 4624 }; 4625 4626 EVT VT = Op.getValueType(); 4627 4628 return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, 4629 Op->getVTList(), Ops, VT, M->getMemOperand()); 4630 } 4631 case Intrinsic::amdgcn_buffer_atomic_swap: 4632 case Intrinsic::amdgcn_buffer_atomic_add: 4633 case Intrinsic::amdgcn_buffer_atomic_sub: 4634 case Intrinsic::amdgcn_buffer_atomic_smin: 4635 case Intrinsic::amdgcn_buffer_atomic_umin: 4636 case Intrinsic::amdgcn_buffer_atomic_smax: 4637 case Intrinsic::amdgcn_buffer_atomic_umax: 4638 case Intrinsic::amdgcn_buffer_atomic_and: 4639 case Intrinsic::amdgcn_buffer_atomic_or: 4640 case Intrinsic::amdgcn_buffer_atomic_xor: { 4641 SDValue Ops[] = { 4642 Op.getOperand(0), // Chain 4643 Op.getOperand(2), // vdata 4644 Op.getOperand(3), // rsrc 4645 Op.getOperand(4), // vindex 4646 Op.getOperand(5), // offset 4647 Op.getOperand(6) // slc 4648 }; 4649 EVT VT = Op.getValueType(); 4650 4651 auto *M = cast<MemSDNode>(Op); 4652 unsigned Opcode = 0; 4653 4654 switch (IntrID) { 4655 case Intrinsic::amdgcn_buffer_atomic_swap: 4656 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP; 4657 break; 4658 case Intrinsic::amdgcn_buffer_atomic_add: 4659 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD; 4660 break; 4661 case Intrinsic::amdgcn_buffer_atomic_sub: 4662 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB; 4663 break; 4664 case Intrinsic::amdgcn_buffer_atomic_smin: 4665 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN; 4666 break; 4667 case Intrinsic::amdgcn_buffer_atomic_umin: 4668 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN; 4669 break; 4670 case Intrinsic::amdgcn_buffer_atomic_smax: 4671 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX; 4672 break; 4673 case Intrinsic::amdgcn_buffer_atomic_umax: 4674 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX; 4675 break; 4676 case Intrinsic::amdgcn_buffer_atomic_and: 4677 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND; 4678 break; 4679 case Intrinsic::amdgcn_buffer_atomic_or: 4680 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR; 4681 break; 4682 case Intrinsic::amdgcn_buffer_atomic_xor: 4683 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR; 4684 break; 4685 default: 4686 llvm_unreachable("unhandled atomic opcode"); 4687 } 4688 4689 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, 4690 M->getMemOperand()); 4691 } 4692 4693 case Intrinsic::amdgcn_buffer_atomic_cmpswap: { 4694 SDValue Ops[] = { 4695 Op.getOperand(0), // Chain 4696 Op.getOperand(2), // src 4697 Op.getOperand(3), // cmp 4698 Op.getOperand(4), // rsrc 4699 Op.getOperand(5), // vindex 4700 Op.getOperand(6), // offset 4701 Op.getOperand(7) // slc 4702 }; 4703 EVT VT = Op.getValueType(); 4704 auto *M = cast<MemSDNode>(Op); 4705 4706 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, 4707 Op->getVTList(), Ops, VT, M->getMemOperand()); 4708 } 4709 4710 // Basic sample. 
4711 case Intrinsic::amdgcn_image_sample: 4712 case Intrinsic::amdgcn_image_sample_cl: 4713 case Intrinsic::amdgcn_image_sample_d: 4714 case Intrinsic::amdgcn_image_sample_d_cl: 4715 case Intrinsic::amdgcn_image_sample_l: 4716 case Intrinsic::amdgcn_image_sample_b: 4717 case Intrinsic::amdgcn_image_sample_b_cl: 4718 case Intrinsic::amdgcn_image_sample_lz: 4719 case Intrinsic::amdgcn_image_sample_cd: 4720 case Intrinsic::amdgcn_image_sample_cd_cl: 4721 4722 // Sample with comparison. 4723 case Intrinsic::amdgcn_image_sample_c: 4724 case Intrinsic::amdgcn_image_sample_c_cl: 4725 case Intrinsic::amdgcn_image_sample_c_d: 4726 case Intrinsic::amdgcn_image_sample_c_d_cl: 4727 case Intrinsic::amdgcn_image_sample_c_l: 4728 case Intrinsic::amdgcn_image_sample_c_b: 4729 case Intrinsic::amdgcn_image_sample_c_b_cl: 4730 case Intrinsic::amdgcn_image_sample_c_lz: 4731 case Intrinsic::amdgcn_image_sample_c_cd: 4732 case Intrinsic::amdgcn_image_sample_c_cd_cl: 4733 4734 // Sample with offsets. 4735 case Intrinsic::amdgcn_image_sample_o: 4736 case Intrinsic::amdgcn_image_sample_cl_o: 4737 case Intrinsic::amdgcn_image_sample_d_o: 4738 case Intrinsic::amdgcn_image_sample_d_cl_o: 4739 case Intrinsic::amdgcn_image_sample_l_o: 4740 case Intrinsic::amdgcn_image_sample_b_o: 4741 case Intrinsic::amdgcn_image_sample_b_cl_o: 4742 case Intrinsic::amdgcn_image_sample_lz_o: 4743 case Intrinsic::amdgcn_image_sample_cd_o: 4744 case Intrinsic::amdgcn_image_sample_cd_cl_o: 4745 4746 // Sample with comparison and offsets. 4747 case Intrinsic::amdgcn_image_sample_c_o: 4748 case Intrinsic::amdgcn_image_sample_c_cl_o: 4749 case Intrinsic::amdgcn_image_sample_c_d_o: 4750 case Intrinsic::amdgcn_image_sample_c_d_cl_o: 4751 case Intrinsic::amdgcn_image_sample_c_l_o: 4752 case Intrinsic::amdgcn_image_sample_c_b_o: 4753 case Intrinsic::amdgcn_image_sample_c_b_cl_o: 4754 case Intrinsic::amdgcn_image_sample_c_lz_o: 4755 case Intrinsic::amdgcn_image_sample_c_cd_o: 4756 case Intrinsic::amdgcn_image_sample_c_cd_cl_o: { 4757 // Replace dmask with everything disabled with undef. 4758 const ConstantSDNode *DMask = dyn_cast<ConstantSDNode>(Op.getOperand(5)); 4759 if (!DMask || DMask->isNullValue()) { 4760 SDValue Undef = DAG.getUNDEF(Op.getValueType()); 4761 return DAG.getMergeValues({ Undef, Op.getOperand(0) }, SDLoc(Op)); 4762 } 4763 4764 return SDValue(); 4765 } 4766 default: 4767 return SDValue(); 4768 } 4769 } 4770 4771 SDValue SITargetLowering::handleD16VData(SDValue VData, 4772 SelectionDAG &DAG) const { 4773 EVT StoreVT = VData.getValueType(); 4774 SDLoc DL(VData); 4775 4776 if (StoreVT.isVector()) { 4777 assert ((StoreVT.getVectorNumElements() != 3) && "Handle v3f16"); 4778 if (!Subtarget->hasUnpackedD16VMem()) { 4779 if (!isTypeLegal(StoreVT)) { 4780 // If Target supports packed vmem, we just need to workaround 4781 // the illegal type by casting to an equivalent one. 4782 EVT EquivStoreVT = getEquivalentMemType(*DAG.getContext(), StoreVT); 4783 return DAG.getNode(ISD::BITCAST, DL, EquivStoreVT, VData); 4784 } 4785 } else { // We need to unpack the packed data to store. 4786 EVT IntStoreVT = StoreVT.changeTypeToInteger(); 4787 SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); 4788 EVT EquivStoreVT = (StoreVT == MVT::v2f16) ? MVT::v2i32 : MVT::v4i32; 4789 return DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData); 4790 } 4791 } 4792 // No change for f16 and legal vector D16 types. 
4793 return VData; 4794 } 4795 4796 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, 4797 SelectionDAG &DAG) const { 4798 SDLoc DL(Op); 4799 SDValue Chain = Op.getOperand(0); 4800 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 4801 MachineFunction &MF = DAG.getMachineFunction(); 4802 4803 switch (IntrinsicID) { 4804 case Intrinsic::amdgcn_exp: { 4805 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2)); 4806 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3)); 4807 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8)); 4808 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9)); 4809 4810 const SDValue Ops[] = { 4811 Chain, 4812 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt 4813 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en 4814 Op.getOperand(4), // src0 4815 Op.getOperand(5), // src1 4816 Op.getOperand(6), // src2 4817 Op.getOperand(7), // src3 4818 DAG.getTargetConstant(0, DL, MVT::i1), // compr 4819 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1) 4820 }; 4821 4822 unsigned Opc = Done->isNullValue() ? 4823 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE; 4824 return DAG.getNode(Opc, DL, Op->getVTList(), Ops); 4825 } 4826 case Intrinsic::amdgcn_exp_compr: { 4827 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2)); 4828 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3)); 4829 SDValue Src0 = Op.getOperand(4); 4830 SDValue Src1 = Op.getOperand(5); 4831 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6)); 4832 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7)); 4833 4834 SDValue Undef = DAG.getUNDEF(MVT::f32); 4835 const SDValue Ops[] = { 4836 Chain, 4837 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt 4838 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en 4839 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), 4840 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), 4841 Undef, // src2 4842 Undef, // src3 4843 DAG.getTargetConstant(1, DL, MVT::i1), // compr 4844 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1) 4845 }; 4846 4847 unsigned Opc = Done->isNullValue() ? 4848 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE; 4849 return DAG.getNode(Opc, DL, Op->getVTList(), Ops); 4850 } 4851 case Intrinsic::amdgcn_s_sendmsg: 4852 case Intrinsic::amdgcn_s_sendmsghalt: { 4853 unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ? 
4854 AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT; 4855 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3)); 4856 SDValue Glue = Chain.getValue(1); 4857 return DAG.getNode(NodeOp, DL, MVT::Other, Chain, 4858 Op.getOperand(2), Glue); 4859 } 4860 case Intrinsic::amdgcn_init_exec: { 4861 return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain, 4862 Op.getOperand(2)); 4863 } 4864 case Intrinsic::amdgcn_init_exec_from_input: { 4865 return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain, 4866 Op.getOperand(2), Op.getOperand(3)); 4867 } 4868 case AMDGPUIntrinsic::AMDGPU_kill: { 4869 SDValue Src = Op.getOperand(2); 4870 if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) { 4871 if (!K->isNegative()) 4872 return Chain; 4873 4874 SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32); 4875 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne); 4876 } 4877 4878 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src); 4879 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast); 4880 } 4881 case Intrinsic::amdgcn_s_barrier: { 4882 if (getTargetMachine().getOptLevel() > CodeGenOpt::None) { 4883 const SISubtarget &ST = MF.getSubtarget<SISubtarget>(); 4884 unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second; 4885 if (WGSize <= ST.getWavefrontSize()) 4886 return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other, 4887 Op.getOperand(0)), 0); 4888 } 4889 return SDValue(); 4890 }; 4891 case AMDGPUIntrinsic::SI_tbuffer_store: { 4892 4893 // Extract vindex and voffset from vaddr as appropriate 4894 const ConstantSDNode *OffEn = cast<ConstantSDNode>(Op.getOperand(10)); 4895 const ConstantSDNode *IdxEn = cast<ConstantSDNode>(Op.getOperand(11)); 4896 SDValue VAddr = Op.getOperand(5); 4897 4898 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32); 4899 4900 assert(!(OffEn->isOne() && IdxEn->isOne()) && 4901 "Legacy intrinsic doesn't support both offset and index - use new version"); 4902 4903 SDValue VIndex = IdxEn->isOne() ? VAddr : Zero; 4904 SDValue VOffset = OffEn->isOne() ? VAddr : Zero; 4905 4906 // Deal with the vec-3 case 4907 const ConstantSDNode *NumChannels = cast<ConstantSDNode>(Op.getOperand(4)); 4908 auto Opcode = NumChannels->getZExtValue() == 3 ? 
4909 AMDGPUISD::TBUFFER_STORE_FORMAT_X3 : AMDGPUISD::TBUFFER_STORE_FORMAT; 4910 4911 SDValue Ops[] = { 4912 Chain, 4913 Op.getOperand(3), // vdata 4914 Op.getOperand(2), // rsrc 4915 VIndex, 4916 VOffset, 4917 Op.getOperand(6), // soffset 4918 Op.getOperand(7), // inst_offset 4919 Op.getOperand(8), // dfmt 4920 Op.getOperand(9), // nfmt 4921 Op.getOperand(12), // glc 4922 Op.getOperand(13), // slc 4923 }; 4924 4925 assert((cast<ConstantSDNode>(Op.getOperand(14)))->getZExtValue() == 0 && 4926 "Value of tfe other than zero is unsupported"); 4927 4928 EVT VT = Op.getOperand(3).getValueType(); 4929 MachineMemOperand *MMO = MF.getMachineMemOperand( 4930 MachinePointerInfo(), 4931 MachineMemOperand::MOStore, 4932 VT.getStoreSize(), 4); 4933 return DAG.getMemIntrinsicNode(Opcode, DL, 4934 Op->getVTList(), Ops, VT, MMO); 4935 } 4936 4937 case Intrinsic::amdgcn_tbuffer_store: { 4938 SDValue VData = Op.getOperand(2); 4939 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 4940 if (IsD16) 4941 VData = handleD16VData(VData, DAG); 4942 SDValue Ops[] = { 4943 Chain, 4944 VData, // vdata 4945 Op.getOperand(3), // rsrc 4946 Op.getOperand(4), // vindex 4947 Op.getOperand(5), // voffset 4948 Op.getOperand(6), // soffset 4949 Op.getOperand(7), // offset 4950 Op.getOperand(8), // dfmt 4951 Op.getOperand(9), // nfmt 4952 Op.getOperand(10), // glc 4953 Op.getOperand(11) // slc 4954 }; 4955 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : 4956 AMDGPUISD::TBUFFER_STORE_FORMAT; 4957 MemSDNode *M = cast<MemSDNode>(Op); 4958 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 4959 M->getMemoryVT(), M->getMemOperand()); 4960 } 4961 4962 case Intrinsic::amdgcn_buffer_store: 4963 case Intrinsic::amdgcn_buffer_store_format: { 4964 SDValue VData = Op.getOperand(2); 4965 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 4966 if (IsD16) 4967 VData = handleD16VData(VData, DAG); 4968 SDValue Ops[] = { 4969 Chain, 4970 VData, // vdata 4971 Op.getOperand(3), // rsrc 4972 Op.getOperand(4), // vindex 4973 Op.getOperand(5), // offset 4974 Op.getOperand(6), // glc 4975 Op.getOperand(7) // slc 4976 }; 4977 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ? 4978 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; 4979 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; 4980 MemSDNode *M = cast<MemSDNode>(Op); 4981 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 4982 M->getMemoryVT(), M->getMemOperand()); 4983 } 4984 4985 default: 4986 return Op; 4987 } 4988 } 4989 4990 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 4991 SDLoc DL(Op); 4992 LoadSDNode *Load = cast<LoadSDNode>(Op); 4993 ISD::LoadExtType ExtType = Load->getExtensionType(); 4994 EVT MemVT = Load->getMemoryVT(); 4995 4996 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) { 4997 if (MemVT == MVT::i16 && isTypeLegal(MVT::i16)) 4998 return SDValue(); 4999 5000 // FIXME: Copied from PPC 5001 // First, load into 32 bits, then truncate to 1 bit. 5002 5003 SDValue Chain = Load->getChain(); 5004 SDValue BasePtr = Load->getBasePtr(); 5005 MachineMemOperand *MMO = Load->getMemOperand(); 5006 5007 EVT RealMemVT = (MemVT == MVT::i1) ? 
MVT::i8 : MVT::i16; 5008 5009 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, 5010 BasePtr, RealMemVT, MMO); 5011 5012 SDValue Ops[] = { 5013 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD), 5014 NewLD.getValue(1) 5015 }; 5016 5017 return DAG.getMergeValues(Ops, DL); 5018 } 5019 5020 if (!MemVT.isVector()) 5021 return SDValue(); 5022 5023 assert(Op.getValueType().getVectorElementType() == MVT::i32 && 5024 "Custom lowering for non-i32 vectors hasn't been implemented."); 5025 5026 unsigned AS = Load->getAddressSpace(); 5027 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT, 5028 AS, Load->getAlignment())) { 5029 SDValue Ops[2]; 5030 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG); 5031 return DAG.getMergeValues(Ops, DL); 5032 } 5033 5034 MachineFunction &MF = DAG.getMachineFunction(); 5035 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 5036 // If there is a possibility that flat instructions access scratch memory, 5037 // then we need to use the same legalization rules we use for private. 5038 if (AS == AMDGPUASI.FLAT_ADDRESS) 5039 AS = MFI->hasFlatScratchInit() ? 5040 AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS; 5041 5042 unsigned NumElements = MemVT.getVectorNumElements(); 5043 if (AS == AMDGPUASI.CONSTANT_ADDRESS) { 5044 if (isMemOpUniform(Load)) 5045 return SDValue(); 5046 // Non-uniform loads will be selected to MUBUF instructions, so they 5047 // have the same legalization requirements as global and private 5048 // loads. 5049 // 5050 } 5051 if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS) { 5052 if (Subtarget->getScalarizeGlobalBehavior() && isMemOpUniform(Load) && 5053 !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load)) 5054 return SDValue(); 5055 // Non-uniform loads will be selected to MUBUF instructions, so they 5056 // have the same legalization requirements as global and private 5057 // loads. 5058 // 5059 } 5060 if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS || 5061 AS == AMDGPUASI.FLAT_ADDRESS) { 5062 if (NumElements > 4) 5063 return SplitVectorLoad(Op, DAG); 5064 // v4 loads are supported for private and global memory. 5065 return SDValue(); 5066 } 5067 if (AS == AMDGPUASI.PRIVATE_ADDRESS) { 5068 // Depending on the setting of the private_element_size field in the 5069 // resource descriptor, we can only make private accesses up to a certain 5070 // size. 5071 switch (Subtarget->getMaxPrivateElementSize()) { 5072 case 4: 5073 return scalarizeVectorLoad(Load, DAG); 5074 case 8: 5075 if (NumElements > 2) 5076 return SplitVectorLoad(Op, DAG); 5077 return SDValue(); 5078 case 16: 5079 // Same as global/flat 5080 if (NumElements > 4) 5081 return SplitVectorLoad(Op, DAG); 5082 return SDValue(); 5083 default: 5084 llvm_unreachable("unsupported private_element_size"); 5085 } 5086 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) { 5087 if (NumElements > 2) 5088 return SplitVectorLoad(Op, DAG); 5089 5090 if (NumElements == 2) 5091 return SDValue(); 5092 5093 // If properly aligned, splitting might let us use ds_read_b64.
5094 return SplitVectorLoad(Op, DAG); 5095 } 5096 return SDValue(); 5097 } 5098 5099 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 5100 if (Op.getValueType() != MVT::i64) 5101 return SDValue(); 5102 5103 SDLoc DL(Op); 5104 SDValue Cond = Op.getOperand(0); 5105 5106 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 5107 SDValue One = DAG.getConstant(1, DL, MVT::i32); 5108 5109 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); 5110 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); 5111 5112 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); 5113 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); 5114 5115 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); 5116 5117 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); 5118 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); 5119 5120 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); 5121 5122 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi}); 5123 return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res); 5124 } 5125 5126 // Catch division cases where we can use shortcuts with rcp and rsq 5127 // instructions. 5128 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op, 5129 SelectionDAG &DAG) const { 5130 SDLoc SL(Op); 5131 SDValue LHS = Op.getOperand(0); 5132 SDValue RHS = Op.getOperand(1); 5133 EVT VT = Op.getValueType(); 5134 const SDNodeFlags Flags = Op->getFlags(); 5135 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath || 5136 Flags.hasUnsafeAlgebra() || Flags.hasAllowReciprocal(); 5137 5138 if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals()) 5139 return SDValue(); 5140 5141 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { 5142 if (Unsafe || VT == MVT::f32 || VT == MVT::f16) { 5143 if (CLHS->isExactlyValue(1.0)) { 5144 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to 5145 // the CI documentation has a worst case error of 1 ulp. 5146 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to 5147 // use it as long as we aren't trying to use denormals. 5148 // 5149 // v_rcp_f16 and v_rsq_f16 DO support denormals. 5150 5151 // 1.0 / sqrt(x) -> rsq(x) 5152 5153 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP 5154 // error seems really high at 2^29 ULP. 5155 if (RHS.getOpcode() == ISD::FSQRT) 5156 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); 5157 5158 // 1.0 / x -> rcp(x) 5159 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 5160 } 5161 5162 // Same as for 1.0, but expand the sign out of the constant. 5163 if (CLHS->isExactlyValue(-1.0)) { 5164 // -1.0 / x -> rcp (fneg x) 5165 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 5166 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS); 5167 } 5168 } 5169 } 5170 5171 if (Unsafe) { 5172 // Turn into multiply by the reciprocal. 
5173 // x / y -> x * (1.0 / y) 5174 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 5175 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags); 5176 } 5177 5178 return SDValue(); 5179 } 5180 5181 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, 5182 EVT VT, SDValue A, SDValue B, SDValue GlueChain) { 5183 if (GlueChain->getNumValues() <= 1) { 5184 return DAG.getNode(Opcode, SL, VT, A, B); 5185 } 5186 5187 assert(GlueChain->getNumValues() == 3); 5188 5189 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); 5190 switch (Opcode) { 5191 default: llvm_unreachable("no chain equivalent for opcode"); 5192 case ISD::FMUL: 5193 Opcode = AMDGPUISD::FMUL_W_CHAIN; 5194 break; 5195 } 5196 5197 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, 5198 GlueChain.getValue(2)); 5199 } 5200 5201 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, 5202 EVT VT, SDValue A, SDValue B, SDValue C, 5203 SDValue GlueChain) { 5204 if (GlueChain->getNumValues() <= 1) { 5205 return DAG.getNode(Opcode, SL, VT, A, B, C); 5206 } 5207 5208 assert(GlueChain->getNumValues() == 3); 5209 5210 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); 5211 switch (Opcode) { 5212 default: llvm_unreachable("no chain equivalent for opcode"); 5213 case ISD::FMA: 5214 Opcode = AMDGPUISD::FMA_W_CHAIN; 5215 break; 5216 } 5217 5218 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C, 5219 GlueChain.getValue(2)); 5220 } 5221 5222 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const { 5223 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 5224 return FastLowered; 5225 5226 SDLoc SL(Op); 5227 SDValue Src0 = Op.getOperand(0); 5228 SDValue Src1 = Op.getOperand(1); 5229 5230 SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); 5231 SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); 5232 5233 SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1); 5234 SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1); 5235 5236 SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32); 5237 SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag); 5238 5239 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0); 5240 } 5241 5242 // Faster 2.5 ULP division that does not support denormals. 5243 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const { 5244 SDLoc SL(Op); 5245 SDValue LHS = Op.getOperand(1); 5246 SDValue RHS = Op.getOperand(2); 5247 5248 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); 5249 5250 const APFloat K0Val(BitsToFloat(0x6f800000)); 5251 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32); 5252 5253 const APFloat K1Val(BitsToFloat(0x2f800000)); 5254 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32); 5255 5256 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 5257 5258 EVT SetCCVT = 5259 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); 5260 5261 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); 5262 5263 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); 5264 5265 // TODO: Should this propagate fast-math-flags? 5266 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); 5267 5268 // rcp does not support denormals. 
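// Note: K0 above is 2^+96 and K1 is 2^-32 expressed as float bit patterns, so for very
// large denominators the input to rcp is pre-scaled by 2^-32 and the same factor is
// reapplied to the quotient below; the overall result is still lhs/rhs, while the
// reciprocal stays away from the denormal range that rcp cannot represent.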
5269 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); 5270 5271 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); 5272 5273 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); 5274 } 5275 5276 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { 5277 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 5278 return FastLowered; 5279 5280 SDLoc SL(Op); 5281 SDValue LHS = Op.getOperand(0); 5282 SDValue RHS = Op.getOperand(1); 5283 5284 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 5285 5286 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1); 5287 5288 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, 5289 RHS, RHS, LHS); 5290 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, 5291 LHS, RHS, LHS); 5292 5293 // Denominator is scaled to not be denormal, so using rcp is ok. 5294 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, 5295 DenominatorScaled); 5296 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, 5297 DenominatorScaled); 5298 5299 const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE | 5300 (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) | 5301 (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_); 5302 5303 const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16); 5304 5305 if (!Subtarget->hasFP32Denormals()) { 5306 SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); 5307 const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE, 5308 SL, MVT::i32); 5309 SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs, 5310 DAG.getEntryNode(), 5311 EnableDenormValue, BitField); 5312 SDValue Ops[3] = { 5313 NegDivScale0, 5314 EnableDenorm.getValue(0), 5315 EnableDenorm.getValue(1) 5316 }; 5317 5318 NegDivScale0 = DAG.getMergeValues(Ops, SL); 5319 } 5320 5321 SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, 5322 ApproxRcp, One, NegDivScale0); 5323 5324 SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, 5325 ApproxRcp, Fma0); 5326 5327 SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled, 5328 Fma1, Fma1); 5329 5330 SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, 5331 NumeratorScaled, Mul); 5332 5333 SDValue Fma3 = getFPTernOp(DAG, ISD::FMA,SL, MVT::f32, Fma2, Fma1, Mul, Fma2); 5334 5335 SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, 5336 NumeratorScaled, Fma3); 5337 5338 if (!Subtarget->hasFP32Denormals()) { 5339 const SDValue DisableDenormValue = 5340 DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32); 5341 SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other, 5342 Fma4.getValue(1), 5343 DisableDenormValue, 5344 BitField, 5345 Fma4.getValue(2)); 5346 5347 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, 5348 DisableDenorm, DAG.getRoot()); 5349 DAG.setRoot(OutputChain); 5350 } 5351 5352 SDValue Scale = NumeratorScaled.getValue(1); 5353 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32, 5354 Fma4, Fma1, Fma3, Scale); 5355 5356 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS); 5357 } 5358 5359 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { 5360 if (DAG.getTarget().Options.UnsafeFPMath) 5361 return lowerFastUnsafeFDIV(Op, DAG); 5362 5363 SDLoc SL(Op); 5364 SDValue X = Op.getOperand(0); 5365 SDValue Y = Op.getOperand(1); 5366 5367 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); 5368 5369 SDVTList ScaleVT = 
DAG.getVTList(MVT::f64, MVT::i1); 5370 5371 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X); 5372 5373 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0); 5374 5375 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0); 5376 5377 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One); 5378 5379 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp); 5380 5381 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One); 5382 5383 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X); 5384 5385 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1); 5386 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3); 5387 5388 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64, 5389 NegDivScale0, Mul, DivScale1); 5390 5391 SDValue Scale; 5392 5393 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) { 5394 // Workaround a hardware bug on SI where the condition output from div_scale 5395 // is not usable. 5396 5397 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32); 5398 5399 // Figure out if the scale to use for div_fmas. 5400 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); 5401 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y); 5402 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0); 5403 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1); 5404 5405 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi); 5406 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi); 5407 5408 SDValue Scale0Hi 5409 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi); 5410 SDValue Scale1Hi 5411 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi); 5412 5413 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ); 5414 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ); 5415 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen); 5416 } else { 5417 Scale = DivScale1.getValue(1); 5418 } 5419 5420 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64, 5421 Fma4, Fma3, Mul, Scale); 5422 5423 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X); 5424 } 5425 5426 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { 5427 EVT VT = Op.getValueType(); 5428 5429 if (VT == MVT::f32) 5430 return LowerFDIV32(Op, DAG); 5431 5432 if (VT == MVT::f64) 5433 return LowerFDIV64(Op, DAG); 5434 5435 if (VT == MVT::f16) 5436 return LowerFDIV16(Op, DAG); 5437 5438 llvm_unreachable("Unexpected type for fdiv"); 5439 } 5440 5441 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 5442 SDLoc DL(Op); 5443 StoreSDNode *Store = cast<StoreSDNode>(Op); 5444 EVT VT = Store->getMemoryVT(); 5445 5446 if (VT == MVT::i1) { 5447 return DAG.getTruncStore(Store->getChain(), DL, 5448 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32), 5449 Store->getBasePtr(), MVT::i1, Store->getMemOperand()); 5450 } 5451 5452 assert(VT.isVector() && 5453 Store->getValue().getValueType().getScalarType() == MVT::i32); 5454 5455 unsigned AS = Store->getAddressSpace(); 5456 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT, 5457 AS, Store->getAlignment())) { 5458 return expandUnalignedStore(Store, DAG); 5459 } 5460 5461 MachineFunction &MF = DAG.getMachineFunction(); 5462 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 5463 // If 
there is a possibility that flat instructions access scratch memory, 5464 // then we need to use the same legalization rules we use for private. 5465 if (AS == AMDGPUASI.FLAT_ADDRESS) 5466 AS = MFI->hasFlatScratchInit() ? 5467 AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS; 5468 5469 unsigned NumElements = VT.getVectorNumElements(); 5470 if (AS == AMDGPUASI.GLOBAL_ADDRESS || 5471 AS == AMDGPUASI.FLAT_ADDRESS) { 5472 if (NumElements > 4) 5473 return SplitVectorStore(Op, DAG); 5474 return SDValue(); 5475 } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) { 5476 switch (Subtarget->getMaxPrivateElementSize()) { 5477 case 4: 5478 return scalarizeVectorStore(Store, DAG); 5479 case 8: 5480 if (NumElements > 2) 5481 return SplitVectorStore(Op, DAG); 5482 return SDValue(); 5483 case 16: 5484 if (NumElements > 4) 5485 return SplitVectorStore(Op, DAG); 5486 return SDValue(); 5487 default: 5488 llvm_unreachable("unsupported private_element_size"); 5489 } 5490 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) { 5491 if (NumElements > 2) 5492 return SplitVectorStore(Op, DAG); 5493 5494 if (NumElements == 2) 5495 return Op; 5496 5497 // If properly aligned, splitting might let us use ds_write_b64. 5498 return SplitVectorStore(Op, DAG); 5499 } else { 5500 llvm_unreachable("unhandled address space"); 5501 } 5502 } 5503 5504 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { 5505 SDLoc DL(Op); 5506 EVT VT = Op.getValueType(); 5507 SDValue Arg = Op.getOperand(0); 5508 // TODO: Should this propagate fast-math-flags? 5509 SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT, 5510 DAG.getNode(ISD::FMUL, DL, VT, Arg, 5511 DAG.getConstantFP(0.5/M_PI, DL, 5512 VT))); 5513 5514 switch (Op.getOpcode()) { 5515 case ISD::FCOS: 5516 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart); 5517 case ISD::FSIN: 5518 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart); 5519 default: 5520 llvm_unreachable("Wrong trig opcode"); 5521 } 5522 } 5523 5524 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const { 5525 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op); 5526 assert(AtomicNode->isCompareAndSwap()); 5527 unsigned AS = AtomicNode->getAddressSpace(); 5528 5529 // No custom lowering required for local address space 5530 if (!isFlatGlobalAddrSpace(AS, AMDGPUASI)) 5531 return Op; 5532 5533 // Non-local address spaces require custom lowering for atomic compare 5534 // and swap; the cmp and swap values are packed into a v2i32, or a v2i64 for _X2. 5535 SDLoc DL(Op); 5536 SDValue ChainIn = Op.getOperand(0); 5537 SDValue Addr = Op.getOperand(1); 5538 SDValue Old = Op.getOperand(2); 5539 SDValue New = Op.getOperand(3); 5540 EVT VT = Op.getValueType(); 5541 MVT SimpleVT = VT.getSimpleVT(); 5542 MVT VecType = MVT::getVectorVT(SimpleVT, 2); 5543 5544 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old}); 5545 SDValue Ops[] = { ChainIn, Addr, NewOld }; 5546 5547 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(), 5548 Ops, VT, AtomicNode->getMemOperand()); 5549 } 5550 5551 //===----------------------------------------------------------------------===// 5552 // Custom DAG optimizations 5553 //===----------------------------------------------------------------------===// 5554 5555 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, 5556 DAGCombinerInfo &DCI) const { 5557 EVT VT = N->getValueType(0); 5558 EVT ScalarVT = VT.getScalarType(); 5559 if (ScalarVT != MVT::f32) 5560 return SDValue(); 5561 5562 SelectionDAG &DAG =
DCI.DAG; 5563 SDLoc DL(N); 5564 5565 SDValue Src = N->getOperand(0); 5566 EVT SrcVT = Src.getValueType(); 5567 5568 // TODO: We could try to match extracting the higher bytes, which would be 5569 // easier if i8 vectors weren't promoted to i32 vectors, particularly after 5570 // types are legalized. v4i8 -> v4f32 is probably the only case to worry 5571 // about in practice. 5572 if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) { 5573 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { 5574 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src); 5575 DCI.AddToWorklist(Cvt.getNode()); 5576 return Cvt; 5577 } 5578 } 5579 5580 return SDValue(); 5581 } 5582 5583 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2) 5584 5585 // This is a variant of 5586 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2), 5587 // 5588 // The normal DAG combiner will do this, but only if the add has one use since 5589 // that would increase the number of instructions. 5590 // 5591 // This prevents us from seeing a constant offset that can be folded into a 5592 // memory instruction's addressing mode. If we know the resulting add offset of 5593 // a pointer can be folded into an addressing offset, we can replace the pointer 5594 // operand with the add of new constant offset. This eliminates one of the uses, 5595 // and may allow the remaining use to also be simplified. 5596 // 5597 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N, 5598 unsigned AddrSpace, 5599 EVT MemVT, 5600 DAGCombinerInfo &DCI) const { 5601 SDValue N0 = N->getOperand(0); 5602 SDValue N1 = N->getOperand(1); 5603 5604 // We only do this to handle cases where it's profitable when there are 5605 // multiple uses of the add, so defer to the standard combine. 5606 if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) || 5607 N0->hasOneUse()) 5608 return SDValue(); 5609 5610 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1); 5611 if (!CN1) 5612 return SDValue(); 5613 5614 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 5615 if (!CAdd) 5616 return SDValue(); 5617 5618 // If the resulting offset is too large, we can't fold it into the addressing 5619 // mode offset. 5620 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); 5621 Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext()); 5622 5623 AddrMode AM; 5624 AM.HasBaseReg = true; 5625 AM.BaseOffs = Offset.getSExtValue(); 5626 if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace)) 5627 return SDValue(); 5628 5629 SelectionDAG &DAG = DCI.DAG; 5630 SDLoc SL(N); 5631 EVT VT = N->getValueType(0); 5632 5633 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); 5634 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32); 5635 5636 SDNodeFlags Flags; 5637 Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() && 5638 (N0.getOpcode() == ISD::OR || 5639 N0->getFlags().hasNoUnsignedWrap())); 5640 5641 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags); 5642 } 5643 5644 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N, 5645 DAGCombinerInfo &DCI) const { 5646 SDValue Ptr = N->getBasePtr(); 5647 SelectionDAG &DAG = DCI.DAG; 5648 SDLoc SL(N); 5649 5650 // TODO: We could also do this for multiplies. 
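// Illustrative example (hypothetical values): for a DS access whose pointer is
// (shl (add x, 16), 2) where the add has multiple uses, performSHLPtrCombine rewrites
// the pointer to (add (shl x, 2), 64); the constant 64 can then typically be folded
// into the instruction's immediate offset field, assuming that offset is legal for
// the address space.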
5651 if (Ptr.getOpcode() == ISD::SHL) { 5652 SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(), 5653 N->getMemoryVT(), DCI); 5654 if (NewPtr) { 5655 SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end()); 5656 5657 NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr; 5658 return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0); 5659 } 5660 } 5661 5662 return SDValue(); 5663 } 5664 5665 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) { 5666 return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) || 5667 (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) || 5668 (Opc == ISD::XOR && Val == 0); 5669 } 5670 5671 // Break up a 64-bit bit operation on a constant into two 32-bit and/or/xor ops. This 5672 // will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit 5673 // integer combine opportunities since most 64-bit operations are decomposed 5674 // this way. TODO: We won't want this for SALU especially if it is an inline 5675 // immediate. 5676 SDValue SITargetLowering::splitBinaryBitConstantOp( 5677 DAGCombinerInfo &DCI, 5678 const SDLoc &SL, 5679 unsigned Opc, SDValue LHS, 5680 const ConstantSDNode *CRHS) const { 5681 uint64_t Val = CRHS->getZExtValue(); 5682 uint32_t ValLo = Lo_32(Val); 5683 uint32_t ValHi = Hi_32(Val); 5684 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 5685 5686 if ((bitOpWithConstantIsReducible(Opc, ValLo) || 5687 bitOpWithConstantIsReducible(Opc, ValHi)) || 5688 (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) { 5689 // If we need to materialize a 64-bit immediate, it will be split up later 5690 // anyway. Avoid creating the harder to understand 64-bit immediate 5691 // materialization. 5692 return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi); 5693 } 5694 5695 return SDValue(); 5696 } 5697 5698 // Returns true if the argument is a boolean value which is not serialized into 5699 // memory or an argument and does not require v_cndmask_b32 to be deserialized. 5700 static bool isBoolSGPR(SDValue V) { 5701 if (V.getValueType() != MVT::i1) 5702 return false; 5703 switch (V.getOpcode()) { 5704 default: break; 5705 case ISD::SETCC: 5706 case ISD::AND: 5707 case ISD::OR: 5708 case ISD::XOR: 5709 case AMDGPUISD::FP_CLASS: 5710 return true; 5711 } 5712 return false; 5713 } 5714 5715 SDValue SITargetLowering::performAndCombine(SDNode *N, 5716 DAGCombinerInfo &DCI) const { 5717 if (DCI.isBeforeLegalize()) 5718 return SDValue(); 5719 5720 SelectionDAG &DAG = DCI.DAG; 5721 EVT VT = N->getValueType(0); 5722 SDValue LHS = N->getOperand(0); 5723 SDValue RHS = N->getOperand(1); 5724 5725 5726 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); 5727 if (VT == MVT::i64 && CRHS) { 5728 if (SDValue Split 5729 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS)) 5730 return Split; 5731 } 5732 5733 if (CRHS && VT == MVT::i32) { 5734 // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb 5735 // nb = number of trailing zeroes in mask 5736 // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass, 5737 // given that we are selecting 8 or 16 bit fields starting at byte boundary.
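// Worked example: (and (srl x, 8), 0xff00) gives Mask = 0xff00, Bits = 8, Shift = 8
// and NB = 8, so Offset = 16 and the combine produces
// (shl (AssertZext i8 (bfe_u32 x, 16, 8)), 8).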
5738 uint64_t Mask = CRHS->getZExtValue(); 5739 unsigned Bits = countPopulation(Mask); 5740 if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL && 5741 (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) { 5742 if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) { 5743 unsigned Shift = CShift->getZExtValue(); 5744 unsigned NB = CRHS->getAPIntValue().countTrailingZeros(); 5745 unsigned Offset = NB + Shift; 5746 if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary. 5747 SDLoc SL(N); 5748 SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32, 5749 LHS->getOperand(0), 5750 DAG.getConstant(Offset, SL, MVT::i32), 5751 DAG.getConstant(Bits, SL, MVT::i32)); 5752 EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits); 5753 SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE, 5754 DAG.getValueType(NarrowVT)); 5755 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext, 5756 DAG.getConstant(NB, SDLoc(CRHS), MVT::i32)); 5757 return Shl; 5758 } 5759 } 5760 } 5761 } 5762 5763 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> 5764 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) 5765 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) { 5766 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 5767 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); 5768 5769 SDValue X = LHS.getOperand(0); 5770 SDValue Y = RHS.getOperand(0); 5771 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) 5772 return SDValue(); 5773 5774 if (LCC == ISD::SETO) { 5775 if (X != LHS.getOperand(1)) 5776 return SDValue(); 5777 5778 if (RCC == ISD::SETUNE) { 5779 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); 5780 if (!C1 || !C1->isInfinity() || C1->isNegative()) 5781 return SDValue(); 5782 5783 const uint32_t Mask = SIInstrFlags::N_NORMAL | 5784 SIInstrFlags::N_SUBNORMAL | 5785 SIInstrFlags::N_ZERO | 5786 SIInstrFlags::P_ZERO | 5787 SIInstrFlags::P_SUBNORMAL | 5788 SIInstrFlags::P_NORMAL; 5789 5790 static_assert(((~(SIInstrFlags::S_NAN | 5791 SIInstrFlags::Q_NAN | 5792 SIInstrFlags::N_INFINITY | 5793 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, 5794 "mask not equal"); 5795 5796 SDLoc DL(N); 5797 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 5798 X, DAG.getConstant(Mask, DL, MVT::i32)); 5799 } 5800 } 5801 } 5802 5803 if (VT == MVT::i32 && 5804 (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) { 5805 // and x, (sext cc from i1) => select cc, x, 0 5806 if (RHS.getOpcode() != ISD::SIGN_EXTEND) 5807 std::swap(LHS, RHS); 5808 if (isBoolSGPR(RHS.getOperand(0))) 5809 return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0), 5810 LHS, DAG.getConstant(0, SDLoc(N), MVT::i32)); 5811 } 5812 5813 return SDValue(); 5814 } 5815 5816 SDValue SITargetLowering::performOrCombine(SDNode *N, 5817 DAGCombinerInfo &DCI) const { 5818 SelectionDAG &DAG = DCI.DAG; 5819 SDValue LHS = N->getOperand(0); 5820 SDValue RHS = N->getOperand(1); 5821 5822 EVT VT = N->getValueType(0); 5823 if (VT == MVT::i1) { 5824 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) 5825 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && 5826 RHS.getOpcode() == AMDGPUISD::FP_CLASS) { 5827 SDValue Src = LHS.getOperand(0); 5828 if (Src != RHS.getOperand(0)) 5829 return SDValue(); 5830 5831 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); 5832 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 5833 if (!CLHS || !CRHS) 5834 return 
SDValue(); 5835 5836 // Only 10 bits are used. 5837 static const uint32_t MaxMask = 0x3ff; 5838 5839 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; 5840 SDLoc DL(N); 5841 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 5842 Src, DAG.getConstant(NewMask, DL, MVT::i32)); 5843 } 5844 5845 return SDValue(); 5846 } 5847 5848 if (VT != MVT::i64) 5849 return SDValue(); 5850 5851 // TODO: This could be a generic combine with a predicate for extracting the 5852 // high half of an integer being free. 5853 5854 // (or i64:x, (zero_extend i32:y)) -> 5855 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x))) 5856 if (LHS.getOpcode() == ISD::ZERO_EXTEND && 5857 RHS.getOpcode() != ISD::ZERO_EXTEND) 5858 std::swap(LHS, RHS); 5859 5860 if (RHS.getOpcode() == ISD::ZERO_EXTEND) { 5861 SDValue ExtSrc = RHS.getOperand(0); 5862 EVT SrcVT = ExtSrc.getValueType(); 5863 if (SrcVT == MVT::i32) { 5864 SDLoc SL(N); 5865 SDValue LowLHS, HiBits; 5866 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG); 5867 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc); 5868 5869 DCI.AddToWorklist(LowOr.getNode()); 5870 DCI.AddToWorklist(HiBits.getNode()); 5871 5872 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 5873 LowOr, HiBits); 5874 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 5875 } 5876 } 5877 5878 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 5879 if (CRHS) { 5880 if (SDValue Split 5881 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS)) 5882 return Split; 5883 } 5884 5885 return SDValue(); 5886 } 5887 5888 SDValue SITargetLowering::performXorCombine(SDNode *N, 5889 DAGCombinerInfo &DCI) const { 5890 EVT VT = N->getValueType(0); 5891 if (VT != MVT::i64) 5892 return SDValue(); 5893 5894 SDValue LHS = N->getOperand(0); 5895 SDValue RHS = N->getOperand(1); 5896 5897 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); 5898 if (CRHS) { 5899 if (SDValue Split 5900 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS)) 5901 return Split; 5902 } 5903 5904 return SDValue(); 5905 } 5906 5907 // Instructions that will be lowered with a final instruction that zeros the 5908 // high result bits. 5909 // XXX - probably only need to list legal operations. 5910 static bool fp16SrcZerosHighBits(unsigned Opc) { 5911 switch (Opc) { 5912 case ISD::FADD: 5913 case ISD::FSUB: 5914 case ISD::FMUL: 5915 case ISD::FDIV: 5916 case ISD::FREM: 5917 case ISD::FMA: 5918 case ISD::FMAD: 5919 case ISD::FCANONICALIZE: 5920 case ISD::FP_ROUND: 5921 case ISD::UINT_TO_FP: 5922 case ISD::SINT_TO_FP: 5923 case ISD::FABS: 5924 // Fabs is lowered to a bit operation, but it's an and which will clear the 5925 // high bits anyway. 
5926 case ISD::FSQRT: 5927 case ISD::FSIN: 5928 case ISD::FCOS: 5929 case ISD::FPOWI: 5930 case ISD::FPOW: 5931 case ISD::FLOG: 5932 case ISD::FLOG2: 5933 case ISD::FLOG10: 5934 case ISD::FEXP: 5935 case ISD::FEXP2: 5936 case ISD::FCEIL: 5937 case ISD::FTRUNC: 5938 case ISD::FRINT: 5939 case ISD::FNEARBYINT: 5940 case ISD::FROUND: 5941 case ISD::FFLOOR: 5942 case ISD::FMINNUM: 5943 case ISD::FMAXNUM: 5944 case AMDGPUISD::FRACT: 5945 case AMDGPUISD::CLAMP: 5946 case AMDGPUISD::COS_HW: 5947 case AMDGPUISD::SIN_HW: 5948 case AMDGPUISD::FMIN3: 5949 case AMDGPUISD::FMAX3: 5950 case AMDGPUISD::FMED3: 5951 case AMDGPUISD::FMAD_FTZ: 5952 case AMDGPUISD::RCP: 5953 case AMDGPUISD::RSQ: 5954 case AMDGPUISD::LDEXP: 5955 return true; 5956 default: 5957 // fcopysign, select and others may be lowered to 32-bit bit operations 5958 // which don't zero the high bits. 5959 return false; 5960 } 5961 } 5962 5963 SDValue SITargetLowering::performZeroExtendCombine(SDNode *N, 5964 DAGCombinerInfo &DCI) const { 5965 if (!Subtarget->has16BitInsts() || 5966 DCI.getDAGCombineLevel() < AfterLegalizeDAG) 5967 return SDValue(); 5968 5969 EVT VT = N->getValueType(0); 5970 if (VT != MVT::i32) 5971 return SDValue(); 5972 5973 SDValue Src = N->getOperand(0); 5974 if (Src.getValueType() != MVT::i16) 5975 return SDValue(); 5976 5977 // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src 5978 // FIXME: It is not universally true that the high bits are zeroed on gfx9. 5979 if (Src.getOpcode() == ISD::BITCAST) { 5980 SDValue BCSrc = Src.getOperand(0); 5981 if (BCSrc.getValueType() == MVT::f16 && 5982 fp16SrcZerosHighBits(BCSrc.getOpcode())) 5983 return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc); 5984 } 5985 5986 return SDValue(); 5987 } 5988 5989 SDValue SITargetLowering::performClassCombine(SDNode *N, 5990 DAGCombinerInfo &DCI) const { 5991 SelectionDAG &DAG = DCI.DAG; 5992 SDValue Mask = N->getOperand(1); 5993 5994 // fp_class x, 0 -> false 5995 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { 5996 if (CMask->isNullValue()) 5997 return DAG.getConstant(0, SDLoc(N), MVT::i1); 5998 } 5999 6000 if (N->getOperand(0).isUndef()) 6001 return DAG.getUNDEF(MVT::i1); 6002 6003 return SDValue(); 6004 } 6005 6006 static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) { 6007 if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions()) 6008 return true; 6009 6010 return DAG.isKnownNeverNaN(Op); 6011 } 6012 6013 static bool isCanonicalized(SelectionDAG &DAG, SDValue Op, 6014 const SISubtarget *ST, unsigned MaxDepth=5) { 6015 // If source is a result of another standard FP operation it is already in 6016 // canonical form. 6017 6018 switch (Op.getOpcode()) { 6019 default: 6020 break; 6021 6022 // These will flush denorms if required. 6023 case ISD::FADD: 6024 case ISD::FSUB: 6025 case ISD::FMUL: 6026 case ISD::FSQRT: 6027 case ISD::FCEIL: 6028 case ISD::FFLOOR: 6029 case ISD::FMA: 6030 case ISD::FMAD: 6031 6032 case ISD::FCANONICALIZE: 6033 return true; 6034 6035 case ISD::FP_ROUND: 6036 return Op.getValueType().getScalarType() != MVT::f16 || 6037 ST->hasFP16Denormals(); 6038 6039 case ISD::FP_EXTEND: 6040 return Op.getOperand(0).getValueType().getScalarType() != MVT::f16 || 6041 ST->hasFP16Denormals(); 6042 6043 case ISD::FP16_TO_FP: 6044 case ISD::FP_TO_FP16: 6045 return ST->hasFP16Denormals(); 6046 6047 // It can/will be lowered or combined as a bit operation. 6048 // Need to check their input recursively to handle. 
6049 case ISD::FNEG: 6050 case ISD::FABS: 6051 return (MaxDepth > 0) && 6052 isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1); 6053 6054 case ISD::FSIN: 6055 case ISD::FCOS: 6056 case ISD::FSINCOS: 6057 return Op.getValueType().getScalarType() != MVT::f16; 6058 6059 // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms. 6060 // For such targets need to check their input recursively. 6061 case ISD::FMINNUM: 6062 case ISD::FMAXNUM: 6063 case ISD::FMINNAN: 6064 case ISD::FMAXNAN: 6065 6066 if (ST->supportsMinMaxDenormModes() && 6067 DAG.isKnownNeverNaN(Op.getOperand(0)) && 6068 DAG.isKnownNeverNaN(Op.getOperand(1))) 6069 return true; 6070 6071 return (MaxDepth > 0) && 6072 isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1) && 6073 isCanonicalized(DAG, Op.getOperand(1), ST, MaxDepth - 1); 6074 6075 case ISD::ConstantFP: { 6076 auto F = cast<ConstantFPSDNode>(Op)->getValueAPF(); 6077 return !F.isDenormal() && !(F.isNaN() && F.isSignaling()); 6078 } 6079 } 6080 return false; 6081 } 6082 6083 // Constant fold canonicalize. 6084 SDValue SITargetLowering::performFCanonicalizeCombine( 6085 SDNode *N, 6086 DAGCombinerInfo &DCI) const { 6087 SelectionDAG &DAG = DCI.DAG; 6088 ConstantFPSDNode *CFP = isConstOrConstSplatFP(N->getOperand(0)); 6089 6090 if (!CFP) { 6091 SDValue N0 = N->getOperand(0); 6092 EVT VT = N0.getValueType().getScalarType(); 6093 auto ST = getSubtarget(); 6094 6095 if (((VT == MVT::f32 && ST->hasFP32Denormals()) || 6096 (VT == MVT::f64 && ST->hasFP64Denormals()) || 6097 (VT == MVT::f16 && ST->hasFP16Denormals())) && 6098 DAG.isKnownNeverNaN(N0)) 6099 return N0; 6100 6101 bool IsIEEEMode = Subtarget->enableIEEEBit(DAG.getMachineFunction()); 6102 6103 if ((IsIEEEMode || isKnownNeverSNan(DAG, N0)) && 6104 isCanonicalized(DAG, N0, ST)) 6105 return N0; 6106 6107 return SDValue(); 6108 } 6109 6110 const APFloat &C = CFP->getValueAPF(); 6111 6112 // Flush denormals to 0 if not enabled. 6113 if (C.isDenormal()) { 6114 EVT VT = N->getValueType(0); 6115 EVT SVT = VT.getScalarType(); 6116 if (SVT == MVT::f32 && !Subtarget->hasFP32Denormals()) 6117 return DAG.getConstantFP(0.0, SDLoc(N), VT); 6118 6119 if (SVT == MVT::f64 && !Subtarget->hasFP64Denormals()) 6120 return DAG.getConstantFP(0.0, SDLoc(N), VT); 6121 6122 if (SVT == MVT::f16 && !Subtarget->hasFP16Denormals()) 6123 return DAG.getConstantFP(0.0, SDLoc(N), VT); 6124 } 6125 6126 if (C.isNaN()) { 6127 EVT VT = N->getValueType(0); 6128 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics()); 6129 if (C.isSignaling()) { 6130 // Quiet a signaling NaN. 6131 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT); 6132 } 6133 6134 // Make sure it is the canonical NaN bitpattern. 6135 // 6136 // TODO: Can we use -1 as the canonical NaN value since it's an inline 6137 // immediate? 
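// For reference, the canonical quiet NaN for f32 is 0x7fc00000 (positive sign, quiet
// bit set, zero payload); any NaN constant with a different encoding is rewritten to
// it below.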
6138 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt()) 6139 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT); 6140 } 6141 6142 return N->getOperand(0); 6143 } 6144 6145 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { 6146 switch (Opc) { 6147 case ISD::FMAXNUM: 6148 return AMDGPUISD::FMAX3; 6149 case ISD::SMAX: 6150 return AMDGPUISD::SMAX3; 6151 case ISD::UMAX: 6152 return AMDGPUISD::UMAX3; 6153 case ISD::FMINNUM: 6154 return AMDGPUISD::FMIN3; 6155 case ISD::SMIN: 6156 return AMDGPUISD::SMIN3; 6157 case ISD::UMIN: 6158 return AMDGPUISD::UMIN3; 6159 default: 6160 llvm_unreachable("Not a min/max opcode"); 6161 } 6162 } 6163 6164 SDValue SITargetLowering::performIntMed3ImmCombine( 6165 SelectionDAG &DAG, const SDLoc &SL, 6166 SDValue Op0, SDValue Op1, bool Signed) const { 6167 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1); 6168 if (!K1) 6169 return SDValue(); 6170 6171 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); 6172 if (!K0) 6173 return SDValue(); 6174 6175 if (Signed) { 6176 if (K0->getAPIntValue().sge(K1->getAPIntValue())) 6177 return SDValue(); 6178 } else { 6179 if (K0->getAPIntValue().uge(K1->getAPIntValue())) 6180 return SDValue(); 6181 } 6182 6183 EVT VT = K0->getValueType(0); 6184 unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3; 6185 if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) { 6186 return DAG.getNode(Med3Opc, SL, VT, 6187 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0)); 6188 } 6189 6190 // If there isn't a 16-bit med3 operation, convert to 32-bit. 6191 MVT NVT = MVT::i32; 6192 unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 6193 6194 SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0)); 6195 SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1)); 6196 SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1); 6197 6198 SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3); 6199 return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3); 6200 } 6201 6202 static ConstantFPSDNode *getSplatConstantFP(SDValue Op) { 6203 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) 6204 return C; 6205 6206 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) { 6207 if (ConstantFPSDNode *C = BV->getConstantFPSplatNode()) 6208 return C; 6209 } 6210 6211 return nullptr; 6212 } 6213 6214 SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG, 6215 const SDLoc &SL, 6216 SDValue Op0, 6217 SDValue Op1) const { 6218 ConstantFPSDNode *K1 = getSplatConstantFP(Op1); 6219 if (!K1) 6220 return SDValue(); 6221 6222 ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1)); 6223 if (!K0) 6224 return SDValue(); 6225 6226 // Ordered >= (although NaN inputs should have folded away by now). 6227 APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF()); 6228 if (Cmp == APFloat::cmpGreaterThan) 6229 return SDValue(); 6230 6231 // TODO: Check IEEE bit enabled? 6232 EVT VT = Op0.getValueType(); 6233 if (Subtarget->enableDX10Clamp()) { 6234 // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the 6235 // hardware fmed3 behavior converting to a min. 6236 // FIXME: Should this be allowing -0.0? 6237 if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0)) 6238 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0)); 6239 } 6240 6241 // med3 for f16 is only available on gfx9+, and not available for v2f16. 
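// Unlike performIntMed3ImmCombine above, there is no widening fallback here; FP types
// without a med3 instruction simply fail the combine.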
6242 if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) { 6243 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a 6244 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would 6245 // then give the other result, which is different from med3 with a NaN 6246 // input. 6247 SDValue Var = Op0.getOperand(0); 6248 if (!isKnownNeverSNan(DAG, Var)) 6249 return SDValue(); 6250 6251 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0), 6252 Var, SDValue(K0, 0), SDValue(K1, 0)); 6253 } 6254 6255 return SDValue(); 6256 } 6257 6258 SDValue SITargetLowering::performMinMaxCombine(SDNode *N, 6259 DAGCombinerInfo &DCI) const { 6260 SelectionDAG &DAG = DCI.DAG; 6261 6262 EVT VT = N->getValueType(0); 6263 unsigned Opc = N->getOpcode(); 6264 SDValue Op0 = N->getOperand(0); 6265 SDValue Op1 = N->getOperand(1); 6266 6267 // Only do this if the inner op has one use since this will just increases 6268 // register pressure for no benefit. 6269 6270 6271 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY && 6272 VT != MVT::f64 && 6273 ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) { 6274 // max(max(a, b), c) -> max3(a, b, c) 6275 // min(min(a, b), c) -> min3(a, b, c) 6276 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { 6277 SDLoc DL(N); 6278 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 6279 DL, 6280 N->getValueType(0), 6281 Op0.getOperand(0), 6282 Op0.getOperand(1), 6283 Op1); 6284 } 6285 6286 // Try commuted. 6287 // max(a, max(b, c)) -> max3(a, b, c) 6288 // min(a, min(b, c)) -> min3(a, b, c) 6289 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { 6290 SDLoc DL(N); 6291 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 6292 DL, 6293 N->getValueType(0), 6294 Op0, 6295 Op1.getOperand(0), 6296 Op1.getOperand(1)); 6297 } 6298 } 6299 6300 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1) 6301 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) { 6302 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true)) 6303 return Med3; 6304 } 6305 6306 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) { 6307 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false)) 6308 return Med3; 6309 } 6310 6311 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1) 6312 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) || 6313 (Opc == AMDGPUISD::FMIN_LEGACY && 6314 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) && 6315 (VT == MVT::f32 || VT == MVT::f64 || 6316 (VT == MVT::f16 && Subtarget->has16BitInsts()) || 6317 (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) && 6318 Op0.hasOneUse()) { 6319 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1)) 6320 return Res; 6321 } 6322 6323 return SDValue(); 6324 } 6325 6326 static bool isClampZeroToOne(SDValue A, SDValue B) { 6327 if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) { 6328 if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) { 6329 // FIXME: Should this be allowing -0.0? 6330 return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) || 6331 (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0)); 6332 } 6333 } 6334 6335 return false; 6336 } 6337 6338 // FIXME: Should only worry about snans for version with chain. 6339 SDValue SITargetLowering::performFMed3Combine(SDNode *N, 6340 DAGCombinerInfo &DCI) const { 6341 EVT VT = N->getValueType(0); 6342 // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and 6343 // NaNs. 
With a NaN input, the order of the operands may change the result. 6344 6345 SelectionDAG &DAG = DCI.DAG; 6346 SDLoc SL(N); 6347 6348 SDValue Src0 = N->getOperand(0); 6349 SDValue Src1 = N->getOperand(1); 6350 SDValue Src2 = N->getOperand(2); 6351 6352 if (isClampZeroToOne(Src0, Src1)) { 6353 // const_a, const_b, x -> clamp is safe in all cases including signaling 6354 // nans. 6355 // FIXME: Should this be allowing -0.0? 6356 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2); 6357 } 6358 6359 // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother 6360 // handling no dx10-clamp? 6361 if (Subtarget->enableDX10Clamp()) { 6362 // If NaNs is clamped to 0, we are free to reorder the inputs. 6363 6364 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) 6365 std::swap(Src0, Src1); 6366 6367 if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2)) 6368 std::swap(Src1, Src2); 6369 6370 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) 6371 std::swap(Src0, Src1); 6372 6373 if (isClampZeroToOne(Src1, Src2)) 6374 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0); 6375 } 6376 6377 return SDValue(); 6378 } 6379 6380 SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N, 6381 DAGCombinerInfo &DCI) const { 6382 SDValue Src0 = N->getOperand(0); 6383 SDValue Src1 = N->getOperand(1); 6384 if (Src0.isUndef() && Src1.isUndef()) 6385 return DCI.DAG.getUNDEF(N->getValueType(0)); 6386 return SDValue(); 6387 } 6388 6389 SDValue SITargetLowering::performExtractVectorEltCombine( 6390 SDNode *N, DAGCombinerInfo &DCI) const { 6391 SDValue Vec = N->getOperand(0); 6392 6393 SelectionDAG &DAG = DCI.DAG; 6394 if (Vec.getOpcode() == ISD::FNEG && allUsesHaveSourceMods(N)) { 6395 SDLoc SL(N); 6396 EVT EltVT = N->getValueType(0); 6397 SDValue Idx = N->getOperand(1); 6398 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 6399 Vec.getOperand(0), Idx); 6400 return DAG.getNode(ISD::FNEG, SL, EltVT, Elt); 6401 } 6402 6403 return SDValue(); 6404 } 6405 6406 static bool convertBuildVectorCastElt(SelectionDAG &DAG, 6407 SDValue &Lo, SDValue &Hi) { 6408 if (Hi.getOpcode() == ISD::BITCAST && 6409 Hi.getOperand(0).getValueType() == MVT::f16 && 6410 (isa<ConstantSDNode>(Lo) || Lo.isUndef())) { 6411 Lo = DAG.getNode(ISD::BITCAST, SDLoc(Lo), MVT::f16, Lo); 6412 Hi = Hi.getOperand(0); 6413 return true; 6414 } 6415 6416 return false; 6417 } 6418 6419 SDValue SITargetLowering::performBuildVectorCombine( 6420 SDNode *N, DAGCombinerInfo &DCI) const { 6421 SDLoc SL(N); 6422 6423 if (!isTypeLegal(MVT::v2i16)) 6424 return SDValue(); 6425 SelectionDAG &DAG = DCI.DAG; 6426 EVT VT = N->getValueType(0); 6427 6428 if (VT == MVT::v2i16) { 6429 SDValue Lo = N->getOperand(0); 6430 SDValue Hi = N->getOperand(1); 6431 6432 // v2i16 build_vector (const|undef), (bitcast f16:$x) 6433 // -> bitcast (v2f16 build_vector const|undef, $x 6434 if (convertBuildVectorCastElt(DAG, Lo, Hi)) { 6435 SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Lo, Hi }); 6436 return DAG.getNode(ISD::BITCAST, SL, VT, NewVec); 6437 } 6438 6439 if (convertBuildVectorCastElt(DAG, Hi, Lo)) { 6440 SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Hi, Lo }); 6441 return DAG.getNode(ISD::BITCAST, SL, VT, NewVec); 6442 } 6443 } 6444 6445 return SDValue(); 6446 } 6447 6448 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG, 6449 const SDNode *N0, 6450 const SDNode *N1) const { 6451 EVT VT = N0->getValueType(0); 6452 6453 // Only do this if we are not trying to support denormals. 
v_mad_f32 does not 6454 // support denormals ever. 6455 if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) || 6456 (VT == MVT::f16 && !Subtarget->hasFP16Denormals())) 6457 return ISD::FMAD; 6458 6459 const TargetOptions &Options = DAG.getTarget().Options; 6460 if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || 6461 (N0->getFlags().hasUnsafeAlgebra() && 6462 N1->getFlags().hasUnsafeAlgebra())) && 6463 isFMAFasterThanFMulAndFAdd(VT)) { 6464 return ISD::FMA; 6465 } 6466 6467 return 0; 6468 } 6469 6470 static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL, 6471 EVT VT, 6472 SDValue N0, SDValue N1, SDValue N2, 6473 bool Signed) { 6474 unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32; 6475 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1); 6476 SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2); 6477 return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad); 6478 } 6479 6480 SDValue SITargetLowering::performAddCombine(SDNode *N, 6481 DAGCombinerInfo &DCI) const { 6482 SelectionDAG &DAG = DCI.DAG; 6483 EVT VT = N->getValueType(0); 6484 SDLoc SL(N); 6485 SDValue LHS = N->getOperand(0); 6486 SDValue RHS = N->getOperand(1); 6487 6488 if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL) 6489 && Subtarget->hasMad64_32() && 6490 !VT.isVector() && VT.getScalarSizeInBits() > 32 && 6491 VT.getScalarSizeInBits() <= 64) { 6492 if (LHS.getOpcode() != ISD::MUL) 6493 std::swap(LHS, RHS); 6494 6495 SDValue MulLHS = LHS.getOperand(0); 6496 SDValue MulRHS = LHS.getOperand(1); 6497 SDValue AddRHS = RHS; 6498 6499 // TODO: Maybe restrict if SGPR inputs. 6500 if (numBitsUnsigned(MulLHS, DAG) <= 32 && 6501 numBitsUnsigned(MulRHS, DAG) <= 32) { 6502 MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32); 6503 MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32); 6504 AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64); 6505 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false); 6506 } 6507 6508 if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) { 6509 MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32); 6510 MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32); 6511 AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64); 6512 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true); 6513 } 6514 6515 return SDValue(); 6516 } 6517 6518 if (VT != MVT::i32) 6519 return SDValue(); 6520 6521 // add x, zext (setcc) => addcarry x, 0, setcc 6522 // add x, sext (setcc) => subcarry x, 0, setcc 6523 unsigned Opc = LHS.getOpcode(); 6524 if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND || 6525 Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY) 6526 std::swap(RHS, LHS); 6527 6528 Opc = RHS.getOpcode(); 6529 switch (Opc) { 6530 default: break; 6531 case ISD::ZERO_EXTEND: 6532 case ISD::SIGN_EXTEND: 6533 case ISD::ANY_EXTEND: { 6534 auto Cond = RHS.getOperand(0); 6535 if (!isBoolSGPR(Cond)) 6536 break; 6537 SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1); 6538 SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond }; 6539 Opc = (Opc == ISD::SIGN_EXTEND) ? 
ISD::SUBCARRY : ISD::ADDCARRY;
6540 return DAG.getNode(Opc, SL, VTList, Args);
6541 }
6542 case ISD::ADDCARRY: {
6543 // add x, (addcarry y, 0, cc) => addcarry x, y, cc
6544 auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
6545 if (!C || C->getZExtValue() != 0) break;
6546 SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
6547 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
6548 }
6549 }
6550 return SDValue();
6551 }
6552
6553 SDValue SITargetLowering::performSubCombine(SDNode *N,
6554 DAGCombinerInfo &DCI) const {
6555 SelectionDAG &DAG = DCI.DAG;
6556 EVT VT = N->getValueType(0);
6557
6558 if (VT != MVT::i32)
6559 return SDValue();
6560
6561 SDLoc SL(N);
6562 SDValue LHS = N->getOperand(0);
6563 SDValue RHS = N->getOperand(1);
6564
6565 unsigned Opc = LHS.getOpcode();
6566 if (Opc != ISD::SUBCARRY)
6567 std::swap(RHS, LHS);
6568
6569 if (LHS.getOpcode() == ISD::SUBCARRY) {
6570 // sub (subcarry x, 0, cc), y => subcarry x, y, cc
6571 auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
6572 if (!C || C->getZExtValue() != 0)
6573 return SDValue();
6574 SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
6575 return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
6576 }
6577 return SDValue();
6578 }
6579
6580 SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
6581 DAGCombinerInfo &DCI) const {
6582
6583 if (N->getValueType(0) != MVT::i32)
6584 return SDValue();
6585
6586 auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
6587 if (!C || C->getZExtValue() != 0)
6588 return SDValue();
6589
6590 SelectionDAG &DAG = DCI.DAG;
6591 SDValue LHS = N->getOperand(0);
6592
6593 // addcarry (add x, y), 0, cc => addcarry x, y, cc
6594 // subcarry (sub x, y), 0, cc => subcarry x, y, cc
6595 unsigned LHSOpc = LHS.getOpcode();
6596 unsigned Opc = N->getOpcode();
6597 if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
6598 (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
6599 SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
6600 return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
6601 }
6602 return SDValue();
6603 }
6604
6605 SDValue SITargetLowering::performFAddCombine(SDNode *N,
6606 DAGCombinerInfo &DCI) const {
6607 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
6608 return SDValue();
6609
6610 SelectionDAG &DAG = DCI.DAG;
6611 EVT VT = N->getValueType(0);
6612
6613 SDLoc SL(N);
6614 SDValue LHS = N->getOperand(0);
6615 SDValue RHS = N->getOperand(1);
6616
6617 // These should really be instruction patterns, but writing patterns with
6618 // source modifiers is a pain.
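// Both operand orders of the outer fadd are checked below. The repeated
// operand of the inner fadd is rewritten as a multiply by 2.0, so e.g.
//   fadd (fadd a, a), b  -->  fma (or fmad) 2.0, a, b
// where getFusedOpcode chooses FMA or FMAD (or gives up) based on denormal
// support and the unsafe-math / fp-contract settings.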
6619 6620 // fadd (fadd (a, a), b) -> mad 2.0, a, b 6621 if (LHS.getOpcode() == ISD::FADD) { 6622 SDValue A = LHS.getOperand(0); 6623 if (A == LHS.getOperand(1)) { 6624 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); 6625 if (FusedOp != 0) { 6626 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 6627 return DAG.getNode(FusedOp, SL, VT, A, Two, RHS); 6628 } 6629 } 6630 } 6631 6632 // fadd (b, fadd (a, a)) -> mad 2.0, a, b 6633 if (RHS.getOpcode() == ISD::FADD) { 6634 SDValue A = RHS.getOperand(0); 6635 if (A == RHS.getOperand(1)) { 6636 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); 6637 if (FusedOp != 0) { 6638 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 6639 return DAG.getNode(FusedOp, SL, VT, A, Two, LHS); 6640 } 6641 } 6642 } 6643 6644 return SDValue(); 6645 } 6646 6647 SDValue SITargetLowering::performFSubCombine(SDNode *N, 6648 DAGCombinerInfo &DCI) const { 6649 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 6650 return SDValue(); 6651 6652 SelectionDAG &DAG = DCI.DAG; 6653 SDLoc SL(N); 6654 EVT VT = N->getValueType(0); 6655 assert(!VT.isVector()); 6656 6657 // Try to get the fneg to fold into the source modifier. This undoes generic 6658 // DAG combines and folds them into the mad. 6659 // 6660 // Only do this if we are not trying to support denormals. v_mad_f32 does 6661 // not support denormals ever. 6662 SDValue LHS = N->getOperand(0); 6663 SDValue RHS = N->getOperand(1); 6664 if (LHS.getOpcode() == ISD::FADD) { 6665 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c) 6666 SDValue A = LHS.getOperand(0); 6667 if (A == LHS.getOperand(1)) { 6668 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); 6669 if (FusedOp != 0){ 6670 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 6671 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 6672 6673 return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS); 6674 } 6675 } 6676 } 6677 6678 if (RHS.getOpcode() == ISD::FADD) { 6679 // (fsub c, (fadd a, a)) -> mad -2.0, a, c 6680 6681 SDValue A = RHS.getOperand(0); 6682 if (A == RHS.getOperand(1)) { 6683 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); 6684 if (FusedOp != 0){ 6685 const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT); 6686 return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS); 6687 } 6688 } 6689 } 6690 6691 return SDValue(); 6692 } 6693 6694 SDValue SITargetLowering::performSetCCCombine(SDNode *N, 6695 DAGCombinerInfo &DCI) const { 6696 SelectionDAG &DAG = DCI.DAG; 6697 SDLoc SL(N); 6698 6699 SDValue LHS = N->getOperand(0); 6700 SDValue RHS = N->getOperand(1); 6701 EVT VT = LHS.getValueType(); 6702 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 6703 6704 auto CRHS = dyn_cast<ConstantSDNode>(RHS); 6705 if (!CRHS) { 6706 CRHS = dyn_cast<ConstantSDNode>(LHS); 6707 if (CRHS) { 6708 std::swap(LHS, RHS); 6709 CC = getSetCCSwappedOperands(CC); 6710 } 6711 } 6712 6713 if (CRHS && VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND && 6714 isBoolSGPR(LHS.getOperand(0))) { 6715 // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1 6716 // setcc (sext from i1 cc), -1, eq|sle|uge) => cc 6717 // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1 6718 // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc 6719 if ((CRHS->isAllOnesValue() && 6720 (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) || 6721 (CRHS->isNullValue() && 6722 (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE))) 6723 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), 6724 DAG.getConstant(-1, SL, MVT::i1)); 6725 
if ((CRHS->isAllOnesValue() && 6726 (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) || 6727 (CRHS->isNullValue() && 6728 (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT))) 6729 return LHS.getOperand(0); 6730 } 6731 6732 if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() && 6733 VT != MVT::f16)) 6734 return SDValue(); 6735 6736 // Match isinf pattern 6737 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) 6738 if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) { 6739 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); 6740 if (!CRHS) 6741 return SDValue(); 6742 6743 const APFloat &APF = CRHS->getValueAPF(); 6744 if (APF.isInfinity() && !APF.isNegative()) { 6745 unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY; 6746 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0), 6747 DAG.getConstant(Mask, SL, MVT::i32)); 6748 } 6749 } 6750 6751 return SDValue(); 6752 } 6753 6754 SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N, 6755 DAGCombinerInfo &DCI) const { 6756 SelectionDAG &DAG = DCI.DAG; 6757 SDLoc SL(N); 6758 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; 6759 6760 SDValue Src = N->getOperand(0); 6761 SDValue Srl = N->getOperand(0); 6762 if (Srl.getOpcode() == ISD::ZERO_EXTEND) 6763 Srl = Srl.getOperand(0); 6764 6765 // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero. 6766 if (Srl.getOpcode() == ISD::SRL) { 6767 // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x 6768 // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x 6769 // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x 6770 6771 if (const ConstantSDNode *C = 6772 dyn_cast<ConstantSDNode>(Srl.getOperand(1))) { 6773 Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)), 6774 EVT(MVT::i32)); 6775 6776 unsigned SrcOffset = C->getZExtValue() + 8 * Offset; 6777 if (SrcOffset < 32 && SrcOffset % 8 == 0) { 6778 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL, 6779 MVT::f32, Srl); 6780 } 6781 } 6782 } 6783 6784 APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8); 6785 6786 KnownBits Known; 6787 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 6788 !DCI.isBeforeLegalizeOps()); 6789 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6790 if (TLI.ShrinkDemandedConstant(Src, Demanded, TLO) || 6791 TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) { 6792 DCI.CommitTargetLoweringOpt(TLO); 6793 } 6794 6795 return SDValue(); 6796 } 6797 6798 SDValue SITargetLowering::PerformDAGCombine(SDNode *N, 6799 DAGCombinerInfo &DCI) const { 6800 switch (N->getOpcode()) { 6801 default: 6802 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 6803 case ISD::ADD: 6804 return performAddCombine(N, DCI); 6805 case ISD::SUB: 6806 return performSubCombine(N, DCI); 6807 case ISD::ADDCARRY: 6808 case ISD::SUBCARRY: 6809 return performAddCarrySubCarryCombine(N, DCI); 6810 case ISD::FADD: 6811 return performFAddCombine(N, DCI); 6812 case ISD::FSUB: 6813 return performFSubCombine(N, DCI); 6814 case ISD::SETCC: 6815 return performSetCCCombine(N, DCI); 6816 case ISD::FMAXNUM: 6817 case ISD::FMINNUM: 6818 case ISD::SMAX: 6819 case ISD::SMIN: 6820 case ISD::UMAX: 6821 case ISD::UMIN: 6822 case AMDGPUISD::FMIN_LEGACY: 6823 case AMDGPUISD::FMAX_LEGACY: { 6824 if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG && 6825 getTargetMachine().getOptLevel() > CodeGenOpt::None) 6826 return performMinMaxCombine(N, DCI); 6827 break; 6828 } 6829 case ISD::LOAD: 6830 
case ISD::STORE: 6831 case ISD::ATOMIC_LOAD: 6832 case ISD::ATOMIC_STORE: 6833 case ISD::ATOMIC_CMP_SWAP: 6834 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 6835 case ISD::ATOMIC_SWAP: 6836 case ISD::ATOMIC_LOAD_ADD: 6837 case ISD::ATOMIC_LOAD_SUB: 6838 case ISD::ATOMIC_LOAD_AND: 6839 case ISD::ATOMIC_LOAD_OR: 6840 case ISD::ATOMIC_LOAD_XOR: 6841 case ISD::ATOMIC_LOAD_NAND: 6842 case ISD::ATOMIC_LOAD_MIN: 6843 case ISD::ATOMIC_LOAD_MAX: 6844 case ISD::ATOMIC_LOAD_UMIN: 6845 case ISD::ATOMIC_LOAD_UMAX: 6846 case AMDGPUISD::ATOMIC_INC: 6847 case AMDGPUISD::ATOMIC_DEC: 6848 case AMDGPUISD::ATOMIC_LOAD_FADD: 6849 case AMDGPUISD::ATOMIC_LOAD_FMIN: 6850 case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics. 6851 if (DCI.isBeforeLegalize()) 6852 break; 6853 return performMemSDNodeCombine(cast<MemSDNode>(N), DCI); 6854 case ISD::AND: 6855 return performAndCombine(N, DCI); 6856 case ISD::OR: 6857 return performOrCombine(N, DCI); 6858 case ISD::XOR: 6859 return performXorCombine(N, DCI); 6860 case ISD::ZERO_EXTEND: 6861 return performZeroExtendCombine(N, DCI); 6862 case AMDGPUISD::FP_CLASS: 6863 return performClassCombine(N, DCI); 6864 case ISD::FCANONICALIZE: 6865 return performFCanonicalizeCombine(N, DCI); 6866 case AMDGPUISD::FRACT: 6867 case AMDGPUISD::RCP: 6868 case AMDGPUISD::RSQ: 6869 case AMDGPUISD::RCP_LEGACY: 6870 case AMDGPUISD::RSQ_LEGACY: 6871 case AMDGPUISD::RSQ_CLAMP: 6872 case AMDGPUISD::LDEXP: { 6873 SDValue Src = N->getOperand(0); 6874 if (Src.isUndef()) 6875 return Src; 6876 break; 6877 } 6878 case ISD::SINT_TO_FP: 6879 case ISD::UINT_TO_FP: 6880 return performUCharToFloatCombine(N, DCI); 6881 case AMDGPUISD::CVT_F32_UBYTE0: 6882 case AMDGPUISD::CVT_F32_UBYTE1: 6883 case AMDGPUISD::CVT_F32_UBYTE2: 6884 case AMDGPUISD::CVT_F32_UBYTE3: 6885 return performCvtF32UByteNCombine(N, DCI); 6886 case AMDGPUISD::FMED3: 6887 return performFMed3Combine(N, DCI); 6888 case AMDGPUISD::CVT_PKRTZ_F16_F32: 6889 return performCvtPkRTZCombine(N, DCI); 6890 case ISD::SCALAR_TO_VECTOR: { 6891 SelectionDAG &DAG = DCI.DAG; 6892 EVT VT = N->getValueType(0); 6893 6894 // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x)) 6895 if (VT == MVT::v2i16 || VT == MVT::v2f16) { 6896 SDLoc SL(N); 6897 SDValue Src = N->getOperand(0); 6898 EVT EltVT = Src.getValueType(); 6899 if (EltVT == MVT::f16) 6900 Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src); 6901 6902 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src); 6903 return DAG.getNode(ISD::BITCAST, SL, VT, Ext); 6904 } 6905 6906 break; 6907 } 6908 case ISD::EXTRACT_VECTOR_ELT: 6909 return performExtractVectorEltCombine(N, DCI); 6910 case ISD::BUILD_VECTOR: 6911 return performBuildVectorCombine(N, DCI); 6912 } 6913 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 6914 } 6915 6916 /// \brief Helper function for adjustWritemask 6917 static unsigned SubIdx2Lane(unsigned Idx) { 6918 switch (Idx) { 6919 default: return 0; 6920 case AMDGPU::sub0: return 0; 6921 case AMDGPU::sub1: return 1; 6922 case AMDGPU::sub2: return 2; 6923 case AMDGPU::sub3: return 3; 6924 } 6925 } 6926 6927 /// \brief Adjust the writemask of MIMG instructions 6928 SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node, 6929 SelectionDAG &DAG) const { 6930 SDNode *Users[4] = { nullptr }; 6931 unsigned Lane = 0; 6932 unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 
2 : 3; 6933 unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx); 6934 unsigned NewDmask = 0; 6935 bool HasChain = Node->getNumValues() > 1; 6936 6937 if (OldDmask == 0) { 6938 // These are folded out, but on the chance it happens don't assert. 6939 return Node; 6940 } 6941 6942 // Try to figure out the used register components 6943 for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end(); 6944 I != E; ++I) { 6945 6946 // Don't look at users of the chain. 6947 if (I.getUse().getResNo() != 0) 6948 continue; 6949 6950 // Abort if we can't understand the usage 6951 if (!I->isMachineOpcode() || 6952 I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) 6953 return Node; 6954 6955 // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used. 6956 // Note that subregs are packed, i.e. Lane==0 is the first bit set 6957 // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit 6958 // set, etc. 6959 Lane = SubIdx2Lane(I->getConstantOperandVal(1)); 6960 6961 // Set which texture component corresponds to the lane. 6962 unsigned Comp; 6963 for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) { 6964 Comp = countTrailingZeros(Dmask); 6965 Dmask &= ~(1 << Comp); 6966 } 6967 6968 // Abort if we have more than one user per component 6969 if (Users[Lane]) 6970 return Node; 6971 6972 Users[Lane] = *I; 6973 NewDmask |= 1 << Comp; 6974 } 6975 6976 // Abort if there's no change 6977 if (NewDmask == OldDmask) 6978 return Node; 6979 6980 unsigned BitsSet = countPopulation(NewDmask); 6981 6982 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 6983 int NewOpcode = AMDGPU::getMaskedMIMGOp(*TII, 6984 Node->getMachineOpcode(), BitsSet); 6985 assert(NewOpcode != -1 && 6986 NewOpcode != static_cast<int>(Node->getMachineOpcode()) && 6987 "failed to find equivalent MIMG op"); 6988 6989 // Adjust the writemask in the node 6990 SmallVector<SDValue, 12> Ops; 6991 Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx); 6992 Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32)); 6993 Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end()); 6994 6995 MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT(); 6996 6997 MVT ResultVT = BitsSet == 1 ? 6998 SVT : MVT::getVectorVT(SVT, BitsSet == 3 ? 4 : BitsSet); 6999 SDVTList NewVTList = HasChain ? 7000 DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT); 7001 7002 7003 MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node), 7004 NewVTList, Ops); 7005 7006 if (HasChain) { 7007 // Update chain. 
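// The rewritten MIMG node must keep the original memory operands, and any
// users of the old chain result (value #1) are redirected to the new node's
// chain so the memory dependence is preserved.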
7008 NewNode->setMemRefs(Node->memoperands_begin(), Node->memoperands_end());
7009 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
7010 }
7011
7012 if (BitsSet == 1) {
7013 assert(Node->hasNUsesOfValue(1, 0));
7014 SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
7015 SDLoc(Node), Users[Lane]->getValueType(0),
7016 SDValue(NewNode, 0));
7017 DAG.ReplaceAllUsesWith(Users[Lane], Copy);
7018 return nullptr;
7019 }
7020
7021 // Update the users of the node with the new indices
7022 for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
7023 SDNode *User = Users[i];
7024 if (!User)
7025 continue;
7026
7027 SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
7028 DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
7029
7030 switch (Idx) {
7031 default: break;
7032 case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
7033 case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
7034 case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
7035 }
7036 }
7037
7038 DAG.RemoveDeadNode(Node);
7039 return nullptr;
7040 }
7041
7042 static bool isFrameIndexOp(SDValue Op) {
7043 if (Op.getOpcode() == ISD::AssertZext)
7044 Op = Op.getOperand(0);
7045
7046 return isa<FrameIndexSDNode>(Op);
7047 }
7048
7049 /// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
7050 /// with frame index operands.
7051 /// LLVM assumes that inputs to these instructions are registers.
7052 SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
7053 SelectionDAG &DAG) const {
7054 if (Node->getOpcode() == ISD::CopyToReg) {
7055 RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
7056 SDValue SrcVal = Node->getOperand(2);
7057
7058 // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
7059 // to try understanding copies to physical registers.
7060 if (SrcVal.getValueType() == MVT::i1 &&
7061 TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
7062 SDLoc SL(Node);
7063 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
7064 SDValue VReg = DAG.getRegister(
7065 MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
7066
7067 SDNode *Glued = Node->getGluedNode();
7068 SDValue ToVReg
7069 = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
7070 SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
7071 SDValue ToResultReg
7072 = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
7073 VReg, ToVReg.getValue(1));
7074 DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
7075 DAG.RemoveDeadNode(Node);
7076 return ToResultReg.getNode();
7077 }
7078 }
7079
7080 SmallVector<SDValue, 8> Ops;
7081 for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
7082 if (!isFrameIndexOp(Node->getOperand(i))) {
7083 Ops.push_back(Node->getOperand(i));
7084 continue;
7085 }
7086
7087 SDLoc DL(Node);
7088 Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
7089 Node->getOperand(i).getValueType(),
7090 Node->getOperand(i)), 0));
7091 }
7092
7093 return DAG.UpdateNodeOperands(Node, Ops);
7094 }
7095
7096 /// \brief Fold the instructions after selecting them.
7097 /// Returns null if users were already updated.
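/// At present this covers shrinking the dmask of MIMG loads via
/// adjustWritemask(), legalizing frame-index operands of INSERT_SUBREG and
/// REG_SEQUENCE, and tying the source operands of V_DIV_SCALE_F32/F64 when
/// some of the inputs are undefined.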
7098 SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, 7099 SelectionDAG &DAG) const { 7100 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 7101 unsigned Opcode = Node->getMachineOpcode(); 7102 7103 if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() && 7104 !TII->isGather4(Opcode)) { 7105 return adjustWritemask(Node, DAG); 7106 } 7107 7108 if (Opcode == AMDGPU::INSERT_SUBREG || 7109 Opcode == AMDGPU::REG_SEQUENCE) { 7110 legalizeTargetIndependentNode(Node, DAG); 7111 return Node; 7112 } 7113 7114 switch (Opcode) { 7115 case AMDGPU::V_DIV_SCALE_F32: 7116 case AMDGPU::V_DIV_SCALE_F64: { 7117 // Satisfy the operand register constraint when one of the inputs is 7118 // undefined. Ordinarily each undef value will have its own implicit_def of 7119 // a vreg, so force these to use a single register. 7120 SDValue Src0 = Node->getOperand(0); 7121 SDValue Src1 = Node->getOperand(1); 7122 SDValue Src2 = Node->getOperand(2); 7123 7124 if ((Src0.isMachineOpcode() && 7125 Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) && 7126 (Src0 == Src1 || Src0 == Src2)) 7127 break; 7128 7129 MVT VT = Src0.getValueType().getSimpleVT(); 7130 const TargetRegisterClass *RC = getRegClassFor(VT); 7131 7132 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 7133 SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT); 7134 7135 SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node), 7136 UndefReg, Src0, SDValue()); 7137 7138 // src0 must be the same register as src1 or src2, even if the value is 7139 // undefined, so make sure we don't violate this constraint. 7140 if (Src0.isMachineOpcode() && 7141 Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) { 7142 if (Src1.isMachineOpcode() && 7143 Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) 7144 Src0 = Src1; 7145 else if (Src2.isMachineOpcode() && 7146 Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) 7147 Src0 = Src2; 7148 else { 7149 assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF); 7150 Src0 = UndefReg; 7151 Src1 = UndefReg; 7152 } 7153 } else 7154 break; 7155 7156 SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 }; 7157 for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I) 7158 Ops.push_back(Node->getOperand(I)); 7159 7160 Ops.push_back(ImpDef.getValue(1)); 7161 return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops); 7162 } 7163 default: 7164 break; 7165 } 7166 7167 return Node; 7168 } 7169 7170 /// \brief Assign the register class depending on the number of 7171 /// bits set in the writemask 7172 void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, 7173 SDNode *Node) const { 7174 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 7175 7176 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 7177 7178 if (TII->isVOP3(MI.getOpcode())) { 7179 // Make sure constant bus requirements are respected. 7180 TII->legalizeOperandsVOP3(MRI, MI); 7181 return; 7182 } 7183 7184 // Replace unused atomics with the no return version. 7185 int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode()); 7186 if (NoRetAtomicOp != -1) { 7187 if (!Node->hasAnyUseOfValue(0)) { 7188 MI.setDesc(TII->get(NoRetAtomicOp)); 7189 MI.RemoveOperand(0); 7190 return; 7191 } 7192 7193 // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg 7194 // instruction, because the return type of these instructions is a vec2 of 7195 // the memory type, so it can be tied to the input operand. 
7196 // This means these instructions always have a use, so we need to add a 7197 // special case to check if the atomic has only one extract_subreg use, 7198 // which itself has no uses. 7199 if ((Node->hasNUsesOfValue(1, 0) && 7200 Node->use_begin()->isMachineOpcode() && 7201 Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG && 7202 !Node->use_begin()->hasAnyUseOfValue(0))) { 7203 unsigned Def = MI.getOperand(0).getReg(); 7204 7205 // Change this into a noret atomic. 7206 MI.setDesc(TII->get(NoRetAtomicOp)); 7207 MI.RemoveOperand(0); 7208 7209 // If we only remove the def operand from the atomic instruction, the 7210 // extract_subreg will be left with a use of a vreg without a def. 7211 // So we need to insert an implicit_def to avoid machine verifier 7212 // errors. 7213 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), 7214 TII->get(AMDGPU::IMPLICIT_DEF), Def); 7215 } 7216 return; 7217 } 7218 } 7219 7220 static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL, 7221 uint64_t Val) { 7222 SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32); 7223 return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0); 7224 } 7225 7226 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, 7227 const SDLoc &DL, 7228 SDValue Ptr) const { 7229 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 7230 7231 // Build the half of the subregister with the constants before building the 7232 // full 128-bit register. If we are building multiple resource descriptors, 7233 // this will allow CSEing of the 2-component register. 7234 const SDValue Ops0[] = { 7235 DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32), 7236 buildSMovImm32(DAG, DL, 0), 7237 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), 7238 buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32), 7239 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32) 7240 }; 7241 7242 SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, 7243 MVT::v2i32, Ops0), 0); 7244 7245 // Combine the constants and the pointer. 7246 const SDValue Ops1[] = { 7247 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), 7248 Ptr, 7249 DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32), 7250 SubRegHi, 7251 DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32) 7252 }; 7253 7254 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); 7255 } 7256 7257 /// \brief Return a resource descriptor with the 'Add TID' bit enabled 7258 /// The TID (Thread ID) is multiplied by the stride value (bits [61:48] 7259 /// of the resource descriptor) to create an offset, which is added to 7260 /// the resource pointer. 
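/// The result is assembled as a REG_SEQUENCE of four 32-bit words: word 0 is
/// the low half of \p Ptr, word 1 is the high half of \p Ptr optionally OR'd
/// with \p RsrcDword1, and words 2 and 3 are taken from \p RsrcDword2And3.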
7261 MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL, 7262 SDValue Ptr, uint32_t RsrcDword1, 7263 uint64_t RsrcDword2And3) const { 7264 SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr); 7265 SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr); 7266 if (RsrcDword1) { 7267 PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi, 7268 DAG.getConstant(RsrcDword1, DL, MVT::i32)), 7269 0); 7270 } 7271 7272 SDValue DataLo = buildSMovImm32(DAG, DL, 7273 RsrcDword2And3 & UINT64_C(0xFFFFFFFF)); 7274 SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32); 7275 7276 const SDValue Ops[] = { 7277 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), 7278 PtrLo, 7279 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), 7280 PtrHi, 7281 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32), 7282 DataLo, 7283 DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32), 7284 DataHi, 7285 DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32) 7286 }; 7287 7288 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops); 7289 } 7290 7291 //===----------------------------------------------------------------------===// 7292 // SI Inline Assembly Support 7293 //===----------------------------------------------------------------------===// 7294 7295 std::pair<unsigned, const TargetRegisterClass *> 7296 SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 7297 StringRef Constraint, 7298 MVT VT) const { 7299 if (!isTypeLegal(VT)) 7300 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 7301 7302 if (Constraint.size() == 1) { 7303 switch (Constraint[0]) { 7304 case 's': 7305 case 'r': 7306 switch (VT.getSizeInBits()) { 7307 default: 7308 return std::make_pair(0U, nullptr); 7309 case 32: 7310 case 16: 7311 return std::make_pair(0U, &AMDGPU::SReg_32_XM0RegClass); 7312 case 64: 7313 return std::make_pair(0U, &AMDGPU::SGPR_64RegClass); 7314 case 128: 7315 return std::make_pair(0U, &AMDGPU::SReg_128RegClass); 7316 case 256: 7317 return std::make_pair(0U, &AMDGPU::SReg_256RegClass); 7318 case 512: 7319 return std::make_pair(0U, &AMDGPU::SReg_512RegClass); 7320 } 7321 7322 case 'v': 7323 switch (VT.getSizeInBits()) { 7324 default: 7325 return std::make_pair(0U, nullptr); 7326 case 32: 7327 case 16: 7328 return std::make_pair(0U, &AMDGPU::VGPR_32RegClass); 7329 case 64: 7330 return std::make_pair(0U, &AMDGPU::VReg_64RegClass); 7331 case 96: 7332 return std::make_pair(0U, &AMDGPU::VReg_96RegClass); 7333 case 128: 7334 return std::make_pair(0U, &AMDGPU::VReg_128RegClass); 7335 case 256: 7336 return std::make_pair(0U, &AMDGPU::VReg_256RegClass); 7337 case 512: 7338 return std::make_pair(0U, &AMDGPU::VReg_512RegClass); 7339 } 7340 } 7341 } 7342 7343 if (Constraint.size() > 1) { 7344 const TargetRegisterClass *RC = nullptr; 7345 if (Constraint[1] == 'v') { 7346 RC = &AMDGPU::VGPR_32RegClass; 7347 } else if (Constraint[1] == 's') { 7348 RC = &AMDGPU::SGPR_32RegClass; 7349 } 7350 7351 if (RC) { 7352 uint32_t Idx; 7353 bool Failed = Constraint.substr(2).getAsInteger(10, Idx); 7354 if (!Failed && Idx < RC->getNumRegs()) 7355 return std::make_pair(RC->getRegister(Idx), RC); 7356 } 7357 } 7358 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 7359 } 7360 7361 SITargetLowering::ConstraintType 7362 SITargetLowering::getConstraintType(StringRef Constraint) const { 7363 if (Constraint.size() == 1) { 7364 switch (Constraint[0]) { 7365 default: break; 7366 case 's': 
7367 case 'v': 7368 return C_RegisterClass; 7369 } 7370 } 7371 return TargetLowering::getConstraintType(Constraint); 7372 } 7373 7374 // Figure out which registers should be reserved for stack access. Only after 7375 // the function is legalized do we know all of the non-spill stack objects or if 7376 // calls are present. 7377 void SITargetLowering::finalizeLowering(MachineFunction &MF) const { 7378 MachineRegisterInfo &MRI = MF.getRegInfo(); 7379 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 7380 const MachineFrameInfo &MFI = MF.getFrameInfo(); 7381 const SISubtarget &ST = MF.getSubtarget<SISubtarget>(); 7382 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 7383 7384 if (Info->isEntryFunction()) { 7385 // Callable functions have fixed registers used for stack access. 7386 reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info); 7387 } 7388 7389 // We have to assume the SP is needed in case there are calls in the function 7390 // during lowering. Calls are only detected after the function is 7391 // lowered. We're about to reserve registers, so don't bother using it if we 7392 // aren't really going to use it. 7393 bool NeedSP = !Info->isEntryFunction() || 7394 MFI.hasVarSizedObjects() || 7395 MFI.hasCalls(); 7396 7397 if (NeedSP) { 7398 unsigned ReservedStackPtrOffsetReg = TRI->reservedStackPtrOffsetReg(MF); 7399 Info->setStackPtrOffsetReg(ReservedStackPtrOffsetReg); 7400 7401 assert(Info->getStackPtrOffsetReg() != Info->getFrameOffsetReg()); 7402 assert(!TRI->isSubRegister(Info->getScratchRSrcReg(), 7403 Info->getStackPtrOffsetReg())); 7404 MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg()); 7405 } 7406 7407 MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg()); 7408 MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg()); 7409 MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG, 7410 Info->getScratchWaveOffsetReg()); 7411 7412 TargetLoweringBase::finalizeLowering(MF); 7413 } 7414 7415 void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op, 7416 KnownBits &Known, 7417 const APInt &DemandedElts, 7418 const SelectionDAG &DAG, 7419 unsigned Depth) const { 7420 TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts, 7421 DAG, Depth); 7422 7423 if (getSubtarget()->enableHugePrivateBuffer()) 7424 return; 7425 7426 // Technically it may be possible to have a dispatch with a single workitem 7427 // that uses the full private memory size, but that's not really useful. We 7428 // can't use vaddr in MUBUF instructions if we don't know the address 7429 // calculation won't overflow, so assume the sign bit is never set. 7430 Known.Zero.setHighBits(AssumeFrameIndexHighZeroBits); 7431 } 7432
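// Usage sketch for the inline assembly constraints handled above (the asm
// string itself is only illustrative):
//   asm volatile ("v_mov_b32 %0, %1" : "=v"(out) : "s"(in));
// maps the 'v' output to VGPR_32 and the 's' input to SReg_32_XM0 for 32-bit
// values via getRegForInlineAsmConstraint, while getConstraintType reports
// both constraints as C_RegisterClass.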