1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file 10 /// Custom DAG lowering for SI 11 // 12 //===----------------------------------------------------------------------===// 13 14 #if defined(_MSC_VER) || defined(__MINGW32__) 15 // Provide M_PI. 16 #define _USE_MATH_DEFINES 17 #endif 18 19 #include "SIISelLowering.h" 20 #include "AMDGPU.h" 21 #include "AMDGPUSubtarget.h" 22 #include "AMDGPUTargetMachine.h" 23 #include "SIDefines.h" 24 #include "SIInstrInfo.h" 25 #include "SIMachineFunctionInfo.h" 26 #include "SIRegisterInfo.h" 27 #include "MCTargetDesc/AMDGPUMCTargetDesc.h" 28 #include "Utils/AMDGPUBaseInfo.h" 29 #include "llvm/ADT/APFloat.h" 30 #include "llvm/ADT/APInt.h" 31 #include "llvm/ADT/ArrayRef.h" 32 #include "llvm/ADT/BitVector.h" 33 #include "llvm/ADT/SmallVector.h" 34 #include "llvm/ADT/Statistic.h" 35 #include "llvm/ADT/StringRef.h" 36 #include "llvm/ADT/StringSwitch.h" 37 #include "llvm/ADT/Twine.h" 38 #include "llvm/CodeGen/Analysis.h" 39 #include "llvm/CodeGen/CallingConvLower.h" 40 #include "llvm/CodeGen/DAGCombine.h" 41 #include "llvm/CodeGen/ISDOpcodes.h" 42 #include "llvm/CodeGen/MachineBasicBlock.h" 43 #include "llvm/CodeGen/MachineFrameInfo.h" 44 #include "llvm/CodeGen/MachineFunction.h" 45 #include "llvm/CodeGen/MachineInstr.h" 46 #include "llvm/CodeGen/MachineInstrBuilder.h" 47 #include "llvm/CodeGen/MachineMemOperand.h" 48 #include "llvm/CodeGen/MachineModuleInfo.h" 49 #include "llvm/CodeGen/MachineOperand.h" 50 #include "llvm/CodeGen/MachineRegisterInfo.h" 51 #include "llvm/CodeGen/SelectionDAG.h" 52 #include "llvm/CodeGen/SelectionDAGNodes.h" 53 #include "llvm/CodeGen/TargetCallingConv.h" 54 #include "llvm/CodeGen/TargetRegisterInfo.h" 55 #include "llvm/CodeGen/ValueTypes.h" 56 #include "llvm/IR/Constants.h" 57 #include "llvm/IR/DataLayout.h" 58 #include "llvm/IR/DebugLoc.h" 59 #include "llvm/IR/DerivedTypes.h" 60 #include "llvm/IR/DiagnosticInfo.h" 61 #include "llvm/IR/Function.h" 62 #include "llvm/IR/GlobalValue.h" 63 #include "llvm/IR/InstrTypes.h" 64 #include "llvm/IR/Instruction.h" 65 #include "llvm/IR/Instructions.h" 66 #include "llvm/IR/IntrinsicInst.h" 67 #include "llvm/IR/Type.h" 68 #include "llvm/Support/Casting.h" 69 #include "llvm/Support/CodeGen.h" 70 #include "llvm/Support/CommandLine.h" 71 #include "llvm/Support/Compiler.h" 72 #include "llvm/Support/ErrorHandling.h" 73 #include "llvm/Support/KnownBits.h" 74 #include "llvm/Support/MachineValueType.h" 75 #include "llvm/Support/MathExtras.h" 76 #include "llvm/Target/TargetOptions.h" 77 #include <cassert> 78 #include <cmath> 79 #include <cstdint> 80 #include <iterator> 81 #include <tuple> 82 #include <utility> 83 #include <vector> 84 85 using namespace llvm; 86 87 #define DEBUG_TYPE "si-lower" 88 89 STATISTIC(NumTailCalls, "Number of tail calls"); 90 91 static cl::opt<bool> EnableVGPRIndexMode( 92 "amdgpu-vgpr-index-mode", 93 cl::desc("Use GPR indexing mode instead of movrel for vector indexing"), 94 cl::init(false)); 95 96 static cl::opt<bool> DisableLoopAlignment( 97 "amdgpu-disable-loop-alignment", 98 cl::desc("Do not align and prefetch loops"), 99 cl::init(false)); 100 101 static unsigned findFirstFreeSGPR(CCState &CCInfo) { 102 unsigned NumSGPRs = 
AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const GCNSubtarget &STI)
    : AMDGPUTargetLowering(TM, STI),
      Subtarget(&STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
  addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
  addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);

    // Unless there are also VOP3P operations, no operations are really legal.
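    // The packed 16-bit vector types below are only given scalar register
    // classes (32-bit for v2i16/v2f16, 64-bit for v4i16/v4f16); whether their
    // arithmetic is actually marked Legal is decided further down, gated on
    // Subtarget->hasVOP3PInsts().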
148 addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass); 149 addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass); 150 addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass); 151 addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass); 152 } 153 154 computeRegisterProperties(Subtarget->getRegisterInfo()); 155 156 // We need to custom lower vector stores from local memory 157 setOperationAction(ISD::LOAD, MVT::v2i32, Custom); 158 setOperationAction(ISD::LOAD, MVT::v3i32, Custom); 159 setOperationAction(ISD::LOAD, MVT::v4i32, Custom); 160 setOperationAction(ISD::LOAD, MVT::v5i32, Custom); 161 setOperationAction(ISD::LOAD, MVT::v8i32, Custom); 162 setOperationAction(ISD::LOAD, MVT::v16i32, Custom); 163 setOperationAction(ISD::LOAD, MVT::i1, Custom); 164 setOperationAction(ISD::LOAD, MVT::v32i32, Custom); 165 166 setOperationAction(ISD::STORE, MVT::v2i32, Custom); 167 setOperationAction(ISD::STORE, MVT::v3i32, Custom); 168 setOperationAction(ISD::STORE, MVT::v4i32, Custom); 169 setOperationAction(ISD::STORE, MVT::v5i32, Custom); 170 setOperationAction(ISD::STORE, MVT::v8i32, Custom); 171 setOperationAction(ISD::STORE, MVT::v16i32, Custom); 172 setOperationAction(ISD::STORE, MVT::i1, Custom); 173 setOperationAction(ISD::STORE, MVT::v32i32, Custom); 174 175 setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand); 176 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand); 177 setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand); 178 setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand); 179 setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand); 180 setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand); 181 setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand); 182 setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand); 183 setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand); 184 setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand); 185 186 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 187 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); 188 189 setOperationAction(ISD::SELECT, MVT::i1, Promote); 190 setOperationAction(ISD::SELECT, MVT::i64, Custom); 191 setOperationAction(ISD::SELECT, MVT::f64, Promote); 192 AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64); 193 194 setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); 195 setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); 196 setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); 197 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); 198 setOperationAction(ISD::SELECT_CC, MVT::i1, Expand); 199 200 setOperationAction(ISD::SETCC, MVT::i1, Promote); 201 setOperationAction(ISD::SETCC, MVT::v2i1, Expand); 202 setOperationAction(ISD::SETCC, MVT::v4i1, Expand); 203 AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); 204 205 setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand); 206 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand); 207 208 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom); 209 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom); 210 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom); 211 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom); 212 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom); 213 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom); 214 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom); 215 216 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 217 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom); 218 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom); 219 
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom); 220 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom); 221 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom); 222 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom); 223 224 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom); 225 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom); 226 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom); 227 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); 228 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom); 229 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom); 230 231 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); 232 setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom); 233 setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom); 234 setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom); 235 setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom); 236 setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom); 237 238 setOperationAction(ISD::BRCOND, MVT::Other, Custom); 239 setOperationAction(ISD::BR_CC, MVT::i1, Expand); 240 setOperationAction(ISD::BR_CC, MVT::i32, Expand); 241 setOperationAction(ISD::BR_CC, MVT::i64, Expand); 242 setOperationAction(ISD::BR_CC, MVT::f32, Expand); 243 setOperationAction(ISD::BR_CC, MVT::f64, Expand); 244 245 setOperationAction(ISD::UADDO, MVT::i32, Legal); 246 setOperationAction(ISD::USUBO, MVT::i32, Legal); 247 248 setOperationAction(ISD::ADDCARRY, MVT::i32, Legal); 249 setOperationAction(ISD::SUBCARRY, MVT::i32, Legal); 250 251 setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand); 252 setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand); 253 setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand); 254 255 #if 0 256 setOperationAction(ISD::ADDCARRY, MVT::i64, Legal); 257 setOperationAction(ISD::SUBCARRY, MVT::i64, Legal); 258 #endif 259 260 // We only support LOAD/STORE and vector manipulation ops for vectors 261 // with > 4 elements. 262 for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32, 263 MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16, MVT::v32i32 }) { 264 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { 265 switch (Op) { 266 case ISD::LOAD: 267 case ISD::STORE: 268 case ISD::BUILD_VECTOR: 269 case ISD::BITCAST: 270 case ISD::EXTRACT_VECTOR_ELT: 271 case ISD::INSERT_VECTOR_ELT: 272 case ISD::INSERT_SUBVECTOR: 273 case ISD::EXTRACT_SUBVECTOR: 274 case ISD::SCALAR_TO_VECTOR: 275 break; 276 case ISD::CONCAT_VECTORS: 277 setOperationAction(Op, VT, Custom); 278 break; 279 default: 280 setOperationAction(Op, VT, Expand); 281 break; 282 } 283 } 284 } 285 286 setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand); 287 288 // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that 289 // is expanded to avoid having two separate loops in case the index is a VGPR. 290 291 // Most operations are naturally 32-bit vector operations. We only support 292 // load and store of i64 vectors, so promote v2i64 vector operations to v4i32. 
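  //
  // Concretely, the loop below marks BUILD_VECTOR, EXTRACT_VECTOR_ELT,
  // INSERT_VECTOR_ELT and SCALAR_TO_VECTOR on v2i64/v2f64 as Promote, with
  // v4i32 registered via AddPromotedToType as the type the operation is
  // actually performed in.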
293 for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) { 294 setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); 295 AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32); 296 297 setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); 298 AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32); 299 300 setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); 301 AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32); 302 303 setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); 304 AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32); 305 } 306 307 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand); 308 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand); 309 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand); 310 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand); 311 312 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom); 313 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom); 314 315 // Avoid stack access for these. 316 // TODO: Generalize to more vector types. 317 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom); 318 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom); 319 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom); 320 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom); 321 322 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom); 323 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom); 324 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom); 325 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom); 326 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom); 327 328 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom); 329 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom); 330 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom); 331 332 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom); 333 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom); 334 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom); 335 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom); 336 337 // Deal with vec3 vector operations when widened to vec4. 338 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Expand); 339 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Expand); 340 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Expand); 341 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Expand); 342 343 // Deal with vec5 vector operations when widened to vec8. 
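  // (Same pattern as the vec3/vec4 block above: the odd-sized vectors are
  // widened by type legalization, so the subvector insert forms have to be
  // expanded rather than treated as legal.)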
344 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Expand); 345 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Expand); 346 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Expand); 347 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Expand); 348 349 // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling, 350 // and output demarshalling 351 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); 352 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); 353 354 // We can't return success/failure, only the old value, 355 // let LLVM add the comparison 356 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand); 357 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand); 358 359 if (Subtarget->hasFlatAddressSpace()) { 360 setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom); 361 setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom); 362 } 363 364 setOperationAction(ISD::BSWAP, MVT::i32, Legal); 365 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); 366 367 // On SI this is s_memtime and s_memrealtime on VI. 368 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal); 369 setOperationAction(ISD::TRAP, MVT::Other, Custom); 370 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom); 371 372 if (Subtarget->has16BitInsts()) { 373 setOperationAction(ISD::FLOG, MVT::f16, Custom); 374 setOperationAction(ISD::FEXP, MVT::f16, Custom); 375 setOperationAction(ISD::FLOG10, MVT::f16, Custom); 376 } 377 378 // v_mad_f32 does not support denormals according to some sources. 379 if (!Subtarget->hasFP32Denormals()) 380 setOperationAction(ISD::FMAD, MVT::f32, Legal); 381 382 if (!Subtarget->hasBFI()) { 383 // fcopysign can be done in a single instruction with BFI. 384 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 385 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 386 } 387 388 if (!Subtarget->hasBCNT(32)) 389 setOperationAction(ISD::CTPOP, MVT::i32, Expand); 390 391 if (!Subtarget->hasBCNT(64)) 392 setOperationAction(ISD::CTPOP, MVT::i64, Expand); 393 394 if (Subtarget->hasFFBH()) 395 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom); 396 397 if (Subtarget->hasFFBL()) 398 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom); 399 400 // We only really have 32-bit BFE instructions (and 16-bit on VI). 401 // 402 // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any 403 // effort to match them now. We want this to be false for i64 cases when the 404 // extraction isn't restricted to the upper or lower half. Ideally we would 405 // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that 406 // span the midpoint are probably relatively rare, so don't worry about them 407 // for now. 408 if (Subtarget->hasBFE()) 409 setHasExtractBitsInsn(true); 410 411 setOperationAction(ISD::FMINNUM, MVT::f32, Custom); 412 setOperationAction(ISD::FMAXNUM, MVT::f32, Custom); 413 setOperationAction(ISD::FMINNUM, MVT::f64, Custom); 414 setOperationAction(ISD::FMAXNUM, MVT::f64, Custom); 415 416 417 // These are really only legal for ieee_mode functions. We should be avoiding 418 // them for functions that don't have ieee_mode enabled, so just say they are 419 // legal. 
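  // (FMINNUM_IEEE/FMAXNUM_IEEE use the IEEE-754-2008 rule that a quiet NaN
  // operand is ignored in favor of the other operand, which is what the
  // hardware min/max instructions are expected to implement when the IEEE
  // mode bit is set, hence the note above about ieee_mode functions.)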
420 setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal); 421 setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal); 422 setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal); 423 setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal); 424 425 426 if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) { 427 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 428 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 429 setOperationAction(ISD::FRINT, MVT::f64, Legal); 430 } else { 431 setOperationAction(ISD::FCEIL, MVT::f64, Custom); 432 setOperationAction(ISD::FTRUNC, MVT::f64, Custom); 433 setOperationAction(ISD::FRINT, MVT::f64, Custom); 434 setOperationAction(ISD::FFLOOR, MVT::f64, Custom); 435 } 436 437 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 438 439 setOperationAction(ISD::FSIN, MVT::f32, Custom); 440 setOperationAction(ISD::FCOS, MVT::f32, Custom); 441 setOperationAction(ISD::FDIV, MVT::f32, Custom); 442 setOperationAction(ISD::FDIV, MVT::f64, Custom); 443 444 if (Subtarget->has16BitInsts()) { 445 setOperationAction(ISD::Constant, MVT::i16, Legal); 446 447 setOperationAction(ISD::SMIN, MVT::i16, Legal); 448 setOperationAction(ISD::SMAX, MVT::i16, Legal); 449 450 setOperationAction(ISD::UMIN, MVT::i16, Legal); 451 setOperationAction(ISD::UMAX, MVT::i16, Legal); 452 453 setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote); 454 AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32); 455 456 setOperationAction(ISD::ROTR, MVT::i16, Promote); 457 setOperationAction(ISD::ROTL, MVT::i16, Promote); 458 459 setOperationAction(ISD::SDIV, MVT::i16, Promote); 460 setOperationAction(ISD::UDIV, MVT::i16, Promote); 461 setOperationAction(ISD::SREM, MVT::i16, Promote); 462 setOperationAction(ISD::UREM, MVT::i16, Promote); 463 464 setOperationAction(ISD::BSWAP, MVT::i16, Promote); 465 setOperationAction(ISD::BITREVERSE, MVT::i16, Promote); 466 467 setOperationAction(ISD::CTTZ, MVT::i16, Promote); 468 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote); 469 setOperationAction(ISD::CTLZ, MVT::i16, Promote); 470 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote); 471 setOperationAction(ISD::CTPOP, MVT::i16, Promote); 472 473 setOperationAction(ISD::SELECT_CC, MVT::i16, Expand); 474 475 setOperationAction(ISD::BR_CC, MVT::i16, Expand); 476 477 setOperationAction(ISD::LOAD, MVT::i16, Custom); 478 479 setTruncStoreAction(MVT::i64, MVT::i16, Expand); 480 481 setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote); 482 AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32); 483 setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote); 484 AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32); 485 486 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote); 487 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote); 488 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote); 489 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote); 490 491 // F16 - Constant Actions. 492 setOperationAction(ISD::ConstantFP, MVT::f16, Legal); 493 494 // F16 - Load/Store Actions. 495 setOperationAction(ISD::LOAD, MVT::f16, Promote); 496 AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16); 497 setOperationAction(ISD::STORE, MVT::f16, Promote); 498 AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16); 499 500 // F16 - VOP1 Actions. 
501 setOperationAction(ISD::FP_ROUND, MVT::f16, Custom); 502 setOperationAction(ISD::FCOS, MVT::f16, Promote); 503 setOperationAction(ISD::FSIN, MVT::f16, Promote); 504 setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote); 505 setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote); 506 setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote); 507 setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote); 508 setOperationAction(ISD::FROUND, MVT::f16, Custom); 509 510 // F16 - VOP2 Actions. 511 setOperationAction(ISD::BR_CC, MVT::f16, Expand); 512 setOperationAction(ISD::SELECT_CC, MVT::f16, Expand); 513 514 setOperationAction(ISD::FDIV, MVT::f16, Custom); 515 516 // F16 - VOP3 Actions. 517 setOperationAction(ISD::FMA, MVT::f16, Legal); 518 if (!Subtarget->hasFP16Denormals() && STI.hasMadF16()) 519 setOperationAction(ISD::FMAD, MVT::f16, Legal); 520 521 for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) { 522 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { 523 switch (Op) { 524 case ISD::LOAD: 525 case ISD::STORE: 526 case ISD::BUILD_VECTOR: 527 case ISD::BITCAST: 528 case ISD::EXTRACT_VECTOR_ELT: 529 case ISD::INSERT_VECTOR_ELT: 530 case ISD::INSERT_SUBVECTOR: 531 case ISD::EXTRACT_SUBVECTOR: 532 case ISD::SCALAR_TO_VECTOR: 533 break; 534 case ISD::CONCAT_VECTORS: 535 setOperationAction(Op, VT, Custom); 536 break; 537 default: 538 setOperationAction(Op, VT, Expand); 539 break; 540 } 541 } 542 } 543 544 // XXX - Do these do anything? Vector constants turn into build_vector. 545 setOperationAction(ISD::Constant, MVT::v2i16, Legal); 546 setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal); 547 548 setOperationAction(ISD::UNDEF, MVT::v2i16, Legal); 549 setOperationAction(ISD::UNDEF, MVT::v2f16, Legal); 550 551 setOperationAction(ISD::STORE, MVT::v2i16, Promote); 552 AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32); 553 setOperationAction(ISD::STORE, MVT::v2f16, Promote); 554 AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32); 555 556 setOperationAction(ISD::LOAD, MVT::v2i16, Promote); 557 AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32); 558 setOperationAction(ISD::LOAD, MVT::v2f16, Promote); 559 AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32); 560 561 setOperationAction(ISD::AND, MVT::v2i16, Promote); 562 AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32); 563 setOperationAction(ISD::OR, MVT::v2i16, Promote); 564 AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32); 565 setOperationAction(ISD::XOR, MVT::v2i16, Promote); 566 AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32); 567 568 setOperationAction(ISD::LOAD, MVT::v4i16, Promote); 569 AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32); 570 setOperationAction(ISD::LOAD, MVT::v4f16, Promote); 571 AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32); 572 573 setOperationAction(ISD::STORE, MVT::v4i16, Promote); 574 AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32); 575 setOperationAction(ISD::STORE, MVT::v4f16, Promote); 576 AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32); 577 578 setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand); 579 setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand); 580 setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand); 581 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand); 582 583 setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand); 584 setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand); 585 setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand); 586 587 if (!Subtarget->hasVOP3PInsts()) { 588 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom); 589 
setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom); 590 } 591 592 setOperationAction(ISD::FNEG, MVT::v2f16, Legal); 593 // This isn't really legal, but this avoids the legalizer unrolling it (and 594 // allows matching fneg (fabs x) patterns) 595 setOperationAction(ISD::FABS, MVT::v2f16, Legal); 596 597 setOperationAction(ISD::FMAXNUM, MVT::f16, Custom); 598 setOperationAction(ISD::FMINNUM, MVT::f16, Custom); 599 setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal); 600 setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal); 601 602 setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom); 603 setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom); 604 605 setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand); 606 setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand); 607 } 608 609 if (Subtarget->hasVOP3PInsts()) { 610 setOperationAction(ISD::ADD, MVT::v2i16, Legal); 611 setOperationAction(ISD::SUB, MVT::v2i16, Legal); 612 setOperationAction(ISD::MUL, MVT::v2i16, Legal); 613 setOperationAction(ISD::SHL, MVT::v2i16, Legal); 614 setOperationAction(ISD::SRL, MVT::v2i16, Legal); 615 setOperationAction(ISD::SRA, MVT::v2i16, Legal); 616 setOperationAction(ISD::SMIN, MVT::v2i16, Legal); 617 setOperationAction(ISD::UMIN, MVT::v2i16, Legal); 618 setOperationAction(ISD::SMAX, MVT::v2i16, Legal); 619 setOperationAction(ISD::UMAX, MVT::v2i16, Legal); 620 621 setOperationAction(ISD::FADD, MVT::v2f16, Legal); 622 setOperationAction(ISD::FMUL, MVT::v2f16, Legal); 623 setOperationAction(ISD::FMA, MVT::v2f16, Legal); 624 625 setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal); 626 setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal); 627 628 setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal); 629 630 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom); 631 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom); 632 633 setOperationAction(ISD::SHL, MVT::v4i16, Custom); 634 setOperationAction(ISD::SRA, MVT::v4i16, Custom); 635 setOperationAction(ISD::SRL, MVT::v4i16, Custom); 636 setOperationAction(ISD::ADD, MVT::v4i16, Custom); 637 setOperationAction(ISD::SUB, MVT::v4i16, Custom); 638 setOperationAction(ISD::MUL, MVT::v4i16, Custom); 639 640 setOperationAction(ISD::SMIN, MVT::v4i16, Custom); 641 setOperationAction(ISD::SMAX, MVT::v4i16, Custom); 642 setOperationAction(ISD::UMIN, MVT::v4i16, Custom); 643 setOperationAction(ISD::UMAX, MVT::v4i16, Custom); 644 645 setOperationAction(ISD::FADD, MVT::v4f16, Custom); 646 setOperationAction(ISD::FMUL, MVT::v4f16, Custom); 647 648 setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom); 649 setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom); 650 651 setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom); 652 setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom); 653 setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom); 654 655 setOperationAction(ISD::FEXP, MVT::v2f16, Custom); 656 setOperationAction(ISD::SELECT, MVT::v4i16, Custom); 657 setOperationAction(ISD::SELECT, MVT::v4f16, Custom); 658 } 659 660 setOperationAction(ISD::FNEG, MVT::v4f16, Custom); 661 setOperationAction(ISD::FABS, MVT::v4f16, Custom); 662 663 if (Subtarget->has16BitInsts()) { 664 setOperationAction(ISD::SELECT, MVT::v2i16, Promote); 665 AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32); 666 setOperationAction(ISD::SELECT, MVT::v2f16, Promote); 667 AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32); 668 } else { 669 // Legalization hack. 
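    // Without 16-bit instructions v2i16/v2f16 are not register types here, so
    // select (and fneg/fabs below) is custom-lowered rather than promoted to
    // an i32 select as in the branch above.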
670 setOperationAction(ISD::SELECT, MVT::v2i16, Custom); 671 setOperationAction(ISD::SELECT, MVT::v2f16, Custom); 672 673 setOperationAction(ISD::FNEG, MVT::v2f16, Custom); 674 setOperationAction(ISD::FABS, MVT::v2f16, Custom); 675 } 676 677 for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) { 678 setOperationAction(ISD::SELECT, VT, Custom); 679 } 680 681 setTargetDAGCombine(ISD::ADD); 682 setTargetDAGCombine(ISD::ADDCARRY); 683 setTargetDAGCombine(ISD::SUB); 684 setTargetDAGCombine(ISD::SUBCARRY); 685 setTargetDAGCombine(ISD::FADD); 686 setTargetDAGCombine(ISD::FSUB); 687 setTargetDAGCombine(ISD::FMINNUM); 688 setTargetDAGCombine(ISD::FMAXNUM); 689 setTargetDAGCombine(ISD::FMINNUM_IEEE); 690 setTargetDAGCombine(ISD::FMAXNUM_IEEE); 691 setTargetDAGCombine(ISD::FMA); 692 setTargetDAGCombine(ISD::SMIN); 693 setTargetDAGCombine(ISD::SMAX); 694 setTargetDAGCombine(ISD::UMIN); 695 setTargetDAGCombine(ISD::UMAX); 696 setTargetDAGCombine(ISD::SETCC); 697 setTargetDAGCombine(ISD::AND); 698 setTargetDAGCombine(ISD::OR); 699 setTargetDAGCombine(ISD::XOR); 700 setTargetDAGCombine(ISD::SINT_TO_FP); 701 setTargetDAGCombine(ISD::UINT_TO_FP); 702 setTargetDAGCombine(ISD::FCANONICALIZE); 703 setTargetDAGCombine(ISD::SCALAR_TO_VECTOR); 704 setTargetDAGCombine(ISD::ZERO_EXTEND); 705 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG); 706 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); 707 setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); 708 709 // All memory operations. Some folding on the pointer operand is done to help 710 // matching the constant offsets in the addressing modes. 711 setTargetDAGCombine(ISD::LOAD); 712 setTargetDAGCombine(ISD::STORE); 713 setTargetDAGCombine(ISD::ATOMIC_LOAD); 714 setTargetDAGCombine(ISD::ATOMIC_STORE); 715 setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP); 716 setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 717 setTargetDAGCombine(ISD::ATOMIC_SWAP); 718 setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD); 719 setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB); 720 setTargetDAGCombine(ISD::ATOMIC_LOAD_AND); 721 setTargetDAGCombine(ISD::ATOMIC_LOAD_OR); 722 setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR); 723 setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND); 724 setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN); 725 setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX); 726 setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN); 727 setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX); 728 setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD); 729 730 setSchedulingPreference(Sched::RegPressure); 731 } 732 733 const GCNSubtarget *SITargetLowering::getSubtarget() const { 734 return Subtarget; 735 } 736 737 //===----------------------------------------------------------------------===// 738 // TargetLowering queries 739 //===----------------------------------------------------------------------===// 740 741 // v_mad_mix* support a conversion from f16 to f32. 742 // 743 // There is only one special case when denormals are enabled we don't currently, 744 // where this is OK to use. 745 bool SITargetLowering::isFPExtFoldable(unsigned Opcode, 746 EVT DestVT, EVT SrcVT) const { 747 return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) || 748 (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) && 749 DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() && 750 SrcVT.getScalarType() == MVT::f16; 751 } 752 753 bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const { 754 // SI has some legal vector types, but no legal vector operations. 
Say no 755 // shuffles are legal in order to prefer scalarizing some vector operations. 756 return false; 757 } 758 759 MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context, 760 CallingConv::ID CC, 761 EVT VT) const { 762 // TODO: Consider splitting all arguments into 32-bit pieces. 763 if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) { 764 EVT ScalarVT = VT.getScalarType(); 765 unsigned Size = ScalarVT.getSizeInBits(); 766 if (Size == 32) 767 return ScalarVT.getSimpleVT(); 768 769 if (Size == 64) 770 return MVT::i32; 771 772 if (Size == 16 && Subtarget->has16BitInsts()) 773 return VT.isInteger() ? MVT::v2i16 : MVT::v2f16; 774 } 775 776 return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT); 777 } 778 779 unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context, 780 CallingConv::ID CC, 781 EVT VT) const { 782 if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) { 783 unsigned NumElts = VT.getVectorNumElements(); 784 EVT ScalarVT = VT.getScalarType(); 785 unsigned Size = ScalarVT.getSizeInBits(); 786 787 if (Size == 32) 788 return NumElts; 789 790 if (Size == 64) 791 return 2 * NumElts; 792 793 if (Size == 16 && Subtarget->has16BitInsts()) 794 return (VT.getVectorNumElements() + 1) / 2; 795 } 796 797 return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT); 798 } 799 800 unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv( 801 LLVMContext &Context, CallingConv::ID CC, 802 EVT VT, EVT &IntermediateVT, 803 unsigned &NumIntermediates, MVT &RegisterVT) const { 804 if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) { 805 unsigned NumElts = VT.getVectorNumElements(); 806 EVT ScalarVT = VT.getScalarType(); 807 unsigned Size = ScalarVT.getSizeInBits(); 808 if (Size == 32) { 809 RegisterVT = ScalarVT.getSimpleVT(); 810 IntermediateVT = RegisterVT; 811 NumIntermediates = NumElts; 812 return NumIntermediates; 813 } 814 815 if (Size == 64) { 816 RegisterVT = MVT::i32; 817 IntermediateVT = RegisterVT; 818 NumIntermediates = 2 * NumElts; 819 return NumIntermediates; 820 } 821 822 // FIXME: We should fix the ABI to be the same on targets without 16-bit 823 // support, but unless we can properly handle 3-vectors, it will be still be 824 // inconsistent. 825 if (Size == 16 && Subtarget->has16BitInsts()) { 826 RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16; 827 IntermediateVT = RegisterVT; 828 NumIntermediates = (NumElts + 1) / 2; 829 return NumIntermediates; 830 } 831 } 832 833 return TargetLowering::getVectorTypeBreakdownForCallingConv( 834 Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT); 835 } 836 837 static MVT memVTFromAggregate(Type *Ty) { 838 // Only limited forms of aggregate type currently expected. 
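  // This expects a struct whose first member is a scalar or vector payload and
  // whose second member is an i32 (both asserted below). The returned memVT is
  // a vector of the payload's element type whose element count (the payload
  // elements plus the extra dword's worth of elements) is rounded up to a
  // power of two.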
839 assert(Ty->isStructTy() && "Expected struct type"); 840 841 842 Type *ElementType = nullptr; 843 unsigned NumElts; 844 if (Ty->getContainedType(0)->isVectorTy()) { 845 VectorType *VecComponent = cast<VectorType>(Ty->getContainedType(0)); 846 ElementType = VecComponent->getElementType(); 847 NumElts = VecComponent->getNumElements(); 848 } else { 849 ElementType = Ty->getContainedType(0); 850 NumElts = 1; 851 } 852 853 assert((Ty->getContainedType(1) && Ty->getContainedType(1)->isIntegerTy(32)) && "Expected int32 type"); 854 855 // Calculate the size of the memVT type from the aggregate 856 unsigned Pow2Elts = 0; 857 unsigned ElementSize; 858 switch (ElementType->getTypeID()) { 859 default: 860 llvm_unreachable("Unknown type!"); 861 case Type::IntegerTyID: 862 ElementSize = cast<IntegerType>(ElementType)->getBitWidth(); 863 break; 864 case Type::HalfTyID: 865 ElementSize = 16; 866 break; 867 case Type::FloatTyID: 868 ElementSize = 32; 869 break; 870 } 871 unsigned AdditionalElts = ElementSize == 16 ? 2 : 1; 872 Pow2Elts = 1 << Log2_32_Ceil(NumElts + AdditionalElts); 873 874 return MVT::getVectorVT(MVT::getVT(ElementType, false), 875 Pow2Elts); 876 } 877 878 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 879 const CallInst &CI, 880 MachineFunction &MF, 881 unsigned IntrID) const { 882 if (const AMDGPU::RsrcIntrinsic *RsrcIntr = 883 AMDGPU::lookupRsrcIntrinsic(IntrID)) { 884 AttributeList Attr = Intrinsic::getAttributes(CI.getContext(), 885 (Intrinsic::ID)IntrID); 886 if (Attr.hasFnAttribute(Attribute::ReadNone)) 887 return false; 888 889 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 890 891 if (RsrcIntr->IsImage) { 892 Info.ptrVal = MFI->getImagePSV( 893 *MF.getSubtarget<GCNSubtarget>().getInstrInfo(), 894 CI.getArgOperand(RsrcIntr->RsrcArg)); 895 Info.align = 0; 896 } else { 897 Info.ptrVal = MFI->getBufferPSV( 898 *MF.getSubtarget<GCNSubtarget>().getInstrInfo(), 899 CI.getArgOperand(RsrcIntr->RsrcArg)); 900 } 901 902 Info.flags = MachineMemOperand::MODereferenceable; 903 if (Attr.hasFnAttribute(Attribute::ReadOnly)) { 904 Info.opc = ISD::INTRINSIC_W_CHAIN; 905 Info.memVT = MVT::getVT(CI.getType(), true); 906 if (Info.memVT == MVT::Other) { 907 // Some intrinsics return an aggregate type - special case to work out 908 // the correct memVT 909 Info.memVT = memVTFromAggregate(CI.getType()); 910 } 911 Info.flags |= MachineMemOperand::MOLoad; 912 } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) { 913 Info.opc = ISD::INTRINSIC_VOID; 914 Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType()); 915 Info.flags |= MachineMemOperand::MOStore; 916 } else { 917 // Atomic 918 Info.opc = ISD::INTRINSIC_W_CHAIN; 919 Info.memVT = MVT::getVT(CI.getType()); 920 Info.flags = MachineMemOperand::MOLoad | 921 MachineMemOperand::MOStore | 922 MachineMemOperand::MODereferenceable; 923 924 // XXX - Should this be volatile without known ordering? 
925 Info.flags |= MachineMemOperand::MOVolatile; 926 } 927 return true; 928 } 929 930 switch (IntrID) { 931 case Intrinsic::amdgcn_atomic_inc: 932 case Intrinsic::amdgcn_atomic_dec: 933 case Intrinsic::amdgcn_ds_ordered_add: 934 case Intrinsic::amdgcn_ds_ordered_swap: 935 case Intrinsic::amdgcn_ds_fadd: 936 case Intrinsic::amdgcn_ds_fmin: 937 case Intrinsic::amdgcn_ds_fmax: { 938 Info.opc = ISD::INTRINSIC_W_CHAIN; 939 Info.memVT = MVT::getVT(CI.getType()); 940 Info.ptrVal = CI.getOperand(0); 941 Info.align = 0; 942 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 943 944 const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4)); 945 if (!Vol->isZero()) 946 Info.flags |= MachineMemOperand::MOVolatile; 947 948 return true; 949 } 950 case Intrinsic::amdgcn_ds_append: 951 case Intrinsic::amdgcn_ds_consume: { 952 Info.opc = ISD::INTRINSIC_W_CHAIN; 953 Info.memVT = MVT::getVT(CI.getType()); 954 Info.ptrVal = CI.getOperand(0); 955 Info.align = 0; 956 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 957 958 const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1)); 959 if (!Vol->isZero()) 960 Info.flags |= MachineMemOperand::MOVolatile; 961 962 return true; 963 } 964 default: 965 return false; 966 } 967 } 968 969 bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II, 970 SmallVectorImpl<Value*> &Ops, 971 Type *&AccessTy) const { 972 switch (II->getIntrinsicID()) { 973 case Intrinsic::amdgcn_atomic_inc: 974 case Intrinsic::amdgcn_atomic_dec: 975 case Intrinsic::amdgcn_ds_ordered_add: 976 case Intrinsic::amdgcn_ds_ordered_swap: 977 case Intrinsic::amdgcn_ds_fadd: 978 case Intrinsic::amdgcn_ds_fmin: 979 case Intrinsic::amdgcn_ds_fmax: { 980 Value *Ptr = II->getArgOperand(0); 981 AccessTy = II->getType(); 982 Ops.push_back(Ptr); 983 return true; 984 } 985 default: 986 return false; 987 } 988 } 989 990 bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const { 991 if (!Subtarget->hasFlatInstOffsets()) { 992 // Flat instructions do not have offsets, and only have the register 993 // address. 994 return AM.BaseOffs == 0 && AM.Scale == 0; 995 } 996 997 // GFX9 added a 13-bit signed offset. When using regular flat instructions, 998 // the sign bit is ignored and is treated as a 12-bit unsigned offset. 999 1000 // GFX10 shrinked signed offset to 12 bits. When using regular flat 1001 // instructions, the sign bit is also ignored and is treated as 11-bit 1002 // unsigned offset. 1003 1004 if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) 1005 return isUInt<11>(AM.BaseOffs) && AM.Scale == 0; 1006 1007 // Just r + i 1008 return isUInt<12>(AM.BaseOffs) && AM.Scale == 0; 1009 } 1010 1011 bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const { 1012 if (Subtarget->hasFlatGlobalInsts()) 1013 return isInt<13>(AM.BaseOffs) && AM.Scale == 0; 1014 1015 if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) { 1016 // Assume the we will use FLAT for all global memory accesses 1017 // on VI. 1018 // FIXME: This assumption is currently wrong. On VI we still use 1019 // MUBUF instructions for the r + i addressing mode. As currently 1020 // implemented, the MUBUF instructions only work on buffer < 4GB. 1021 // It may be possible to support > 4GB buffers with MUBUF instructions, 1022 // by setting the stride value in the resource descriptor which would 1023 // increase the size limit to (stride * 4GB). However, this is risky, 1024 // because it has never been validated. 
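    //
    // Under that assumption the global case simply defers to the flat rules:
    // register-only addressing on targets without flat instruction offsets,
    // otherwise a small unsigned immediate offset (see
    // isLegalFlatAddressingMode above).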
1025 return isLegalFlatAddressingMode(AM); 1026 } 1027 1028 return isLegalMUBUFAddressingMode(AM); 1029 } 1030 1031 bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const { 1032 // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and 1033 // additionally can do r + r + i with addr64. 32-bit has more addressing 1034 // mode options. Depending on the resource constant, it can also do 1035 // (i64 r0) + (i32 r1) * (i14 i). 1036 // 1037 // Private arrays end up using a scratch buffer most of the time, so also 1038 // assume those use MUBUF instructions. Scratch loads / stores are currently 1039 // implemented as mubuf instructions with offen bit set, so slightly 1040 // different than the normal addr64. 1041 if (!isUInt<12>(AM.BaseOffs)) 1042 return false; 1043 1044 // FIXME: Since we can split immediate into soffset and immediate offset, 1045 // would it make sense to allow any immediate? 1046 1047 switch (AM.Scale) { 1048 case 0: // r + i or just i, depending on HasBaseReg. 1049 return true; 1050 case 1: 1051 return true; // We have r + r or r + i. 1052 case 2: 1053 if (AM.HasBaseReg) { 1054 // Reject 2 * r + r. 1055 return false; 1056 } 1057 1058 // Allow 2 * r as r + r 1059 // Or 2 * r + i is allowed as r + r + i. 1060 return true; 1061 default: // Don't allow n * r 1062 return false; 1063 } 1064 } 1065 1066 bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL, 1067 const AddrMode &AM, Type *Ty, 1068 unsigned AS, Instruction *I) const { 1069 // No global is ever allowed as a base. 1070 if (AM.BaseGV) 1071 return false; 1072 1073 if (AS == AMDGPUAS::GLOBAL_ADDRESS) 1074 return isLegalGlobalAddressingMode(AM); 1075 1076 if (AS == AMDGPUAS::CONSTANT_ADDRESS || 1077 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || 1078 AS == AMDGPUAS::BUFFER_FAT_POINTER) { 1079 // If the offset isn't a multiple of 4, it probably isn't going to be 1080 // correctly aligned. 1081 // FIXME: Can we get the real alignment here? 1082 if (AM.BaseOffs % 4 != 0) 1083 return isLegalMUBUFAddressingMode(AM); 1084 1085 // There are no SMRD extloads, so if we have to do a small type access we 1086 // will use a MUBUF load. 1087 // FIXME?: We also need to do this if unaligned, but we don't know the 1088 // alignment here. 1089 if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4) 1090 return isLegalGlobalAddressingMode(AM); 1091 1092 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) { 1093 // SMRD instructions have an 8-bit, dword offset on SI. 1094 if (!isUInt<8>(AM.BaseOffs / 4)) 1095 return false; 1096 } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) { 1097 // On CI+, this can also be a 32-bit literal constant offset. If it fits 1098 // in 8-bits, it can use a smaller encoding. 1099 if (!isUInt<32>(AM.BaseOffs / 4)) 1100 return false; 1101 } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 1102 // On VI, these use the SMEM format and the offset is 20-bit in bytes. 1103 if (!isUInt<20>(AM.BaseOffs)) 1104 return false; 1105 } else 1106 llvm_unreachable("unhandled generation"); 1107 1108 if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg. 
1109 return true; 1110 1111 if (AM.Scale == 1 && AM.HasBaseReg) 1112 return true; 1113 1114 return false; 1115 1116 } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { 1117 return isLegalMUBUFAddressingMode(AM); 1118 } else if (AS == AMDGPUAS::LOCAL_ADDRESS || 1119 AS == AMDGPUAS::REGION_ADDRESS) { 1120 // Basic, single offset DS instructions allow a 16-bit unsigned immediate 1121 // field. 1122 // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have 1123 // an 8-bit dword offset but we don't know the alignment here. 1124 if (!isUInt<16>(AM.BaseOffs)) 1125 return false; 1126 1127 if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg. 1128 return true; 1129 1130 if (AM.Scale == 1 && AM.HasBaseReg) 1131 return true; 1132 1133 return false; 1134 } else if (AS == AMDGPUAS::FLAT_ADDRESS || 1135 AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) { 1136 // For an unknown address space, this usually means that this is for some 1137 // reason being used for pure arithmetic, and not based on some addressing 1138 // computation. We don't have instructions that compute pointers with any 1139 // addressing modes, so treat them as having no offset like flat 1140 // instructions. 1141 return isLegalFlatAddressingMode(AM); 1142 } else { 1143 llvm_unreachable("unhandled address space"); 1144 } 1145 } 1146 1147 bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT, 1148 const SelectionDAG &DAG) const { 1149 if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) { 1150 return (MemVT.getSizeInBits() <= 4 * 32); 1151 } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { 1152 unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize(); 1153 return (MemVT.getSizeInBits() <= MaxPrivateBits); 1154 } else if (AS == AMDGPUAS::LOCAL_ADDRESS) { 1155 return (MemVT.getSizeInBits() <= 2 * 32); 1156 } 1157 return true; 1158 } 1159 1160 bool SITargetLowering::allowsMisalignedMemoryAccesses( 1161 EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags, 1162 bool *IsFast) const { 1163 if (IsFast) 1164 *IsFast = false; 1165 1166 // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96, 1167 // which isn't a simple VT. 1168 // Until MVT is extended to handle this, simply check for the size and 1169 // rely on the condition below: allow accesses if the size is a multiple of 4. 1170 if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 && 1171 VT.getStoreSize() > 16)) { 1172 return false; 1173 } 1174 1175 if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS || 1176 AddrSpace == AMDGPUAS::REGION_ADDRESS) { 1177 // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte 1178 // aligned, 8 byte access in a single operation using ds_read2/write2_b32 1179 // with adjacent offsets. 1180 bool AlignedBy4 = (Align % 4 == 0); 1181 if (IsFast) 1182 *IsFast = AlignedBy4; 1183 1184 return AlignedBy4; 1185 } 1186 1187 // FIXME: We have to be conservative here and assume that flat operations 1188 // will access scratch. If we had access to the IR function, then we 1189 // could determine if any private memory was used in the function. 
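  //
  // The check below therefore treats flat like private: without unaligned
  // scratch access support, such accesses are only allowed (and reported as
  // fast) when they are at least dword aligned.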
1190 if (!Subtarget->hasUnalignedScratchAccess() && 1191 (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS || 1192 AddrSpace == AMDGPUAS::FLAT_ADDRESS)) { 1193 bool AlignedBy4 = Align >= 4; 1194 if (IsFast) 1195 *IsFast = AlignedBy4; 1196 1197 return AlignedBy4; 1198 } 1199 1200 if (Subtarget->hasUnalignedBufferAccess()) { 1201 // If we have an uniform constant load, it still requires using a slow 1202 // buffer instruction if unaligned. 1203 if (IsFast) { 1204 *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS || 1205 AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ? 1206 (Align % 4 == 0) : true; 1207 } 1208 1209 return true; 1210 } 1211 1212 // Smaller than dword value must be aligned. 1213 if (VT.bitsLT(MVT::i32)) 1214 return false; 1215 1216 // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the 1217 // byte-address are ignored, thus forcing Dword alignment. 1218 // This applies to private, global, and constant memory. 1219 if (IsFast) 1220 *IsFast = true; 1221 1222 return VT.bitsGT(MVT::i32) && Align % 4 == 0; 1223 } 1224 1225 EVT SITargetLowering::getOptimalMemOpType( 1226 uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, 1227 bool ZeroMemset, bool MemcpyStrSrc, 1228 const AttributeList &FuncAttributes) const { 1229 // FIXME: Should account for address space here. 1230 1231 // The default fallback uses the private pointer size as a guess for a type to 1232 // use. Make sure we switch these to 64-bit accesses. 1233 1234 if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global 1235 return MVT::v4i32; 1236 1237 if (Size >= 8 && DstAlign >= 4) 1238 return MVT::v2i32; 1239 1240 // Use the default. 1241 return MVT::Other; 1242 } 1243 1244 static bool isFlatGlobalAddrSpace(unsigned AS) { 1245 return AS == AMDGPUAS::GLOBAL_ADDRESS || 1246 AS == AMDGPUAS::FLAT_ADDRESS || 1247 AS == AMDGPUAS::CONSTANT_ADDRESS || 1248 AS > AMDGPUAS::MAX_AMDGPU_ADDRESS; 1249 } 1250 1251 bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS, 1252 unsigned DestAS) const { 1253 return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS); 1254 } 1255 1256 bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const { 1257 const MemSDNode *MemNode = cast<MemSDNode>(N); 1258 const Value *Ptr = MemNode->getMemOperand()->getValue(); 1259 const Instruction *I = dyn_cast_or_null<Instruction>(Ptr); 1260 return I && I->getMetadata("amdgpu.noclobber"); 1261 } 1262 1263 bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS, 1264 unsigned DestAS) const { 1265 // Flat -> private/local is a simple truncate. 1266 // Flat -> global is no-op 1267 if (SrcAS == AMDGPUAS::FLAT_ADDRESS) 1268 return true; 1269 1270 return isNoopAddrSpaceCast(SrcAS, DestAS); 1271 } 1272 1273 bool SITargetLowering::isMemOpUniform(const SDNode *N) const { 1274 const MemSDNode *MemNode = cast<MemSDNode>(N); 1275 1276 return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand()); 1277 } 1278 1279 TargetLoweringBase::LegalizeTypeAction 1280 SITargetLowering::getPreferredVectorAction(MVT VT) const { 1281 if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16)) 1282 return TypeSplitVector; 1283 1284 return TargetLoweringBase::getPreferredVectorAction(VT); 1285 } 1286 1287 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 1288 Type *Ty) const { 1289 // FIXME: Could be smarter if called for vector constants. 
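  // Returning true unconditionally says it is always considered profitable to
  // materialize such a constant as an immediate instead of loading it from
  // memory.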
1290 return true; 1291 } 1292 1293 bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const { 1294 if (Subtarget->has16BitInsts() && VT == MVT::i16) { 1295 switch (Op) { 1296 case ISD::LOAD: 1297 case ISD::STORE: 1298 1299 // These operations are done with 32-bit instructions anyway. 1300 case ISD::AND: 1301 case ISD::OR: 1302 case ISD::XOR: 1303 case ISD::SELECT: 1304 // TODO: Extensions? 1305 return true; 1306 default: 1307 return false; 1308 } 1309 } 1310 1311 // SimplifySetCC uses this function to determine whether or not it should 1312 // create setcc with i1 operands. We don't have instructions for i1 setcc. 1313 if (VT == MVT::i1 && Op == ISD::SETCC) 1314 return false; 1315 1316 return TargetLowering::isTypeDesirableForOp(Op, VT); 1317 } 1318 1319 SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG, 1320 const SDLoc &SL, 1321 SDValue Chain, 1322 uint64_t Offset) const { 1323 const DataLayout &DL = DAG.getDataLayout(); 1324 MachineFunction &MF = DAG.getMachineFunction(); 1325 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 1326 1327 const ArgDescriptor *InputPtrReg; 1328 const TargetRegisterClass *RC; 1329 1330 std::tie(InputPtrReg, RC) 1331 = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); 1332 1333 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 1334 MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS); 1335 SDValue BasePtr = DAG.getCopyFromReg(Chain, SL, 1336 MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT); 1337 1338 return DAG.getObjectPtrOffset(SL, BasePtr, Offset); 1339 } 1340 1341 SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG, 1342 const SDLoc &SL) const { 1343 uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(), 1344 FIRST_IMPLICIT); 1345 return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset); 1346 } 1347 1348 SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT, 1349 const SDLoc &SL, SDValue Val, 1350 bool Signed, 1351 const ISD::InputArg *Arg) const { 1352 // First, if it is a widened vector, narrow it. 1353 if (VT.isVector() && 1354 VT.getVectorNumElements() != MemVT.getVectorNumElements()) { 1355 EVT NarrowedVT = 1356 EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), 1357 VT.getVectorNumElements()); 1358 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val, 1359 DAG.getConstant(0, SL, MVT::i32)); 1360 } 1361 1362 // Then convert the vector elements or scalar value. 1363 if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) && 1364 VT.bitsLT(MemVT)) { 1365 unsigned Opc = Arg->Flags.isZExt() ? 
ISD::AssertZext : ISD::AssertSext; 1366 Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT)); 1367 } 1368 1369 if (MemVT.isFloatingPoint()) 1370 Val = getFPExtOrFPTrunc(DAG, Val, SL, VT); 1371 else if (Signed) 1372 Val = DAG.getSExtOrTrunc(Val, SL, VT); 1373 else 1374 Val = DAG.getZExtOrTrunc(Val, SL, VT); 1375 1376 return Val; 1377 } 1378 1379 SDValue SITargetLowering::lowerKernargMemParameter( 1380 SelectionDAG &DAG, EVT VT, EVT MemVT, 1381 const SDLoc &SL, SDValue Chain, 1382 uint64_t Offset, unsigned Align, bool Signed, 1383 const ISD::InputArg *Arg) const { 1384 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 1385 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); 1386 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); 1387 1388 // Try to avoid using an extload by loading earlier than the argument address, 1389 // and extracting the relevant bits. The load should hopefully be merged with 1390 // the previous argument. 1391 if (MemVT.getStoreSize() < 4 && Align < 4) { 1392 // TODO: Handle align < 4 and size >= 4 (can happen with packed structs). 1393 int64_t AlignDownOffset = alignDown(Offset, 4); 1394 int64_t OffsetDiff = Offset - AlignDownOffset; 1395 1396 EVT IntVT = MemVT.changeTypeToInteger(); 1397 1398 // TODO: If we passed in the base kernel offset we could have a better 1399 // alignment than 4, but we don't really need it. 1400 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset); 1401 SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4, 1402 MachineMemOperand::MODereferenceable | 1403 MachineMemOperand::MOInvariant); 1404 1405 SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32); 1406 SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt); 1407 1408 SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract); 1409 ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal); 1410 ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg); 1411 1412 1413 return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL); 1414 } 1415 1416 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset); 1417 SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align, 1418 MachineMemOperand::MODereferenceable | 1419 MachineMemOperand::MOInvariant); 1420 1421 SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg); 1422 return DAG.getMergeValues({ Val, Load.getValue(1) }, SL); 1423 } 1424 1425 SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA, 1426 const SDLoc &SL, SDValue Chain, 1427 const ISD::InputArg &Arg) const { 1428 MachineFunction &MF = DAG.getMachineFunction(); 1429 MachineFrameInfo &MFI = MF.getFrameInfo(); 1430 1431 if (Arg.Flags.isByVal()) { 1432 unsigned Size = Arg.Flags.getByValSize(); 1433 int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false); 1434 return DAG.getFrameIndex(FrameIdx, MVT::i32); 1435 } 1436 1437 unsigned ArgOffset = VA.getLocMemOffset(); 1438 unsigned ArgSize = VA.getValVT().getStoreSize(); 1439 1440 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true); 1441 1442 // Create load nodes to retrieve arguments from the stack. 
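  // The extension kind is chosen from the location info below: SExt/ZExt/AExt
  // arguments become sign-, zero- or any-extending loads from the value type
  // up to the location type, while BCvt simply reloads using the location VT
  // as the memory type.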
1443 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); 1444 SDValue ArgValue; 1445 1446 // For NON_EXTLOAD, generic code in getLoad assert(ValVT == MemVT) 1447 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; 1448 MVT MemVT = VA.getValVT(); 1449 1450 switch (VA.getLocInfo()) { 1451 default: 1452 break; 1453 case CCValAssign::BCvt: 1454 MemVT = VA.getLocVT(); 1455 break; 1456 case CCValAssign::SExt: 1457 ExtType = ISD::SEXTLOAD; 1458 break; 1459 case CCValAssign::ZExt: 1460 ExtType = ISD::ZEXTLOAD; 1461 break; 1462 case CCValAssign::AExt: 1463 ExtType = ISD::EXTLOAD; 1464 break; 1465 } 1466 1467 ArgValue = DAG.getExtLoad( 1468 ExtType, SL, VA.getLocVT(), Chain, FIN, 1469 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), 1470 MemVT); 1471 return ArgValue; 1472 } 1473 1474 SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG, 1475 const SIMachineFunctionInfo &MFI, 1476 EVT VT, 1477 AMDGPUFunctionArgInfo::PreloadedValue PVID) const { 1478 const ArgDescriptor *Reg; 1479 const TargetRegisterClass *RC; 1480 1481 std::tie(Reg, RC) = MFI.getPreloadedValue(PVID); 1482 return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT); 1483 } 1484 1485 static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits, 1486 CallingConv::ID CallConv, 1487 ArrayRef<ISD::InputArg> Ins, 1488 BitVector &Skipped, 1489 FunctionType *FType, 1490 SIMachineFunctionInfo *Info) { 1491 for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) { 1492 const ISD::InputArg *Arg = &Ins[I]; 1493 1494 assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) && 1495 "vector type argument should have been split"); 1496 1497 // First check if it's a PS input addr. 1498 if (CallConv == CallingConv::AMDGPU_PS && 1499 !Arg->Flags.isInReg() && !Arg->Flags.isByVal() && PSInputNum <= 15) { 1500 1501 bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum); 1502 1503 // Inconveniently only the first part of the split is marked as isSplit, 1504 // so skip to the end. We only want to increment PSInputNum once for the 1505 // entire split argument. 1506 if (Arg->Flags.isSplit()) { 1507 while (!Arg->Flags.isSplitEnd()) { 1508 assert(!Arg->VT.isVector() && 1509 "unexpected vector split in ps argument type"); 1510 if (!SkipArg) 1511 Splits.push_back(*Arg); 1512 Arg = &Ins[++I]; 1513 } 1514 } 1515 1516 if (SkipArg) { 1517 // We can safely skip PS inputs. 1518 Skipped.set(Arg->getOrigArgIndex()); 1519 ++PSInputNum; 1520 continue; 1521 } 1522 1523 Info->markPSInputAllocated(PSInputNum); 1524 if (Arg->Used) 1525 Info->markPSInputEnabled(PSInputNum); 1526 1527 ++PSInputNum; 1528 } 1529 1530 Splits.push_back(*Arg); 1531 } 1532 } 1533 1534 // Allocate special inputs passed in VGPRs. 
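// For entry functions the workitem IDs have fixed homes: X, Y and Z (when
// required) arrive in VGPR0, VGPR1 and VGPR2 respectively, which are marked
// live-in and reserved in the calling convention state below.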
1535 static void allocateSpecialEntryInputVGPRs(CCState &CCInfo, 1536 MachineFunction &MF, 1537 const SIRegisterInfo &TRI, 1538 SIMachineFunctionInfo &Info) { 1539 if (Info.hasWorkItemIDX()) { 1540 unsigned Reg = AMDGPU::VGPR0; 1541 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 1542 1543 CCInfo.AllocateReg(Reg); 1544 Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg)); 1545 } 1546 1547 if (Info.hasWorkItemIDY()) { 1548 unsigned Reg = AMDGPU::VGPR1; 1549 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 1550 1551 CCInfo.AllocateReg(Reg); 1552 Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg)); 1553 } 1554 1555 if (Info.hasWorkItemIDZ()) { 1556 unsigned Reg = AMDGPU::VGPR2; 1557 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 1558 1559 CCInfo.AllocateReg(Reg); 1560 Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg)); 1561 } 1562 } 1563 1564 // Try to allocate a VGPR at the end of the argument list, or if no argument 1565 // VGPRs are left allocating a stack slot. 1566 static ArgDescriptor allocateVGPR32Input(CCState &CCInfo) { 1567 ArrayRef<MCPhysReg> ArgVGPRs 1568 = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32); 1569 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs); 1570 if (RegIdx == ArgVGPRs.size()) { 1571 // Spill to stack required. 1572 int64_t Offset = CCInfo.AllocateStack(4, 4); 1573 1574 return ArgDescriptor::createStack(Offset); 1575 } 1576 1577 unsigned Reg = ArgVGPRs[RegIdx]; 1578 Reg = CCInfo.AllocateReg(Reg); 1579 assert(Reg != AMDGPU::NoRegister); 1580 1581 MachineFunction &MF = CCInfo.getMachineFunction(); 1582 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 1583 return ArgDescriptor::createRegister(Reg); 1584 } 1585 1586 static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo, 1587 const TargetRegisterClass *RC, 1588 unsigned NumArgRegs) { 1589 ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32); 1590 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs); 1591 if (RegIdx == ArgSGPRs.size()) 1592 report_fatal_error("ran out of SGPRs for arguments"); 1593 1594 unsigned Reg = ArgSGPRs[RegIdx]; 1595 Reg = CCInfo.AllocateReg(Reg); 1596 assert(Reg != AMDGPU::NoRegister); 1597 1598 MachineFunction &MF = CCInfo.getMachineFunction(); 1599 MF.addLiveIn(Reg, RC); 1600 return ArgDescriptor::createRegister(Reg); 1601 } 1602 1603 static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) { 1604 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32); 1605 } 1606 1607 static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) { 1608 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16); 1609 } 1610 1611 static void allocateSpecialInputVGPRs(CCState &CCInfo, 1612 MachineFunction &MF, 1613 const SIRegisterInfo &TRI, 1614 SIMachineFunctionInfo &Info) { 1615 if (Info.hasWorkItemIDX()) 1616 Info.setWorkItemIDX(allocateVGPR32Input(CCInfo)); 1617 1618 if (Info.hasWorkItemIDY()) 1619 Info.setWorkItemIDY(allocateVGPR32Input(CCInfo)); 1620 1621 if (Info.hasWorkItemIDZ()) 1622 Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo)); 1623 } 1624 1625 static void allocateSpecialInputSGPRs(CCState &CCInfo, 1626 MachineFunction &MF, 1627 const SIRegisterInfo &TRI, 1628 SIMachineFunctionInfo &Info) { 1629 auto &ArgInfo = Info.getArgInfo(); 1630 1631 // TODO: Unify handling with private memory pointers. 
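  // In outline, the allocations below hand out argument SGPRs in a fixed
  // order: the 64-bit pointer inputs (dispatch ptr, queue ptr, kernarg
  // segment ptr, dispatch ID), then the 32-bit workgroup IDs, then the
  // implicit argument pointer.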
1632 1633 if (Info.hasDispatchPtr()) 1634 ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo); 1635 1636 if (Info.hasQueuePtr()) 1637 ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo); 1638 1639 if (Info.hasKernargSegmentPtr()) 1640 ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo); 1641 1642 if (Info.hasDispatchID()) 1643 ArgInfo.DispatchID = allocateSGPR64Input(CCInfo); 1644 1645 // flat_scratch_init is not applicable for non-kernel functions. 1646 1647 if (Info.hasWorkGroupIDX()) 1648 ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo); 1649 1650 if (Info.hasWorkGroupIDY()) 1651 ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo); 1652 1653 if (Info.hasWorkGroupIDZ()) 1654 ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo); 1655 1656 if (Info.hasImplicitArgPtr()) 1657 ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo); 1658 } 1659 1660 // Allocate special inputs passed in user SGPRs. 1661 static void allocateHSAUserSGPRs(CCState &CCInfo, 1662 MachineFunction &MF, 1663 const SIRegisterInfo &TRI, 1664 SIMachineFunctionInfo &Info) { 1665 if (Info.hasImplicitBufferPtr()) { 1666 unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI); 1667 MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass); 1668 CCInfo.AllocateReg(ImplicitBufferPtrReg); 1669 } 1670 1671 // FIXME: How should these inputs interact with inreg / custom SGPR inputs? 1672 if (Info.hasPrivateSegmentBuffer()) { 1673 unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI); 1674 MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass); 1675 CCInfo.AllocateReg(PrivateSegmentBufferReg); 1676 } 1677 1678 if (Info.hasDispatchPtr()) { 1679 unsigned DispatchPtrReg = Info.addDispatchPtr(TRI); 1680 MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass); 1681 CCInfo.AllocateReg(DispatchPtrReg); 1682 } 1683 1684 if (Info.hasQueuePtr()) { 1685 unsigned QueuePtrReg = Info.addQueuePtr(TRI); 1686 MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass); 1687 CCInfo.AllocateReg(QueuePtrReg); 1688 } 1689 1690 if (Info.hasKernargSegmentPtr()) { 1691 unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI); 1692 MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass); 1693 CCInfo.AllocateReg(InputPtrReg); 1694 } 1695 1696 if (Info.hasDispatchID()) { 1697 unsigned DispatchIDReg = Info.addDispatchID(TRI); 1698 MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass); 1699 CCInfo.AllocateReg(DispatchIDReg); 1700 } 1701 1702 if (Info.hasFlatScratchInit()) { 1703 unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI); 1704 MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass); 1705 CCInfo.AllocateReg(FlatScratchInitReg); 1706 } 1707 1708 // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read 1709 // these from the dispatch pointer. 1710 } 1711 1712 // Allocate special input registers that are initialized per-wave. 
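// These per-wave registers (the workgroup IDs, the workgroup info word, and
// the private segment wave byte offset) are appended after the user SGPRs set
// up in allocateHSAUserSGPRs, which is consistent with LowerFormalArguments
// calling this only after the user arguments have been handled.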
1713 static void allocateSystemSGPRs(CCState &CCInfo, 1714 MachineFunction &MF, 1715 SIMachineFunctionInfo &Info, 1716 CallingConv::ID CallConv, 1717 bool IsShader) { 1718 if (Info.hasWorkGroupIDX()) { 1719 unsigned Reg = Info.addWorkGroupIDX(); 1720 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); 1721 CCInfo.AllocateReg(Reg); 1722 } 1723 1724 if (Info.hasWorkGroupIDY()) { 1725 unsigned Reg = Info.addWorkGroupIDY(); 1726 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); 1727 CCInfo.AllocateReg(Reg); 1728 } 1729 1730 if (Info.hasWorkGroupIDZ()) { 1731 unsigned Reg = Info.addWorkGroupIDZ(); 1732 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); 1733 CCInfo.AllocateReg(Reg); 1734 } 1735 1736 if (Info.hasWorkGroupInfo()) { 1737 unsigned Reg = Info.addWorkGroupInfo(); 1738 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); 1739 CCInfo.AllocateReg(Reg); 1740 } 1741 1742 if (Info.hasPrivateSegmentWaveByteOffset()) { 1743 // Scratch wave offset passed in system SGPR. 1744 unsigned PrivateSegmentWaveByteOffsetReg; 1745 1746 if (IsShader) { 1747 PrivateSegmentWaveByteOffsetReg = 1748 Info.getPrivateSegmentWaveByteOffsetSystemSGPR(); 1749 1750 // This is true if the scratch wave byte offset doesn't have a fixed 1751 // location. 1752 if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) { 1753 PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo); 1754 Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg); 1755 } 1756 } else 1757 PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset(); 1758 1759 MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass); 1760 CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg); 1761 } 1762 } 1763 1764 static void reservePrivateMemoryRegs(const TargetMachine &TM, 1765 MachineFunction &MF, 1766 const SIRegisterInfo &TRI, 1767 SIMachineFunctionInfo &Info) { 1768 // Now that we've figured out where the scratch register inputs are, see if 1769 // we should reserve the arguments and use them directly. 1770 MachineFrameInfo &MFI = MF.getFrameInfo(); 1771 bool HasStackObjects = MFI.hasStackObjects(); 1772 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 1773 1774 // Record that we know we have non-spill stack objects so we don't need to 1775 // check all stack objects later. 1776 if (HasStackObjects) 1777 Info.setHasNonSpillStackObjects(true); 1778 1779 // Everything live out of a block is spilled with fast regalloc, so it's 1780 // almost certain that spilling will be required. 1781 if (TM.getOptLevel() == CodeGenOpt::None) 1782 HasStackObjects = true; 1783 1784 // For now assume stack access is needed in any callee functions, so we need 1785 // the scratch registers to pass in. 1786 bool RequiresStackAccess = HasStackObjects || MFI.hasCalls(); 1787 1788 if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) { 1789 // If we have stack objects, we unquestionably need the private buffer 1790 // resource. For the Code Object V2 ABI, this will be the first 4 user 1791 // SGPR inputs. We can reserve those and use them directly. 1792 1793 unsigned PrivateSegmentBufferReg = 1794 Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER); 1795 Info.setScratchRSrcReg(PrivateSegmentBufferReg); 1796 } else { 1797 unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF); 1798 // We tentatively reserve the last registers (skipping the last registers 1799 // which may contain VCC, FLAT_SCR, and XNACK).
After register allocation, 1800 // we'll replace these with the ones immediately after those which were 1801 // really allocated. In the prologue copies will be inserted from the 1802 // argument to these reserved registers. 1803 1804 // Without HSA, relocations are used for the scratch pointer and the 1805 // buffer resource setup is always inserted in the prologue. Scratch wave 1806 // offset is still in an input SGPR. 1807 Info.setScratchRSrcReg(ReservedBufferReg); 1808 } 1809 1810 // This should be accurate for kernels even before the frame is finalized. 1811 const bool HasFP = ST.getFrameLowering()->hasFP(MF); 1812 if (HasFP) { 1813 unsigned ReservedOffsetReg = 1814 TRI.reservedPrivateSegmentWaveByteOffsetReg(MF); 1815 MachineRegisterInfo &MRI = MF.getRegInfo(); 1816 1817 // Try to use s32 as the SP, but move it if it would interfere with input 1818 // arguments. This won't work with calls though. 1819 // 1820 // FIXME: Move SP to avoid any possible inputs, or find a way to spill input 1821 // registers. 1822 if (!MRI.isLiveIn(AMDGPU::SGPR32)) { 1823 Info.setStackPtrOffsetReg(AMDGPU::SGPR32); 1824 } else { 1825 assert(AMDGPU::isShader(MF.getFunction().getCallingConv())); 1826 1827 if (MFI.hasCalls()) 1828 report_fatal_error("call in graphics shader with too many input SGPRs"); 1829 1830 for (unsigned Reg : AMDGPU::SGPR_32RegClass) { 1831 if (!MRI.isLiveIn(Reg)) { 1832 Info.setStackPtrOffsetReg(Reg); 1833 break; 1834 } 1835 } 1836 1837 if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG) 1838 report_fatal_error("failed to find register for SP"); 1839 } 1840 1841 Info.setScratchWaveOffsetReg(ReservedOffsetReg); 1842 Info.setFrameOffsetReg(ReservedOffsetReg); 1843 } else if (RequiresStackAccess) { 1844 assert(!MFI.hasCalls()); 1845 // We know there are accesses and they will be done relative to SP, so just 1846 // pin it to the input. 1847 // 1848 // FIXME: Should not do this if inline asm is reading/writing these 1849 // registers. 1850 unsigned PreloadedSP = Info.getPreloadedReg( 1851 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); 1852 1853 Info.setStackPtrOffsetReg(PreloadedSP); 1854 Info.setScratchWaveOffsetReg(PreloadedSP); 1855 Info.setFrameOffsetReg(PreloadedSP); 1856 } else { 1857 assert(!MFI.hasCalls()); 1858 1859 // There may not be stack access at all. There may still be spills, or 1860 // access of a constant pointer (in which cases an extra copy will be 1861 // emitted in the prolog). 
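    // In this remaining case the stack pointer, the scratch wave offset, and
    // the frame offset all fall back to the same reserved offset register
    // chosen below.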
1862 unsigned ReservedOffsetReg 1863 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF); 1864 Info.setStackPtrOffsetReg(ReservedOffsetReg); 1865 Info.setScratchWaveOffsetReg(ReservedOffsetReg); 1866 Info.setFrameOffsetReg(ReservedOffsetReg); 1867 } 1868 } 1869 1870 bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const { 1871 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 1872 return !Info->isEntryFunction(); 1873 } 1874 1875 void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { 1876 1877 } 1878 1879 void SITargetLowering::insertCopiesSplitCSR( 1880 MachineBasicBlock *Entry, 1881 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 1882 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 1883 1884 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); 1885 if (!IStart) 1886 return; 1887 1888 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 1889 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); 1890 MachineBasicBlock::iterator MBBI = Entry->begin(); 1891 for (const MCPhysReg *I = IStart; *I; ++I) { 1892 const TargetRegisterClass *RC = nullptr; 1893 if (AMDGPU::SReg_64RegClass.contains(*I)) 1894 RC = &AMDGPU::SGPR_64RegClass; 1895 else if (AMDGPU::SReg_32RegClass.contains(*I)) 1896 RC = &AMDGPU::SGPR_32RegClass; 1897 else 1898 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 1899 1900 unsigned NewVR = MRI->createVirtualRegister(RC); 1901 // Create copy from CSR to a virtual register. 1902 Entry->addLiveIn(*I); 1903 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) 1904 .addReg(*I); 1905 1906 // Insert the copy-back instructions right before the terminator. 1907 for (auto *Exit : Exits) 1908 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), 1909 TII->get(TargetOpcode::COPY), *I) 1910 .addReg(NewVR); 1911 } 1912 } 1913 1914 SDValue SITargetLowering::LowerFormalArguments( 1915 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 1916 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 1917 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 1918 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 1919 1920 MachineFunction &MF = DAG.getMachineFunction(); 1921 const Function &Fn = MF.getFunction(); 1922 FunctionType *FType = MF.getFunction().getFunctionType(); 1923 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 1924 1925 if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) { 1926 DiagnosticInfoUnsupported NoGraphicsHSA( 1927 Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc()); 1928 DAG.getContext()->diagnose(NoGraphicsHSA); 1929 return DAG.getEntryNode(); 1930 } 1931 1932 SmallVector<ISD::InputArg, 16> Splits; 1933 SmallVector<CCValAssign, 16> ArgLocs; 1934 BitVector Skipped(Ins.size()); 1935 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 1936 *DAG.getContext()); 1937 1938 bool IsShader = AMDGPU::isShader(CallConv); 1939 bool IsKernel = AMDGPU::isKernel(CallConv); 1940 bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv); 1941 1942 if (IsShader) { 1943 processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info); 1944 1945 // At least one interpolation mode must be enabled or else the GPU will 1946 // hang. 1947 // 1948 // Check PSInputAddr instead of PSInputEnable. The idea is that if the user 1949 // set PSInputAddr, the user wants to enable some bits after the compilation 1950 // based on run-time states. 
Since we can't know what the final PSInputEna 1951 // will look like, we shouldn't do anything here and the user should take 1952 // responsibility for the correct programming. 1953 // 1954 // Otherwise, the following restrictions apply: 1955 // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled. 1956 // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be 1957 // enabled too. 1958 if (CallConv == CallingConv::AMDGPU_PS) { 1959 if ((Info->getPSInputAddr() & 0x7F) == 0 || 1960 ((Info->getPSInputAddr() & 0xF) == 0 && 1961 Info->isPSInputAllocated(11))) { 1962 CCInfo.AllocateReg(AMDGPU::VGPR0); 1963 CCInfo.AllocateReg(AMDGPU::VGPR1); 1964 Info->markPSInputAllocated(0); 1965 Info->markPSInputEnabled(0); 1966 } 1967 if (Subtarget->isAmdPalOS()) { 1968 // For isAmdPalOS, the user does not enable some bits after compilation 1969 // based on run-time states; the register values being generated here are 1970 // the final ones set in hardware. Therefore we need to apply the 1971 // workaround to PSInputAddr and PSInputEnable together. (The case where 1972 // a bit is set in PSInputAddr but not PSInputEnable is where the 1973 // frontend set up an input arg for a particular interpolation mode, but 1974 // nothing uses that input arg. Really we should have an earlier pass 1975 // that removes such an arg.) 1976 unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable(); 1977 if ((PsInputBits & 0x7F) == 0 || 1978 ((PsInputBits & 0xF) == 0 && 1979 (PsInputBits >> 11 & 1))) 1980 Info->markPSInputEnabled( 1981 countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined)); 1982 } 1983 } 1984 1985 assert(!Info->hasDispatchPtr() && 1986 !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() && 1987 !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() && 1988 !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() && 1989 !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() && 1990 !Info->hasWorkItemIDZ()); 1991 } else if (IsKernel) { 1992 assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX()); 1993 } else { 1994 Splits.append(Ins.begin(), Ins.end()); 1995 } 1996 1997 if (IsEntryFunc) { 1998 allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info); 1999 allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info); 2000 } 2001 2002 if (IsKernel) { 2003 analyzeFormalArgumentsCompute(CCInfo, Ins); 2004 } else { 2005 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg); 2006 CCInfo.AnalyzeFormalArguments(Splits, AssignFn); 2007 } 2008 2009 SmallVector<SDValue, 16> Chains; 2010 2011 // FIXME: This is the minimum kernel argument alignment. We should improve 2012 // this to the maximum alignment of the arguments. 2013 // 2014 // FIXME: Alignment of explicit arguments totally broken with non-0 explicit 2015 // kern arg offset.
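  // Worked example (illustrative): with KernelArgBaseAlign = 16, an i16
  // argument at explicit offset 6 gets Align = MinAlign(16, 6) = 2, so
  // lowerKernargMemParameter takes its sub-dword path: load the dword at
  // alignDown(6, 4) = 4, shift right by (6 - 4) * 8 = 16 bits, then truncate
  // to 16 bits.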
2016 const unsigned KernelArgBaseAlign = 16; 2017 2018 for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) { 2019 const ISD::InputArg &Arg = Ins[i]; 2020 if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) { 2021 InVals.push_back(DAG.getUNDEF(Arg.VT)); 2022 continue; 2023 } 2024 2025 CCValAssign &VA = ArgLocs[ArgIdx++]; 2026 MVT VT = VA.getLocVT(); 2027 2028 if (IsEntryFunc && VA.isMemLoc()) { 2029 VT = Ins[i].VT; 2030 EVT MemVT = VA.getLocVT(); 2031 2032 const uint64_t Offset = VA.getLocMemOffset(); 2033 unsigned Align = MinAlign(KernelArgBaseAlign, Offset); 2034 2035 SDValue Arg = lowerKernargMemParameter( 2036 DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]); 2037 Chains.push_back(Arg.getValue(1)); 2038 2039 auto *ParamTy = 2040 dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex())); 2041 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && 2042 ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || 2043 ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) { 2044 // On SI local pointers are just offsets into LDS, so they are always 2045 // less than 16-bits. On CI and newer they could potentially be 2046 // real pointers, so we can't guarantee their size. 2047 Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg, 2048 DAG.getValueType(MVT::i16)); 2049 } 2050 2051 InVals.push_back(Arg); 2052 continue; 2053 } else if (!IsEntryFunc && VA.isMemLoc()) { 2054 SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg); 2055 InVals.push_back(Val); 2056 if (!Arg.Flags.isByVal()) 2057 Chains.push_back(Val.getValue(1)); 2058 continue; 2059 } 2060 2061 assert(VA.isRegLoc() && "Parameter must be in a register!"); 2062 2063 unsigned Reg = VA.getLocReg(); 2064 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); 2065 EVT ValVT = VA.getValVT(); 2066 2067 Reg = MF.addLiveIn(Reg, RC); 2068 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT); 2069 2070 if (Arg.Flags.isSRet()) { 2071 // The return object should be reasonably addressable. 2072 2073 // FIXME: This helps when the return is a real sret. If it is a 2074 // automatically inserted sret (i.e. CanLowerReturn returns false), an 2075 // extra copy is inserted in SelectionDAGBuilder which obscures this. 2076 unsigned NumBits 2077 = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex(); 2078 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, 2079 DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits))); 2080 } 2081 2082 // If this is an 8 or 16-bit value, it is really passed promoted 2083 // to 32 bits. Insert an assert[sz]ext to capture this, then 2084 // truncate to the right size. 2085 switch (VA.getLocInfo()) { 2086 case CCValAssign::Full: 2087 break; 2088 case CCValAssign::BCvt: 2089 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val); 2090 break; 2091 case CCValAssign::SExt: 2092 Val = DAG.getNode(ISD::AssertSext, DL, VT, Val, 2093 DAG.getValueType(ValVT)); 2094 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); 2095 break; 2096 case CCValAssign::ZExt: 2097 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, 2098 DAG.getValueType(ValVT)); 2099 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); 2100 break; 2101 case CCValAssign::AExt: 2102 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); 2103 break; 2104 default: 2105 llvm_unreachable("Unknown loc info!"); 2106 } 2107 2108 InVals.push_back(Val); 2109 } 2110 2111 if (!IsEntryFunc) { 2112 // Special inputs come after user arguments. 
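    // For callable functions the workitem ID VGPRs are only assigned once all
    // user VGPR arguments have been placed, so a callee with many VGPR
    // arguments may receive them in later VGPRs or, if the 32 argument VGPRs
    // are exhausted, in a 4-byte stack slot (see allocateVGPR32Input).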
2113 allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info); 2114 } 2115 2116 // Start adding system SGPRs. 2117 if (IsEntryFunc) { 2118 allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader); 2119 } else { 2120 CCInfo.AllocateReg(Info->getScratchRSrcReg()); 2121 CCInfo.AllocateReg(Info->getScratchWaveOffsetReg()); 2122 CCInfo.AllocateReg(Info->getFrameOffsetReg()); 2123 allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info); 2124 } 2125 2126 auto &ArgUsageInfo = 2127 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); 2128 ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo()); 2129 2130 unsigned StackArgSize = CCInfo.getNextStackOffset(); 2131 Info->setBytesInStackArgArea(StackArgSize); 2132 2133 return Chains.empty() ? Chain : 2134 DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 2135 } 2136 2137 // TODO: If return values can't fit in registers, we should return as many as 2138 // possible in registers before passing on stack. 2139 bool SITargetLowering::CanLowerReturn( 2140 CallingConv::ID CallConv, 2141 MachineFunction &MF, bool IsVarArg, 2142 const SmallVectorImpl<ISD::OutputArg> &Outs, 2143 LLVMContext &Context) const { 2144 // Replacing returns with sret/stack usage doesn't make sense for shaders. 2145 // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn 2146 // for shaders. Vector types should be explicitly handled by CC. 2147 if (AMDGPU::isEntryFunctionCC(CallConv)) 2148 return true; 2149 2150 SmallVector<CCValAssign, 16> RVLocs; 2151 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); 2152 return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg)); 2153 } 2154 2155 SDValue 2156 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 2157 bool isVarArg, 2158 const SmallVectorImpl<ISD::OutputArg> &Outs, 2159 const SmallVectorImpl<SDValue> &OutVals, 2160 const SDLoc &DL, SelectionDAG &DAG) const { 2161 MachineFunction &MF = DAG.getMachineFunction(); 2162 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 2163 2164 if (AMDGPU::isKernel(CallConv)) { 2165 return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs, 2166 OutVals, DL, DAG); 2167 } 2168 2169 bool IsShader = AMDGPU::isShader(CallConv); 2170 2171 Info->setIfReturnsVoid(Outs.empty()); 2172 bool IsWaveEnd = Info->returnsVoid() && IsShader; 2173 2174 // CCValAssign - represent the assignment of the return value to a location. 2175 SmallVector<CCValAssign, 48> RVLocs; 2176 SmallVector<ISD::OutputArg, 48> Splits; 2177 2178 // CCState - Info about the registers and stack slots. 2179 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 2180 *DAG.getContext()); 2181 2182 // Analyze outgoing return values. 2183 CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); 2184 2185 SDValue Flag; 2186 SmallVector<SDValue, 48> RetOps; 2187 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 2188 2189 // Add return address for callable functions. 2190 if (!Info->isEntryFunction()) { 2191 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2192 SDValue ReturnAddrReg = CreateLiveInRegister( 2193 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64); 2194 2195 // FIXME: Should be able to use a vreg here, but need a way to prevent it 2196 // from being allocated to a CSR.
2197 2198 SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF), 2199 MVT::i64); 2200 2201 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag); 2202 Flag = Chain.getValue(1); 2203 2204 RetOps.push_back(PhysReturnAddrReg); 2205 } 2206 2207 // Copy the result values into the output registers. 2208 for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E; 2209 ++I, ++RealRVLocIdx) { 2210 CCValAssign &VA = RVLocs[I]; 2211 assert(VA.isRegLoc() && "Can only return in registers!"); 2212 // TODO: Partially return in registers if return values don't fit. 2213 SDValue Arg = OutVals[RealRVLocIdx]; 2214 2215 // Copied from other backends. 2216 switch (VA.getLocInfo()) { 2217 case CCValAssign::Full: 2218 break; 2219 case CCValAssign::BCvt: 2220 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); 2221 break; 2222 case CCValAssign::SExt: 2223 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); 2224 break; 2225 case CCValAssign::ZExt: 2226 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); 2227 break; 2228 case CCValAssign::AExt: 2229 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); 2230 break; 2231 default: 2232 llvm_unreachable("Unknown loc info!"); 2233 } 2234 2235 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag); 2236 Flag = Chain.getValue(1); 2237 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2238 } 2239 2240 // FIXME: Does sret work properly? 2241 if (!Info->isEntryFunction()) { 2242 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); 2243 const MCPhysReg *I = 2244 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 2245 if (I) { 2246 for (; *I; ++I) { 2247 if (AMDGPU::SReg_64RegClass.contains(*I)) 2248 RetOps.push_back(DAG.getRegister(*I, MVT::i64)); 2249 else if (AMDGPU::SReg_32RegClass.contains(*I)) 2250 RetOps.push_back(DAG.getRegister(*I, MVT::i32)); 2251 else 2252 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 2253 } 2254 } 2255 } 2256 2257 // Update chain and glue. 2258 RetOps[0] = Chain; 2259 if (Flag.getNode()) 2260 RetOps.push_back(Flag); 2261 2262 unsigned Opc = AMDGPUISD::ENDPGM; 2263 if (!IsWaveEnd) 2264 Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG; 2265 return DAG.getNode(Opc, DL, MVT::Other, RetOps); 2266 } 2267 2268 SDValue SITargetLowering::LowerCallResult( 2269 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg, 2270 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 2271 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn, 2272 SDValue ThisVal) const { 2273 CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg); 2274 2275 // Assign locations to each value returned by this call. 2276 SmallVector<CCValAssign, 16> RVLocs; 2277 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 2278 *DAG.getContext()); 2279 CCInfo.AnalyzeCallResult(Ins, RetCC); 2280 2281 // Copy all of the result registers out of their specified physreg. 
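  // For example, an i16 result returned ZExt-promoted in a 32-bit register is
  // copied out at the 32-bit LocVT, wrapped in AssertZext with value type
  // i16, and then truncated back to i16 by the LocInfo switch below.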
2282 for (unsigned i = 0; i != RVLocs.size(); ++i) { 2283 CCValAssign VA = RVLocs[i]; 2284 SDValue Val; 2285 2286 if (VA.isRegLoc()) { 2287 Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag); 2288 Chain = Val.getValue(1); 2289 InFlag = Val.getValue(2); 2290 } else if (VA.isMemLoc()) { 2291 report_fatal_error("TODO: return values in memory"); 2292 } else 2293 llvm_unreachable("unknown argument location type"); 2294 2295 switch (VA.getLocInfo()) { 2296 case CCValAssign::Full: 2297 break; 2298 case CCValAssign::BCvt: 2299 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); 2300 break; 2301 case CCValAssign::ZExt: 2302 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val, 2303 DAG.getValueType(VA.getValVT())); 2304 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2305 break; 2306 case CCValAssign::SExt: 2307 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val, 2308 DAG.getValueType(VA.getValVT())); 2309 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2310 break; 2311 case CCValAssign::AExt: 2312 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2313 break; 2314 default: 2315 llvm_unreachable("Unknown loc info!"); 2316 } 2317 2318 InVals.push_back(Val); 2319 } 2320 2321 return Chain; 2322 } 2323 2324 // Add code to pass special inputs required depending on used features separate 2325 // from the explicit user arguments present in the IR. 2326 void SITargetLowering::passSpecialInputs( 2327 CallLoweringInfo &CLI, 2328 CCState &CCInfo, 2329 const SIMachineFunctionInfo &Info, 2330 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 2331 SmallVectorImpl<SDValue> &MemOpChains, 2332 SDValue Chain) const { 2333 // If we don't have a call site, this was a call inserted by 2334 // legalization. These can never use special inputs. 2335 if (!CLI.CS) 2336 return; 2337 2338 const Function *CalleeFunc = CLI.CS.getCalledFunction(); 2339 assert(CalleeFunc); 2340 2341 SelectionDAG &DAG = CLI.DAG; 2342 const SDLoc &DL = CLI.DL; 2343 2344 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); 2345 2346 auto &ArgUsageInfo = 2347 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); 2348 const AMDGPUFunctionArgInfo &CalleeArgInfo 2349 = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc); 2350 2351 const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo(); 2352 2353 // TODO: Unify with private memory register handling. This is complicated by 2354 // the fact that at least in kernels, the input argument is not necessarily 2355 // in the same location as the input. 
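  // The list below enumerates the implicit inputs that may need to be
  // forwarded to the callee. Each one is re-materialized from the caller's
  // own incoming argument (or, for the implicit argument pointer in a kernel,
  // computed from the kernarg segment pointer) and is then passed in the
  // callee's expected register or stored to a reserved stack slot.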
2356 AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = { 2357 AMDGPUFunctionArgInfo::DISPATCH_PTR, 2358 AMDGPUFunctionArgInfo::QUEUE_PTR, 2359 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR, 2360 AMDGPUFunctionArgInfo::DISPATCH_ID, 2361 AMDGPUFunctionArgInfo::WORKGROUP_ID_X, 2362 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y, 2363 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z, 2364 AMDGPUFunctionArgInfo::WORKITEM_ID_X, 2365 AMDGPUFunctionArgInfo::WORKITEM_ID_Y, 2366 AMDGPUFunctionArgInfo::WORKITEM_ID_Z, 2367 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR 2368 }; 2369 2370 for (auto InputID : InputRegs) { 2371 const ArgDescriptor *OutgoingArg; 2372 const TargetRegisterClass *ArgRC; 2373 2374 std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID); 2375 if (!OutgoingArg) 2376 continue; 2377 2378 const ArgDescriptor *IncomingArg; 2379 const TargetRegisterClass *IncomingArgRC; 2380 std::tie(IncomingArg, IncomingArgRC) 2381 = CallerArgInfo.getPreloadedValue(InputID); 2382 assert(IncomingArgRC == ArgRC); 2383 2384 // All special arguments are ints for now. 2385 EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32; 2386 SDValue InputReg; 2387 2388 if (IncomingArg) { 2389 InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg); 2390 } else { 2391 // The implicit arg ptr is special because it doesn't have a corresponding 2392 // input for kernels, and is computed from the kernarg segment pointer. 2393 assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); 2394 InputReg = getImplicitArgPtr(DAG, DL); 2395 } 2396 2397 if (OutgoingArg->isRegister()) { 2398 RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg); 2399 } else { 2400 unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4); 2401 SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg, 2402 SpecialArgOffset); 2403 MemOpChains.push_back(ArgStore); 2404 } 2405 } 2406 } 2407 2408 static bool canGuaranteeTCO(CallingConv::ID CC) { 2409 return CC == CallingConv::Fast; 2410 } 2411 2412 /// Return true if we might ever do TCO for calls with this calling convention. 2413 static bool mayTailCallThisCC(CallingConv::ID CC) { 2414 switch (CC) { 2415 case CallingConv::C: 2416 return true; 2417 default: 2418 return canGuaranteeTCO(CC); 2419 } 2420 } 2421 2422 bool SITargetLowering::isEligibleForTailCallOptimization( 2423 SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg, 2424 const SmallVectorImpl<ISD::OutputArg> &Outs, 2425 const SmallVectorImpl<SDValue> &OutVals, 2426 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const { 2427 if (!mayTailCallThisCC(CalleeCC)) 2428 return false; 2429 2430 MachineFunction &MF = DAG.getMachineFunction(); 2431 const Function &CallerF = MF.getFunction(); 2432 CallingConv::ID CallerCC = CallerF.getCallingConv(); 2433 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2434 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); 2435 2436 // Kernels aren't callable, and don't have a live in return address so it 2437 // doesn't make sense to do a tail call with entry functions. 2438 if (!CallerPreserved) 2439 return false; 2440 2441 bool CCMatch = CallerCC == CalleeCC; 2442 2443 if (DAG.getTarget().Options.GuaranteedTailCallOpt) { 2444 if (canGuaranteeTCO(CalleeCC) && CCMatch) 2445 return true; 2446 return false; 2447 } 2448 2449 // TODO: Can we handle var args? 
2450 if (IsVarArg) 2451 return false; 2452 2453 for (const Argument &Arg : CallerF.args()) { 2454 if (Arg.hasByValAttr()) 2455 return false; 2456 } 2457 2458 LLVMContext &Ctx = *DAG.getContext(); 2459 2460 // Check that the call results are passed in the same way. 2461 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins, 2462 CCAssignFnForCall(CalleeCC, IsVarArg), 2463 CCAssignFnForCall(CallerCC, IsVarArg))) 2464 return false; 2465 2466 // The callee has to preserve all registers the caller needs to preserve. 2467 if (!CCMatch) { 2468 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); 2469 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) 2470 return false; 2471 } 2472 2473 // Nothing more to check if the callee is taking no arguments. 2474 if (Outs.empty()) 2475 return true; 2476 2477 SmallVector<CCValAssign, 16> ArgLocs; 2478 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx); 2479 2480 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg)); 2481 2482 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 2483 // If the stack arguments for this call do not fit into our own save area then 2484 // the call cannot be made tail. 2485 // TODO: Is this really necessary? 2486 if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) 2487 return false; 2488 2489 const MachineRegisterInfo &MRI = MF.getRegInfo(); 2490 return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals); 2491 } 2492 2493 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 2494 if (!CI->isTailCall()) 2495 return false; 2496 2497 const Function *ParentFn = CI->getParent()->getParent(); 2498 if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv())) 2499 return false; 2500 2501 auto Attr = ParentFn->getFnAttribute("disable-tail-calls"); 2502 return (Attr.getValueAsString() != "true"); 2503 } 2504 2505 // The wave scratch offset register is used as the global base pointer. 2506 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI, 2507 SmallVectorImpl<SDValue> &InVals) const { 2508 SelectionDAG &DAG = CLI.DAG; 2509 const SDLoc &DL = CLI.DL; 2510 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; 2511 SmallVector<SDValue, 32> &OutVals = CLI.OutVals; 2512 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; 2513 SDValue Chain = CLI.Chain; 2514 SDValue Callee = CLI.Callee; 2515 bool &IsTailCall = CLI.IsTailCall; 2516 CallingConv::ID CallConv = CLI.CallConv; 2517 bool IsVarArg = CLI.IsVarArg; 2518 bool IsSibCall = false; 2519 bool IsThisReturn = false; 2520 MachineFunction &MF = DAG.getMachineFunction(); 2521 2522 if (IsVarArg) { 2523 return lowerUnhandledCall(CLI, InVals, 2524 "unsupported call to variadic function "); 2525 } 2526 2527 if (!CLI.CS.getInstruction()) 2528 report_fatal_error("unsupported libcall legalization"); 2529 2530 if (!CLI.CS.getCalledFunction()) { 2531 return lowerUnhandledCall(CLI, InVals, 2532 "unsupported indirect call to function "); 2533 } 2534 2535 if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) { 2536 return lowerUnhandledCall(CLI, InVals, 2537 "unsupported required tail call to function "); 2538 } 2539 2540 if (AMDGPU::isShader(MF.getFunction().getCallingConv())) { 2541 // Note the issue is with the CC of the calling function, not of the call 2542 // itself. 
2543 return lowerUnhandledCall(CLI, InVals, 2544 "unsupported call from graphics shader of function "); 2545 } 2546 2547 if (IsTailCall) { 2548 IsTailCall = isEligibleForTailCallOptimization( 2549 Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG); 2550 if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) { 2551 report_fatal_error("failed to perform tail call elimination on a call " 2552 "site marked musttail"); 2553 } 2554 2555 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; 2556 2557 // A sibling call is one where we're under the usual C ABI and not planning 2558 // to change that but can still do a tail call: 2559 if (!TailCallOpt && IsTailCall) 2560 IsSibCall = true; 2561 2562 if (IsTailCall) 2563 ++NumTailCalls; 2564 } 2565 2566 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 2567 2568 // Analyze operands of the call, assigning locations to each operand. 2569 SmallVector<CCValAssign, 16> ArgLocs; 2570 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); 2571 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg); 2572 2573 CCInfo.AnalyzeCallOperands(Outs, AssignFn); 2574 2575 // Get a count of how many bytes are to be pushed on the stack. 2576 unsigned NumBytes = CCInfo.getNextStackOffset(); 2577 2578 if (IsSibCall) { 2579 // Since we're not changing the ABI to make this a tail call, the memory 2580 // operands are already available in the caller's incoming argument space. 2581 NumBytes = 0; 2582 } 2583 2584 // FPDiff is the byte offset of the call's argument area from the callee's. 2585 // Stores to callee stack arguments will be placed in FixedStackSlots offset 2586 // by this amount for a tail call. In a sibling call it must be 0 because the 2587 // caller will deallocate the entire stack and the callee still expects its 2588 // arguments to begin at SP+0. Completely unused for non-tail calls. 2589 int32_t FPDiff = 0; 2590 MachineFrameInfo &MFI = MF.getFrameInfo(); 2591 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 2592 2593 SDValue CallerSavedFP; 2594 2595 // Adjust the stack pointer for the new arguments... 2596 // These operations are automatically eliminated by the prolog/epilog pass 2597 if (!IsSibCall) { 2598 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL); 2599 2600 SmallVector<SDValue, 4> CopyFromChains; 2601 2602 unsigned OffsetReg = Info->getScratchWaveOffsetReg(); 2603 2604 // In the HSA case, this should be an identity copy. 2605 SDValue ScratchRSrcReg 2606 = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32); 2607 RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg); 2608 CopyFromChains.push_back(ScratchRSrcReg.getValue(1)); 2609 2610 // TODO: Don't hardcode these registers and get from the callee function. 2611 SDValue ScratchWaveOffsetReg 2612 = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32); 2613 RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg); 2614 CopyFromChains.push_back(ScratchWaveOffsetReg.getValue(1)); 2615 2616 if (!Info->isEntryFunction()) { 2617 // Avoid clobbering this function's FP value. In the current convention 2618 // callee will overwrite this, so do save/restore around the call site. 
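      // The copy captured in CallerSavedFP is written back to the frame
      // offset register once the call has completed (see the CallerSavedFP
      // handling after the CALL node is created).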
2619 CallerSavedFP = DAG.getCopyFromReg(Chain, DL, 2620 Info->getFrameOffsetReg(), MVT::i32); 2621 CopyFromChains.push_back(CallerSavedFP.getValue(1)); 2622 } 2623 2624 Chain = DAG.getTokenFactor(DL, CopyFromChains); 2625 } 2626 2627 SmallVector<SDValue, 8> MemOpChains; 2628 MVT PtrVT = MVT::i32; 2629 2630 // Walk the register/memloc assignments, inserting copies/loads. 2631 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e; 2632 ++i, ++realArgIdx) { 2633 CCValAssign &VA = ArgLocs[i]; 2634 SDValue Arg = OutVals[realArgIdx]; 2635 2636 // Promote the value if needed. 2637 switch (VA.getLocInfo()) { 2638 case CCValAssign::Full: 2639 break; 2640 case CCValAssign::BCvt: 2641 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); 2642 break; 2643 case CCValAssign::ZExt: 2644 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); 2645 break; 2646 case CCValAssign::SExt: 2647 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); 2648 break; 2649 case CCValAssign::AExt: 2650 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); 2651 break; 2652 case CCValAssign::FPExt: 2653 Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg); 2654 break; 2655 default: 2656 llvm_unreachable("Unknown loc info!"); 2657 } 2658 2659 if (VA.isRegLoc()) { 2660 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 2661 } else { 2662 assert(VA.isMemLoc()); 2663 2664 SDValue DstAddr; 2665 MachinePointerInfo DstInfo; 2666 2667 unsigned LocMemOffset = VA.getLocMemOffset(); 2668 int32_t Offset = LocMemOffset; 2669 2670 SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT); 2671 unsigned Align = 0; 2672 2673 if (IsTailCall) { 2674 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 2675 unsigned OpSize = Flags.isByVal() ? 2676 Flags.getByValSize() : VA.getValVT().getStoreSize(); 2677 2678 // FIXME: We can have better than the minimum byval required alignment. 2679 Align = Flags.isByVal() ? Flags.getByValAlign() : 2680 MinAlign(Subtarget->getStackAlignment(), Offset); 2681 2682 Offset = Offset + FPDiff; 2683 int FI = MFI.CreateFixedObject(OpSize, Offset, true); 2684 2685 DstAddr = DAG.getFrameIndex(FI, PtrVT); 2686 DstInfo = MachinePointerInfo::getFixedStack(MF, FI); 2687 2688 // Make sure any stack arguments overlapping with where we're storing 2689 // are loaded before this eventual operation. Otherwise they'll be 2690 // clobbered. 2691 2692 // FIXME: Why is this really necessary? This seems to just result in a 2693 // lot of code to copy the stack and write them back to the same 2694 // locations, which are supposed to be immutable? 2695 Chain = addTokenForArgument(Chain, DAG, MFI, FI); 2696 } else { 2697 DstAddr = PtrOff; 2698 DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset); 2699 Align = MinAlign(Subtarget->getStackAlignment(), LocMemOffset); 2700 } 2701 2702 if (Outs[i].Flags.isByVal()) { 2703 SDValue SizeNode = 2704 DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32); 2705 SDValue Cpy = DAG.getMemcpy( 2706 Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(), 2707 /*isVol = */ false, /*AlwaysInline = */ true, 2708 /*isTailCall = */ false, DstInfo, 2709 MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy( 2710 *DAG.getContext(), AMDGPUAS::PRIVATE_ADDRESS)))); 2711 2712 MemOpChains.push_back(Cpy); 2713 } else { 2714 SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Align); 2715 MemOpChains.push_back(Store); 2716 } 2717 } 2718 } 2719 2720 // Copy special input registers after user input arguments. 
2721 passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain); 2722 2723 if (!MemOpChains.empty()) 2724 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); 2725 2726 // Build a sequence of copy-to-reg nodes chained together with token chain 2727 // and flag operands which copy the outgoing args into the appropriate regs. 2728 SDValue InFlag; 2729 for (auto &RegToPass : RegsToPass) { 2730 Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first, 2731 RegToPass.second, InFlag); 2732 InFlag = Chain.getValue(1); 2733 } 2734 2735 2736 SDValue PhysReturnAddrReg; 2737 if (IsTailCall) { 2738 // Since the return is being combined with the call, we need to pass on the 2739 // return address. 2740 2741 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2742 SDValue ReturnAddrReg = CreateLiveInRegister( 2743 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64); 2744 2745 PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF), 2746 MVT::i64); 2747 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag); 2748 InFlag = Chain.getValue(1); 2749 } 2750 2751 // We don't usually want to end the call-sequence here because we would tidy 2752 // the frame up *after* the call; however, in the ABI-changing tail-call case 2753 // we've carefully laid out the parameters so that when sp is reset they'll be 2754 // in the correct location. 2755 if (IsTailCall && !IsSibCall) { 2756 Chain = DAG.getCALLSEQ_END(Chain, 2757 DAG.getTargetConstant(NumBytes, DL, MVT::i32), 2758 DAG.getTargetConstant(0, DL, MVT::i32), 2759 InFlag, DL); 2760 InFlag = Chain.getValue(1); 2761 } 2762 2763 std::vector<SDValue> Ops; 2764 Ops.push_back(Chain); 2765 Ops.push_back(Callee); 2766 // Add a redundant copy of the callee global which will not be legalized, as 2767 // we need direct access to the callee later. 2768 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Callee); 2769 const GlobalValue *GV = GSD->getGlobal(); 2770 Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64)); 2771 2772 if (IsTailCall) { 2773 // Each tail call may have to adjust the stack by a different amount, so 2774 // this information must travel along with the operation for eventual 2775 // consumption by emitEpilogue. 2776 Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32)); 2777 2778 Ops.push_back(PhysReturnAddrReg); 2779 } 2780 2781 // Add argument registers to the end of the list so that they are known live 2782 // into the call. 2783 for (auto &RegToPass : RegsToPass) { 2784 Ops.push_back(DAG.getRegister(RegToPass.first, 2785 RegToPass.second.getValueType())); 2786 } 2787 2788 // Add a register mask operand representing the call-preserved registers. 2789 2790 auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo()); 2791 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); 2792 assert(Mask && "Missing call preserved mask for calling convention"); 2793 Ops.push_back(DAG.getRegisterMask(Mask)); 2794 2795 if (InFlag.getNode()) 2796 Ops.push_back(InFlag); 2797 2798 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2799 2800 // If we're doing a tail call, use a TC_RETURN here rather than an 2801 // actual call instruction. 2802 if (IsTailCall) { 2803 MFI.setHasTailCall(); 2804 return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops); 2805 } 2806 2807 // Returns a chain and a flag for retval copy to use.
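  // The glue result of the call node below is threaded through CALLSEQ_END
  // and then consumed by LowerCallResult, which uses it to copy the physical
  // result registers into virtual registers immediately after the call.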
2808 SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops); 2809 Chain = Call.getValue(0); 2810 InFlag = Call.getValue(1); 2811 2812 if (CallerSavedFP) { 2813 SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32); 2814 Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag); 2815 InFlag = Chain.getValue(1); 2816 } 2817 2818 uint64_t CalleePopBytes = NumBytes; 2819 Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32), 2820 DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32), 2821 InFlag, DL); 2822 if (!Ins.empty()) 2823 InFlag = Chain.getValue(1); 2824 2825 // Handle result values, copying them out of physregs into vregs that we 2826 // return. 2827 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG, 2828 InVals, IsThisReturn, 2829 IsThisReturn ? OutVals[0] : SDValue()); 2830 } 2831 2832 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT, 2833 SelectionDAG &DAG) const { 2834 unsigned Reg = StringSwitch<unsigned>(RegName) 2835 .Case("m0", AMDGPU::M0) 2836 .Case("exec", AMDGPU::EXEC) 2837 .Case("exec_lo", AMDGPU::EXEC_LO) 2838 .Case("exec_hi", AMDGPU::EXEC_HI) 2839 .Case("flat_scratch", AMDGPU::FLAT_SCR) 2840 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO) 2841 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI) 2842 .Default(AMDGPU::NoRegister); 2843 2844 if (Reg == AMDGPU::NoRegister) { 2845 report_fatal_error(Twine("invalid register name \"" 2846 + StringRef(RegName) + "\".")); 2847 2848 } 2849 2850 if ((Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS || 2851 Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) && 2852 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) { 2853 report_fatal_error(Twine("invalid register \"" 2854 + StringRef(RegName) + "\" for subtarget.")); 2855 } 2856 2857 switch (Reg) { 2858 case AMDGPU::M0: 2859 case AMDGPU::EXEC_LO: 2860 case AMDGPU::EXEC_HI: 2861 case AMDGPU::FLAT_SCR_LO: 2862 case AMDGPU::FLAT_SCR_HI: 2863 if (VT.getSizeInBits() == 32) 2864 return Reg; 2865 break; 2866 case AMDGPU::EXEC: 2867 case AMDGPU::FLAT_SCR: 2868 if (VT.getSizeInBits() == 64) 2869 return Reg; 2870 break; 2871 default: 2872 llvm_unreachable("missing register type checking"); 2873 } 2874 2875 report_fatal_error(Twine("invalid type for register \"" 2876 + StringRef(RegName) + "\".")); 2877 } 2878 2879 // If kill is not the last instruction, split the block so kill is always a 2880 // proper terminator. 2881 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI, 2882 MachineBasicBlock *BB) const { 2883 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 2884 2885 MachineBasicBlock::iterator SplitPoint(&MI); 2886 ++SplitPoint; 2887 2888 if (SplitPoint == BB->end()) { 2889 // Don't bother with a new block. 2890 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); 2891 return BB; 2892 } 2893 2894 MachineFunction *MF = BB->getParent(); 2895 MachineBasicBlock *SplitBB 2896 = MF->CreateMachineBasicBlock(BB->getBasicBlock()); 2897 2898 MF->insert(++MachineFunction::iterator(BB), SplitBB); 2899 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end()); 2900 2901 SplitBB->transferSuccessorsAndUpdatePHIs(BB); 2902 BB->addSuccessor(SplitBB); 2903 2904 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); 2905 return SplitBB; 2906 } 2907 2908 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the 2909 // wavefront. If the value is uniform and just happens to be in a VGPR, this 2910 // will only do one iteration. 
In the worst case, this will loop 64 times. 2911 // 2912 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value. 2913 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop( 2914 const SIInstrInfo *TII, 2915 MachineRegisterInfo &MRI, 2916 MachineBasicBlock &OrigBB, 2917 MachineBasicBlock &LoopBB, 2918 const DebugLoc &DL, 2919 const MachineOperand &IdxReg, 2920 unsigned InitReg, 2921 unsigned ResultReg, 2922 unsigned PhiReg, 2923 unsigned InitSaveExecReg, 2924 int Offset, 2925 bool UseGPRIdxMode, 2926 bool IsIndirectSrc) { 2927 MachineBasicBlock::iterator I = LoopBB.begin(); 2928 2929 unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2930 unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2931 unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 2932 unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2933 2934 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) 2935 .addReg(InitReg) 2936 .addMBB(&OrigBB) 2937 .addReg(ResultReg) 2938 .addMBB(&LoopBB); 2939 2940 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec) 2941 .addReg(InitSaveExecReg) 2942 .addMBB(&OrigBB) 2943 .addReg(NewExec) 2944 .addMBB(&LoopBB); 2945 2946 // Read the next variant <- also loop target. 2947 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg) 2948 .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef())); 2949 2950 // Compare the just read M0 value to all possible Idx values. 2951 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg) 2952 .addReg(CurrentIdxReg) 2953 .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg()); 2954 2955 // Update EXEC, save the original EXEC value to VCC. 2956 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec) 2957 .addReg(CondReg, RegState::Kill); 2958 2959 MRI.setSimpleHint(NewExec, CondReg); 2960 2961 if (UseGPRIdxMode) { 2962 unsigned IdxReg; 2963 if (Offset == 0) { 2964 IdxReg = CurrentIdxReg; 2965 } else { 2966 IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 2967 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg) 2968 .addReg(CurrentIdxReg, RegState::Kill) 2969 .addImm(Offset); 2970 } 2971 unsigned IdxMode = IsIndirectSrc ? 2972 AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE; 2973 MachineInstr *SetOn = 2974 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 2975 .addReg(IdxReg, RegState::Kill) 2976 .addImm(IdxMode); 2977 SetOn->getOperand(3).setIsUndef(); 2978 } else { 2979 // Move index from VCC into M0 2980 if (Offset == 0) { 2981 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 2982 .addReg(CurrentIdxReg, RegState::Kill); 2983 } else { 2984 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 2985 .addReg(CurrentIdxReg, RegState::Kill) 2986 .addImm(Offset); 2987 } 2988 } 2989 2990 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 2991 MachineInstr *InsertPt = 2992 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC) 2993 .addReg(AMDGPU::EXEC) 2994 .addReg(NewExec); 2995 2996 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use 2997 // s_cbranch_scc0? 2998 2999 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover. 3000 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) 3001 .addMBB(&LoopBB); 3002 3003 return InsertPt->getIterator(); 3004 } 3005 3006 // This has slightly sub-optimal regalloc when the source vector is killed by 3007 // the read. 
The register allocator does not understand that the kill is 3008 // per-workitem, so is kept alive for the whole loop so we end up not re-using a 3009 // subregister from it, using 1 more VGPR than necessary. This was saved when 3010 // this was expanded after register allocation. 3011 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII, 3012 MachineBasicBlock &MBB, 3013 MachineInstr &MI, 3014 unsigned InitResultReg, 3015 unsigned PhiReg, 3016 int Offset, 3017 bool UseGPRIdxMode, 3018 bool IsIndirectSrc) { 3019 MachineFunction *MF = MBB.getParent(); 3020 MachineRegisterInfo &MRI = MF->getRegInfo(); 3021 const DebugLoc &DL = MI.getDebugLoc(); 3022 MachineBasicBlock::iterator I(&MI); 3023 3024 unsigned DstReg = MI.getOperand(0).getReg(); 3025 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 3026 unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 3027 3028 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec); 3029 3030 // Save the EXEC mask 3031 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec) 3032 .addReg(AMDGPU::EXEC); 3033 3034 // To insert the loop we need to split the block. Move everything after this 3035 // point to a new block, and insert a new empty block between the two. 3036 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock(); 3037 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock(); 3038 MachineFunction::iterator MBBI(MBB); 3039 ++MBBI; 3040 3041 MF->insert(MBBI, LoopBB); 3042 MF->insert(MBBI, RemainderBB); 3043 3044 LoopBB->addSuccessor(LoopBB); 3045 LoopBB->addSuccessor(RemainderBB); 3046 3047 // Move the rest of the block into a new block. 3048 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 3049 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); 3050 3051 MBB.addSuccessor(LoopBB); 3052 3053 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 3054 3055 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx, 3056 InitResultReg, DstReg, PhiReg, TmpExec, 3057 Offset, UseGPRIdxMode, IsIndirectSrc); 3058 3059 MachineBasicBlock::iterator First = RemainderBB->begin(); 3060 BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) 3061 .addReg(SaveExec); 3062 3063 return InsPt; 3064 } 3065 3066 // Returns subreg index, offset 3067 static std::pair<unsigned, int> 3068 computeIndirectRegAndOffset(const SIRegisterInfo &TRI, 3069 const TargetRegisterClass *SuperRC, 3070 unsigned VecReg, 3071 int Offset) { 3072 int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32; 3073 3074 // Skip out of bounds offsets, or else we would end up using an undefined 3075 // register. 3076 if (Offset >= NumElts || Offset < 0) 3077 return std::make_pair(AMDGPU::sub0, Offset); 3078 3079 return std::make_pair(AMDGPU::sub0 + Offset, 0); 3080 } 3081 3082 // Return true if the index is an SGPR and was set. 
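// Illustrative sketch of the two forms emitted below for a uniform (SGPR)
// index with a non-zero offset (register names are placeholders):
//   GPR indexing mode:  s_add_i32 s_tmp, s_idx, offset
//                       s_set_gpr_idx_on s_tmp, <SRC0 or DST enable>
//   M0 fallback:        s_add_i32 m0, s_idx, offset
// With a zero offset the add is skipped and the index register feeds m0 or
// s_set_gpr_idx_on directly.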
3083 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII, 3084 MachineRegisterInfo &MRI, 3085 MachineInstr &MI, 3086 int Offset, 3087 bool UseGPRIdxMode, 3088 bool IsIndirectSrc) { 3089 MachineBasicBlock *MBB = MI.getParent(); 3090 const DebugLoc &DL = MI.getDebugLoc(); 3091 MachineBasicBlock::iterator I(&MI); 3092 3093 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 3094 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); 3095 3096 assert(Idx->getReg() != AMDGPU::NoRegister); 3097 3098 if (!TII->getRegisterInfo().isSGPRClass(IdxRC)) 3099 return false; 3100 3101 if (UseGPRIdxMode) { 3102 unsigned IdxMode = IsIndirectSrc ? 3103 AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE; 3104 if (Offset == 0) { 3105 MachineInstr *SetOn = 3106 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 3107 .add(*Idx) 3108 .addImm(IdxMode); 3109 3110 SetOn->getOperand(3).setIsUndef(); 3111 } else { 3112 unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3113 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) 3114 .add(*Idx) 3115 .addImm(Offset); 3116 MachineInstr *SetOn = 3117 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 3118 .addReg(Tmp, RegState::Kill) 3119 .addImm(IdxMode); 3120 3121 SetOn->getOperand(3).setIsUndef(); 3122 } 3123 3124 return true; 3125 } 3126 3127 if (Offset == 0) { 3128 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 3129 .add(*Idx); 3130 } else { 3131 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 3132 .add(*Idx) 3133 .addImm(Offset); 3134 } 3135 3136 return true; 3137 } 3138 3139 // Control flow needs to be inserted if indexing with a VGPR. 3140 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI, 3141 MachineBasicBlock &MBB, 3142 const GCNSubtarget &ST) { 3143 const SIInstrInfo *TII = ST.getInstrInfo(); 3144 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 3145 MachineFunction *MF = MBB.getParent(); 3146 MachineRegisterInfo &MRI = MF->getRegInfo(); 3147 3148 unsigned Dst = MI.getOperand(0).getReg(); 3149 unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg(); 3150 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 3151 3152 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg); 3153 3154 unsigned SubReg; 3155 std::tie(SubReg, Offset) 3156 = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset); 3157 3158 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode); 3159 3160 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) { 3161 MachineBasicBlock::iterator I(&MI); 3162 const DebugLoc &DL = MI.getDebugLoc(); 3163 3164 if (UseGPRIdxMode) { 3165 // TODO: Look at the uses to avoid the copy. This may require rescheduling 3166 // to avoid interfering with other uses, so probably requires a new 3167 // optimization pass. 
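// For reference: together with the S_SET_GPR_IDX_ON already emitted by
// setM0ToIndexFromSGPR above, the fast path below amounts to roughly the
// following (a sketch with placeholder registers, not the exact emitted MIR):
//   s_set_gpr_idx_on s_idx, gpr_idx(SRC0)
//   v_mov_b32_e32 v_dst, v_vec_sub0      ; src0 is rebased by the index mode
//   s_set_gpr_idx_off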
3168 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) 3169 .addReg(SrcReg, RegState::Undef, SubReg) 3170 .addReg(SrcReg, RegState::Implicit) 3171 .addReg(AMDGPU::M0, RegState::Implicit); 3172 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 3173 } else { 3174 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 3175 .addReg(SrcReg, RegState::Undef, SubReg) 3176 .addReg(SrcReg, RegState::Implicit); 3177 } 3178 3179 MI.eraseFromParent(); 3180 3181 return &MBB; 3182 } 3183 3184 const DebugLoc &DL = MI.getDebugLoc(); 3185 MachineBasicBlock::iterator I(&MI); 3186 3187 unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3188 unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3189 3190 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg); 3191 3192 auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, 3193 Offset, UseGPRIdxMode, true); 3194 MachineBasicBlock *LoopBB = InsPt->getParent(); 3195 3196 if (UseGPRIdxMode) { 3197 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) 3198 .addReg(SrcReg, RegState::Undef, SubReg) 3199 .addReg(SrcReg, RegState::Implicit) 3200 .addReg(AMDGPU::M0, RegState::Implicit); 3201 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 3202 } else { 3203 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 3204 .addReg(SrcReg, RegState::Undef, SubReg) 3205 .addReg(SrcReg, RegState::Implicit); 3206 } 3207 3208 MI.eraseFromParent(); 3209 3210 return LoopBB; 3211 } 3212 3213 static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI, 3214 const TargetRegisterClass *VecRC) { 3215 switch (TRI.getRegSizeInBits(*VecRC)) { 3216 case 32: // 4 bytes 3217 return AMDGPU::V_MOVRELD_B32_V1; 3218 case 64: // 8 bytes 3219 return AMDGPU::V_MOVRELD_B32_V2; 3220 case 128: // 16 bytes 3221 return AMDGPU::V_MOVRELD_B32_V4; 3222 case 256: // 32 bytes 3223 return AMDGPU::V_MOVRELD_B32_V8; 3224 case 512: // 64 bytes 3225 return AMDGPU::V_MOVRELD_B32_V16; 3226 default: 3227 llvm_unreachable("unsupported size for MOVRELD pseudos"); 3228 } 3229 } 3230 3231 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, 3232 MachineBasicBlock &MBB, 3233 const GCNSubtarget &ST) { 3234 const SIInstrInfo *TII = ST.getInstrInfo(); 3235 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 3236 MachineFunction *MF = MBB.getParent(); 3237 MachineRegisterInfo &MRI = MF->getRegInfo(); 3238 3239 unsigned Dst = MI.getOperand(0).getReg(); 3240 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); 3241 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 3242 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); 3243 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 3244 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg()); 3245 3246 // This can be an immediate, but will be folded later. 
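// For orientation, the rest of this function picks one of three forms
// (summarized here as a sketch of the cases handled below):
//   - no index register (constant index already folded into the subregister):
//       a plain INSERT_SUBREG of Val into the vector;
//   - uniform (SGPR) index: V_MOV_B32_indirect under S_SET_GPR_IDX_ON, or a
//       V_MOVRELD_B32 pseudo driven through M0;
//   - divergent (VGPR) index: the same write wrapped in the waterfall loop
//       built by loadM0FromVGPR.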
3247 assert(Val->getReg()); 3248 3249 unsigned SubReg; 3250 std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC, 3251 SrcVec->getReg(), 3252 Offset); 3253 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode); 3254 3255 if (Idx->getReg() == AMDGPU::NoRegister) { 3256 MachineBasicBlock::iterator I(&MI); 3257 const DebugLoc &DL = MI.getDebugLoc(); 3258 3259 assert(Offset == 0); 3260 3261 BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst) 3262 .add(*SrcVec) 3263 .add(*Val) 3264 .addImm(SubReg); 3265 3266 MI.eraseFromParent(); 3267 return &MBB; 3268 } 3269 3270 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) { 3271 MachineBasicBlock::iterator I(&MI); 3272 const DebugLoc &DL = MI.getDebugLoc(); 3273 3274 if (UseGPRIdxMode) { 3275 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) 3276 .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst 3277 .add(*Val) 3278 .addReg(Dst, RegState::ImplicitDefine) 3279 .addReg(SrcVec->getReg(), RegState::Implicit) 3280 .addReg(AMDGPU::M0, RegState::Implicit); 3281 3282 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 3283 } else { 3284 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC)); 3285 3286 BuildMI(MBB, I, DL, MovRelDesc) 3287 .addReg(Dst, RegState::Define) 3288 .addReg(SrcVec->getReg()) 3289 .add(*Val) 3290 .addImm(SubReg - AMDGPU::sub0); 3291 } 3292 3293 MI.eraseFromParent(); 3294 return &MBB; 3295 } 3296 3297 if (Val->isReg()) 3298 MRI.clearKillFlags(Val->getReg()); 3299 3300 const DebugLoc &DL = MI.getDebugLoc(); 3301 3302 unsigned PhiReg = MRI.createVirtualRegister(VecRC); 3303 3304 auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, 3305 Offset, UseGPRIdxMode, false); 3306 MachineBasicBlock *LoopBB = InsPt->getParent(); 3307 3308 if (UseGPRIdxMode) { 3309 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) 3310 .addReg(PhiReg, RegState::Undef, SubReg) // vdst 3311 .add(*Val) // src0 3312 .addReg(Dst, RegState::ImplicitDefine) 3313 .addReg(PhiReg, RegState::Implicit) 3314 .addReg(AMDGPU::M0, RegState::Implicit); 3315 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 3316 } else { 3317 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC)); 3318 3319 BuildMI(*LoopBB, InsPt, DL, MovRelDesc) 3320 .addReg(Dst, RegState::Define) 3321 .addReg(PhiReg) 3322 .add(*Val) 3323 .addImm(SubReg - AMDGPU::sub0); 3324 } 3325 3326 MI.eraseFromParent(); 3327 3328 return LoopBB; 3329 } 3330 3331 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( 3332 MachineInstr &MI, MachineBasicBlock *BB) const { 3333 3334 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3335 MachineFunction *MF = BB->getParent(); 3336 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 3337 3338 if (TII->isMIMG(MI)) { 3339 if (MI.memoperands_empty() && MI.mayLoadOrStore()) { 3340 report_fatal_error("missing mem operand from MIMG instruction"); 3341 } 3342 // Add a memoperand for mimg instructions so that they aren't assumed to 3343 // be ordered memory instuctions. 
3344 3345 return BB; 3346 } 3347 3348 switch (MI.getOpcode()) { 3349 case AMDGPU::S_ADD_U64_PSEUDO: 3350 case AMDGPU::S_SUB_U64_PSEUDO: { 3351 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 3352 const DebugLoc &DL = MI.getDebugLoc(); 3353 3354 MachineOperand &Dest = MI.getOperand(0); 3355 MachineOperand &Src0 = MI.getOperand(1); 3356 MachineOperand &Src1 = MI.getOperand(2); 3357 3358 unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3359 unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3360 3361 MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, 3362 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0, 3363 &AMDGPU::SReg_32_XM0RegClass); 3364 MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI, 3365 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1, 3366 &AMDGPU::SReg_32_XM0RegClass); 3367 3368 MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, 3369 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0, 3370 &AMDGPU::SReg_32_XM0RegClass); 3371 MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI, 3372 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1, 3373 &AMDGPU::SReg_32_XM0RegClass); 3374 3375 bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 3376 3377 unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32; 3378 unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32; 3379 BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0) 3380 .add(Src0Sub0) 3381 .add(Src1Sub0); 3382 BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1) 3383 .add(Src0Sub1) 3384 .add(Src1Sub1); 3385 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg()) 3386 .addReg(DestSub0) 3387 .addImm(AMDGPU::sub0) 3388 .addReg(DestSub1) 3389 .addImm(AMDGPU::sub1); 3390 MI.eraseFromParent(); 3391 return BB; 3392 } 3393 case AMDGPU::SI_INIT_M0: { 3394 BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(), 3395 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 3396 .add(MI.getOperand(0)); 3397 MI.eraseFromParent(); 3398 return BB; 3399 } 3400 case AMDGPU::SI_INIT_EXEC: 3401 // This should be before all vector instructions. 3402 BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64), 3403 AMDGPU::EXEC) 3404 .addImm(MI.getOperand(0).getImm()); 3405 MI.eraseFromParent(); 3406 return BB; 3407 3408 case AMDGPU::SI_INIT_EXEC_FROM_INPUT: { 3409 // Extract the thread count from an SGPR input and set EXEC accordingly. 3410 // Since BFM can't shift by 64, handle that case with CMP + CMOV. 3411 // 3412 // S_BFE_U32 count, input, {shift, 7} 3413 // S_BFM_B64 exec, count, 0 3414 // S_CMP_EQ_U32 count, 64 3415 // S_CMOV_B64 exec, -1 3416 MachineInstr *FirstMI = &*BB->begin(); 3417 MachineRegisterInfo &MRI = MF->getRegInfo(); 3418 unsigned InputReg = MI.getOperand(0).getReg(); 3419 unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3420 bool Found = false; 3421 3422 // Move the COPY of the input reg to the beginning, so that we can use it. 3423 for (auto I = BB->begin(); I != &MI; I++) { 3424 if (I->getOpcode() != TargetOpcode::COPY || 3425 I->getOperand(0).getReg() != InputReg) 3426 continue; 3427 3428 if (I == FirstMI) { 3429 FirstMI = &*++BB->begin(); 3430 } else { 3431 I->removeFromParent(); 3432 BB->insert(FirstMI, &*I); 3433 } 3434 Found = true; 3435 break; 3436 } 3437 assert(Found); 3438 (void)Found; 3439 3440 // This should be before all vector instructions. 
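// A note on the S_BFE_U32 immediate built just below: the low bits of the
// operand hold the bit offset and bits [22:16] hold the field width, so
// (shift & 0x7f) | 0x70000 requests a 7-bit field starting at 'shift'.
// For example (illustrative values only), a shift of 8 gives the immediate
// 0x70008, i.e. the thread count is read from input[14:8].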
3441 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg) 3442 .addReg(InputReg) 3443 .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
3444 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64), 3445 AMDGPU::EXEC) 3446 .addReg(CountReg) 3447 .addImm(0);
3448 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32)) 3449 .addReg(CountReg, RegState::Kill) 3450 .addImm(64);
3451 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64), 3452 AMDGPU::EXEC) 3453 .addImm(-1);
3454 MI.eraseFromParent(); 3455 return BB; 3456 } 3457
3458 case AMDGPU::GET_GROUPSTATICSIZE: { 3459 DebugLoc DL = MI.getDebugLoc(); 3460 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32)) 3461 .add(MI.getOperand(0)) 3462 .addImm(MFI->getLDSSize()); 3463 MI.eraseFromParent(); 3464 return BB; 3465 }
3466 case AMDGPU::SI_INDIRECT_SRC_V1: 3467 case AMDGPU::SI_INDIRECT_SRC_V2: 3468 case AMDGPU::SI_INDIRECT_SRC_V4: 3469 case AMDGPU::SI_INDIRECT_SRC_V8: 3470 case AMDGPU::SI_INDIRECT_SRC_V16: 3471 return emitIndirectSrc(MI, *BB, *getSubtarget());
3472 case AMDGPU::SI_INDIRECT_DST_V1: 3473 case AMDGPU::SI_INDIRECT_DST_V2: 3474 case AMDGPU::SI_INDIRECT_DST_V4: 3475 case AMDGPU::SI_INDIRECT_DST_V8: 3476 case AMDGPU::SI_INDIRECT_DST_V16: 3477 return emitIndirectDst(MI, *BB, *getSubtarget());
3478 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: 3479 case AMDGPU::SI_KILL_I1_PSEUDO: 3480 return splitKillBlock(MI, BB);
3481 case AMDGPU::V_CNDMASK_B64_PSEUDO: { 3482 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 3483
3484 unsigned Dst = MI.getOperand(0).getReg(); 3485 unsigned Src0 = MI.getOperand(1).getReg(); 3486 unsigned Src1 = MI.getOperand(2).getReg(); 3487 const DebugLoc &DL = MI.getDebugLoc(); 3488 unsigned SrcCond = MI.getOperand(3).getReg(); 3489
3490 unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3491 unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3492 unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 3493
3494 BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy) 3495 .addReg(SrcCond);
3496 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo) 3497 .addImm(0) 3498 .addReg(Src0, 0, AMDGPU::sub0) 3499 .addImm(0) 3500 .addReg(Src1, 0, AMDGPU::sub0) 3501 .addReg(SrcCondCopy);
3502 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi) 3503 .addImm(0) 3504 .addReg(Src0, 0, AMDGPU::sub1) 3505 .addImm(0) 3506 .addReg(Src1, 0, AMDGPU::sub1) 3507 .addReg(SrcCondCopy); 3508
3509 BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst) 3510 .addReg(DstLo) 3511 .addImm(AMDGPU::sub0) 3512 .addReg(DstHi) 3513 .addImm(AMDGPU::sub1);
3514 MI.eraseFromParent(); 3515 return BB; 3516 }
3517 case AMDGPU::SI_BR_UNDEF: { 3518 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3519 const DebugLoc &DL = MI.getDebugLoc(); 3520 MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) 3521 .add(MI.getOperand(0)); 3522 Br->getOperand(1).setIsUndef(true); // read undef SCC 3523 MI.eraseFromParent(); 3524 return BB; 3525 }
3526 case AMDGPU::ADJCALLSTACKUP: 3527 case AMDGPU::ADJCALLSTACKDOWN: { 3528 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 3529 MachineInstrBuilder MIB(*MF, &MI); 3530
3531 // Add an implicit use of the frame offset reg to prevent the restore copy 3532 // inserted after the call from being reordered after stack operations in 3533 // the caller's frame.
3534 MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine) 3535 .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit) 3536 .addReg(Info->getFrameOffsetReg(), RegState::Implicit); 3537 return BB; 3538 } 3539 case AMDGPU::SI_CALL_ISEL: { 3540 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3541 const DebugLoc &DL = MI.getDebugLoc(); 3542 3543 unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF); 3544 3545 MachineInstrBuilder MIB; 3546 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg); 3547 3548 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) 3549 MIB.add(MI.getOperand(I)); 3550 3551 MIB.cloneMemRefs(MI); 3552 MI.eraseFromParent(); 3553 return BB; 3554 } 3555 case AMDGPU::V_ADD_I32_e32: 3556 case AMDGPU::V_SUB_I32_e32: 3557 case AMDGPU::V_SUBREV_I32_e32: { 3558 // TODO: Define distinct V_*_I32_Pseudo instructions instead. 3559 const DebugLoc &DL = MI.getDebugLoc(); 3560 unsigned Opc = MI.getOpcode(); 3561 3562 bool NeedClampOperand = false; 3563 if (TII->pseudoToMCOpcode(Opc) == -1) { 3564 Opc = AMDGPU::getVOPe64(Opc); 3565 NeedClampOperand = true; 3566 } 3567 3568 auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg()); 3569 if (TII->isVOP3(*I)) { 3570 I.addReg(AMDGPU::VCC, RegState::Define); 3571 } 3572 I.add(MI.getOperand(1)) 3573 .add(MI.getOperand(2)); 3574 if (NeedClampOperand) 3575 I.addImm(0); // clamp bit for e64 encoding 3576 3577 TII->legalizeOperands(*I); 3578 3579 MI.eraseFromParent(); 3580 return BB; 3581 } 3582 default: 3583 return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); 3584 } 3585 } 3586 3587 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const { 3588 return isTypeLegal(VT.getScalarType()); 3589 } 3590 3591 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const { 3592 // This currently forces unfolding various combinations of fsub into fma with 3593 // free fneg'd operands. As long as we have fast FMA (controlled by 3594 // isFMAFasterThanFMulAndFAdd), we should perform these. 3595 3596 // When fma is quarter rate, for f64 where add / sub are at best half rate, 3597 // most of these combines appear to be cycle neutral but save on instruction 3598 // count / code size. 3599 return true; 3600 } 3601 3602 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, 3603 EVT VT) const { 3604 if (!VT.isVector()) { 3605 return MVT::i1; 3606 } 3607 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements()); 3608 } 3609 3610 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const { 3611 // TODO: Should i16 be used always if legal? For now it would force VALU 3612 // shifts. 3613 return (VT == MVT::i16) ? MVT::i16 : MVT::i32; 3614 } 3615 3616 // Answering this is somewhat tricky and depends on the specific device which 3617 // have different rates for fma or all f64 operations. 3618 // 3619 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other 3620 // regardless of which device (although the number of cycles differs between 3621 // devices), so it is always profitable for f64. 3622 // 3623 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable 3624 // only on full rate devices. Normally, we should prefer selecting v_mad_f32 3625 // which we can always do even without fused FP ops since it returns the same 3626 // result as the separate operations and since it is always full 3627 // rate. Therefore, we lie and report that it is not faster for f32. 
v_mad_f32 3628 // however does not support denormals, so we do report fma as faster if we have 3629 // a fast fma device and require denormals. 3630 // 3631 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 3632 VT = VT.getScalarType(); 3633 3634 switch (VT.getSimpleVT().SimpleTy) { 3635 case MVT::f32: { 3636 // This is as fast on some subtargets. However, we always have full rate f32 3637 // mad available which returns the same result as the separate operations 3638 // which we should prefer over fma. We can't use this if we want to support 3639 // denormals, so only report this in these cases. 3640 if (Subtarget->hasFP32Denormals()) 3641 return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts(); 3642 3643 // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32. 3644 return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts(); 3645 } 3646 case MVT::f64: 3647 return true; 3648 case MVT::f16: 3649 return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals(); 3650 default: 3651 break; 3652 } 3653 3654 return false; 3655 } 3656 3657 //===----------------------------------------------------------------------===// 3658 // Custom DAG Lowering Operations 3659 //===----------------------------------------------------------------------===// 3660 3661 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the 3662 // wider vector type is legal. 3663 SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op, 3664 SelectionDAG &DAG) const { 3665 unsigned Opc = Op.getOpcode(); 3666 EVT VT = Op.getValueType(); 3667 assert(VT == MVT::v4f16); 3668 3669 SDValue Lo, Hi; 3670 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0); 3671 3672 SDLoc SL(Op); 3673 SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo, 3674 Op->getFlags()); 3675 SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi, 3676 Op->getFlags()); 3677 3678 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); 3679 } 3680 3681 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the 3682 // wider vector type is legal. 
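// As an illustration of the split (using the common case of a v4f16 result),
// a binary op such as
//   (fadd (v4f16 a), (v4f16 b))
// is rebuilt below as
//   (concat_vectors (fadd (v2f16 a.lo), (v2f16 b.lo)),
//                   (fadd (v2f16 a.hi), (v2f16 b.hi)))
// so each half stays a legal packed operation instead of being scalarized.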
3683 SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op, 3684 SelectionDAG &DAG) const { 3685 unsigned Opc = Op.getOpcode(); 3686 EVT VT = Op.getValueType(); 3687 assert(VT == MVT::v4i16 || VT == MVT::v4f16); 3688 3689 SDValue Lo0, Hi0; 3690 std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0); 3691 SDValue Lo1, Hi1; 3692 std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1); 3693 3694 SDLoc SL(Op); 3695 3696 SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, 3697 Op->getFlags()); 3698 SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, 3699 Op->getFlags()); 3700 3701 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); 3702 } 3703 3704 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 3705 switch (Op.getOpcode()) { 3706 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); 3707 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 3708 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 3709 case ISD::LOAD: { 3710 SDValue Result = LowerLOAD(Op, DAG); 3711 assert((!Result.getNode() || 3712 Result.getNode()->getNumValues() == 2) && 3713 "Load should return a value and a chain"); 3714 return Result; 3715 } 3716 3717 case ISD::FSIN: 3718 case ISD::FCOS: 3719 return LowerTrig(Op, DAG); 3720 case ISD::SELECT: return LowerSELECT(Op, DAG); 3721 case ISD::FDIV: return LowerFDIV(Op, DAG); 3722 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG); 3723 case ISD::STORE: return LowerSTORE(Op, DAG); 3724 case ISD::GlobalAddress: { 3725 MachineFunction &MF = DAG.getMachineFunction(); 3726 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 3727 return LowerGlobalAddress(MFI, Op, DAG); 3728 } 3729 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 3730 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 3731 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); 3732 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG); 3733 case ISD::INSERT_VECTOR_ELT: 3734 return lowerINSERT_VECTOR_ELT(Op, DAG); 3735 case ISD::EXTRACT_VECTOR_ELT: 3736 return lowerEXTRACT_VECTOR_ELT(Op, DAG); 3737 case ISD::BUILD_VECTOR: 3738 return lowerBUILD_VECTOR(Op, DAG); 3739 case ISD::FP_ROUND: 3740 return lowerFP_ROUND(Op, DAG); 3741 case ISD::TRAP: 3742 return lowerTRAP(Op, DAG); 3743 case ISD::DEBUGTRAP: 3744 return lowerDEBUGTRAP(Op, DAG); 3745 case ISD::FABS: 3746 case ISD::FNEG: 3747 case ISD::FCANONICALIZE: 3748 return splitUnaryVectorOp(Op, DAG); 3749 case ISD::FMINNUM: 3750 case ISD::FMAXNUM: 3751 return lowerFMINNUM_FMAXNUM(Op, DAG); 3752 case ISD::SHL: 3753 case ISD::SRA: 3754 case ISD::SRL: 3755 case ISD::ADD: 3756 case ISD::SUB: 3757 case ISD::MUL: 3758 case ISD::SMIN: 3759 case ISD::SMAX: 3760 case ISD::UMIN: 3761 case ISD::UMAX: 3762 case ISD::FADD: 3763 case ISD::FMUL: 3764 case ISD::FMINNUM_IEEE: 3765 case ISD::FMAXNUM_IEEE: 3766 return splitBinaryVectorOp(Op, DAG); 3767 } 3768 return SDValue(); 3769 } 3770 3771 static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT, 3772 const SDLoc &DL, 3773 SelectionDAG &DAG, bool Unpacked) { 3774 if (!LoadVT.isVector()) 3775 return Result; 3776 3777 if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16. 3778 // Truncate to v2i16/v4i16. 3779 EVT IntLoadVT = LoadVT.changeTypeToInteger(); 3780 3781 // Workaround legalizer not scalarizing truncate after vector op 3782 // legalization byt not creating intermediate vector trunc. 
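// Concretely (an illustrative case): an unpacked d16 load of v4f16 reaches
// this point typed as v4i32; the loop below truncates each lane to i16,
// rebuilds a v4i16 with getBuildVector, and the final bitcast returns the
// requested v4f16.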
3783 SmallVector<SDValue, 4> Elts; 3784 DAG.ExtractVectorElements(Result, Elts); 3785 for (SDValue &Elt : Elts) 3786 Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt); 3787 3788 Result = DAG.getBuildVector(IntLoadVT, DL, Elts); 3789 3790 // Bitcast to original type (v2f16/v4f16). 3791 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result); 3792 } 3793 3794 // Cast back to the original packed type. 3795 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result); 3796 } 3797 3798 SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode, 3799 MemSDNode *M, 3800 SelectionDAG &DAG, 3801 ArrayRef<SDValue> Ops, 3802 bool IsIntrinsic) const { 3803 SDLoc DL(M); 3804 3805 bool Unpacked = Subtarget->hasUnpackedD16VMem(); 3806 EVT LoadVT = M->getValueType(0); 3807 3808 EVT EquivLoadVT = LoadVT; 3809 if (Unpacked && LoadVT.isVector()) { 3810 EquivLoadVT = LoadVT.isVector() ? 3811 EVT::getVectorVT(*DAG.getContext(), MVT::i32, 3812 LoadVT.getVectorNumElements()) : LoadVT; 3813 } 3814 3815 // Change from v4f16/v2f16 to EquivLoadVT. 3816 SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other); 3817 3818 SDValue Load 3819 = DAG.getMemIntrinsicNode( 3820 IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL, 3821 VTList, Ops, M->getMemoryVT(), 3822 M->getMemOperand()); 3823 if (!Unpacked) // Just adjusted the opcode. 3824 return Load; 3825 3826 SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked); 3827 3828 return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL); 3829 } 3830 3831 static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI, 3832 SDNode *N, SelectionDAG &DAG) { 3833 EVT VT = N->getValueType(0); 3834 const auto *CD = cast<ConstantSDNode>(N->getOperand(3)); 3835 int CondCode = CD->getSExtValue(); 3836 if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE || 3837 CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE) 3838 return DAG.getUNDEF(VT); 3839 3840 ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode); 3841 3842 3843 SDValue LHS = N->getOperand(1); 3844 SDValue RHS = N->getOperand(2); 3845 3846 SDLoc DL(N); 3847 3848 EVT CmpVT = LHS.getValueType(); 3849 if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) { 3850 unsigned PromoteOp = ICmpInst::isSigned(IcInput) ? 
3851 ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 3852 LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS); 3853 RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS); 3854 } 3855 3856 ISD::CondCode CCOpcode = getICmpCondCode(IcInput); 3857 3858 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, LHS, RHS, 3859 DAG.getCondCode(CCOpcode)); 3860 } 3861 3862 static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI, 3863 SDNode *N, SelectionDAG &DAG) { 3864 EVT VT = N->getValueType(0); 3865 const auto *CD = cast<ConstantSDNode>(N->getOperand(3)); 3866 3867 int CondCode = CD->getSExtValue(); 3868 if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE || 3869 CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) { 3870 return DAG.getUNDEF(VT); 3871 } 3872 3873 SDValue Src0 = N->getOperand(1); 3874 SDValue Src1 = N->getOperand(2); 3875 EVT CmpVT = Src0.getValueType(); 3876 SDLoc SL(N); 3877 3878 if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) { 3879 Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); 3880 Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); 3881 } 3882 3883 FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode); 3884 ISD::CondCode CCOpcode = getFCmpCondCode(IcInput); 3885 return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src0, 3886 Src1, DAG.getCondCode(CCOpcode)); 3887 } 3888 3889 void SITargetLowering::ReplaceNodeResults(SDNode *N, 3890 SmallVectorImpl<SDValue> &Results, 3891 SelectionDAG &DAG) const { 3892 switch (N->getOpcode()) { 3893 case ISD::INSERT_VECTOR_ELT: { 3894 if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG)) 3895 Results.push_back(Res); 3896 return; 3897 } 3898 case ISD::EXTRACT_VECTOR_ELT: { 3899 if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG)) 3900 Results.push_back(Res); 3901 return; 3902 } 3903 case ISD::INTRINSIC_WO_CHAIN: { 3904 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 3905 switch (IID) { 3906 case Intrinsic::amdgcn_cvt_pkrtz: { 3907 SDValue Src0 = N->getOperand(1); 3908 SDValue Src1 = N->getOperand(2); 3909 SDLoc SL(N); 3910 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32, 3911 Src0, Src1); 3912 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt)); 3913 return; 3914 } 3915 case Intrinsic::amdgcn_cvt_pknorm_i16: 3916 case Intrinsic::amdgcn_cvt_pknorm_u16: 3917 case Intrinsic::amdgcn_cvt_pk_i16: 3918 case Intrinsic::amdgcn_cvt_pk_u16: { 3919 SDValue Src0 = N->getOperand(1); 3920 SDValue Src1 = N->getOperand(2); 3921 SDLoc SL(N); 3922 unsigned Opcode; 3923 3924 if (IID == Intrinsic::amdgcn_cvt_pknorm_i16) 3925 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; 3926 else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16) 3927 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; 3928 else if (IID == Intrinsic::amdgcn_cvt_pk_i16) 3929 Opcode = AMDGPUISD::CVT_PK_I16_I32; 3930 else 3931 Opcode = AMDGPUISD::CVT_PK_U16_U32; 3932 3933 EVT VT = N->getValueType(0); 3934 if (isTypeLegal(VT)) 3935 Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1)); 3936 else { 3937 SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1); 3938 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt)); 3939 } 3940 return; 3941 } 3942 } 3943 break; 3944 } 3945 case ISD::INTRINSIC_W_CHAIN: { 3946 if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) { 3947 Results.push_back(Res); 3948 Results.push_back(Res.getValue(1)); 3949 return; 3950 } 3951 3952 break; 3953 } 3954 case ISD::SELECT: { 3955 SDLoc SL(N); 3956 EVT VT = N->getValueType(0); 3957 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); 
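// Sketch of the rewrite performed below, using f16 as an illustrative result
// type (assuming getEquivalentMemType maps it to i16 here):
//   (select cc, (f16 x), (f16 y))
//     -> (bitcast f16 (trunc i16 (select cc, (any_extend i32 (bitcast i16 x)),
//                                            (any_extend i32 (bitcast i16 y)))))
// The any_extend/trunc pair is only inserted when NewVT is narrower than i32.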
3958 SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1)); 3959 SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2)); 3960 3961 EVT SelectVT = NewVT; 3962 if (NewVT.bitsLT(MVT::i32)) { 3963 LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS); 3964 RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS); 3965 SelectVT = MVT::i32; 3966 } 3967 3968 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT, 3969 N->getOperand(0), LHS, RHS); 3970 3971 if (NewVT != SelectVT) 3972 NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect); 3973 Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect)); 3974 return; 3975 } 3976 case ISD::FNEG: { 3977 if (N->getValueType(0) != MVT::v2f16) 3978 break; 3979 3980 SDLoc SL(N); 3981 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0)); 3982 3983 SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32, 3984 BC, 3985 DAG.getConstant(0x80008000, SL, MVT::i32)); 3986 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op)); 3987 return; 3988 } 3989 case ISD::FABS: { 3990 if (N->getValueType(0) != MVT::v2f16) 3991 break; 3992 3993 SDLoc SL(N); 3994 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0)); 3995 3996 SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32, 3997 BC, 3998 DAG.getConstant(0x7fff7fff, SL, MVT::i32)); 3999 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op)); 4000 return; 4001 } 4002 default: 4003 break; 4004 } 4005 } 4006 4007 /// Helper function for LowerBRCOND 4008 static SDNode *findUser(SDValue Value, unsigned Opcode) { 4009 4010 SDNode *Parent = Value.getNode(); 4011 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); 4012 I != E; ++I) { 4013 4014 if (I.getUse().get() != Value) 4015 continue; 4016 4017 if (I->getOpcode() == Opcode) 4018 return *I; 4019 } 4020 return nullptr; 4021 } 4022 4023 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { 4024 if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 4025 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) { 4026 case Intrinsic::amdgcn_if: 4027 return AMDGPUISD::IF; 4028 case Intrinsic::amdgcn_else: 4029 return AMDGPUISD::ELSE; 4030 case Intrinsic::amdgcn_loop: 4031 return AMDGPUISD::LOOP; 4032 case Intrinsic::amdgcn_end_cf: 4033 llvm_unreachable("should not occur"); 4034 default: 4035 return 0; 4036 } 4037 } 4038 4039 // break, if_break, else_break are all only used as inputs to loop, not 4040 // directly as branch conditions. 4041 return 0; 4042 } 4043 4044 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const { 4045 const Triple &TT = getTargetMachine().getTargetTriple(); 4046 return (GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || 4047 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) && 4048 AMDGPU::shouldEmitConstantsToTextSection(TT); 4049 } 4050 4051 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const { 4052 // FIXME: Either avoid relying on address space here or change the default 4053 // address space for functions to avoid the explicit check. 
4054 return (GV->getValueType()->isFunctionTy() || 4055 GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS || 4056 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || 4057 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) && 4058 !shouldEmitFixup(GV) && 4059 !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); 4060 } 4061
4062 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const { 4063 return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV); 4064 } 4065
4066 /// This transforms the control flow intrinsics to get the branch destination as 4067 /// the last parameter, and also switches the branch target with BR if the need arises.
4068 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, 4069 SelectionDAG &DAG) const { 4070 SDLoc DL(BRCOND); 4071
4072 SDNode *Intr = BRCOND.getOperand(1).getNode(); 4073 SDValue Target = BRCOND.getOperand(2); 4074 SDNode *BR = nullptr; 4075 SDNode *SetCC = nullptr; 4076
4077 if (Intr->getOpcode() == ISD::SETCC) { 4078 // As long as we negate the condition, everything is fine. 4079 SetCC = Intr; 4080 Intr = SetCC->getOperand(0).getNode(); 4081
4082 } else { 4083 // Get the target from BR if we don't negate the condition. 4084 BR = findUser(BRCOND, ISD::BR); 4085 Target = BR->getOperand(1); 4086 } 4087
4088 // FIXME: This changes the types of the intrinsics instead of introducing new 4089 // nodes with the correct types. 4090 // e.g. llvm.amdgcn.loop 4091
4092 // e.g.: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3 4093 // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088> 4094
4095 unsigned CFNode = isCFIntrinsic(Intr); 4096 if (CFNode == 0) { 4097 // This is a uniform branch so we don't need to legalize. 4098 return BRCOND; 4099 } 4100
4101 bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID || 4102 Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN; 4103
4104 assert(!SetCC || 4105 (SetCC->getConstantOperandVal(1) == 1 && 4106 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() == 4107 ISD::SETNE)); 4108
4109 // Operands of the new intrinsic call. 4110 SmallVector<SDValue, 4> Ops; 4111 if (HaveChain) 4112 Ops.push_back(BRCOND.getOperand(0)); 4113
4114 Ops.append(Intr->op_begin() + (HaveChain ?
2 : 1), Intr->op_end()); 4115 Ops.push_back(Target); 4116 4117 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end()); 4118 4119 // build the new intrinsic call 4120 SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode(); 4121 4122 if (!HaveChain) { 4123 SDValue Ops[] = { 4124 SDValue(Result, 0), 4125 BRCOND.getOperand(0) 4126 }; 4127 4128 Result = DAG.getMergeValues(Ops, DL).getNode(); 4129 } 4130 4131 if (BR) { 4132 // Give the branch instruction our target 4133 SDValue Ops[] = { 4134 BR->getOperand(0), 4135 BRCOND.getOperand(2) 4136 }; 4137 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops); 4138 DAG.ReplaceAllUsesWith(BR, NewBR.getNode()); 4139 BR = NewBR.getNode(); 4140 } 4141 4142 SDValue Chain = SDValue(Result, Result->getNumValues() - 1); 4143 4144 // Copy the intrinsic results to registers 4145 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) { 4146 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg); 4147 if (!CopyToReg) 4148 continue; 4149 4150 Chain = DAG.getCopyToReg( 4151 Chain, DL, 4152 CopyToReg->getOperand(1), 4153 SDValue(Result, i - 1), 4154 SDValue()); 4155 4156 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0)); 4157 } 4158 4159 // Remove the old intrinsic from the chain 4160 DAG.ReplaceAllUsesOfValueWith( 4161 SDValue(Intr, Intr->getNumValues() - 1), 4162 Intr->getOperand(0)); 4163 4164 return Chain; 4165 } 4166 4167 SDValue SITargetLowering::LowerRETURNADDR(SDValue Op, 4168 SelectionDAG &DAG) const { 4169 MVT VT = Op.getSimpleValueType(); 4170 SDLoc DL(Op); 4171 // Checking the depth 4172 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) 4173 return DAG.getConstant(0, DL, VT); 4174 4175 MachineFunction &MF = DAG.getMachineFunction(); 4176 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 4177 // Check for kernel and shader functions 4178 if (Info->isEntryFunction()) 4179 return DAG.getConstant(0, DL, VT); 4180 4181 MachineFrameInfo &MFI = MF.getFrameInfo(); 4182 // There is a call to @llvm.returnaddress in this function 4183 MFI.setReturnAddressIsTaken(true); 4184 4185 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 4186 // Get the return address reg and mark it as an implicit live-in 4187 unsigned Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF), getRegClassFor(VT, Op.getNode()->isDivergent())); 4188 4189 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT); 4190 } 4191 4192 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG, 4193 SDValue Op, 4194 const SDLoc &DL, 4195 EVT VT) const { 4196 return Op.getValueType().bitsLE(VT) ? 
4197 DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) : 4198 DAG.getNode(ISD::FTRUNC, DL, VT, Op); 4199 } 4200
4201 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { 4202 assert(Op.getValueType() == MVT::f16 && 4203 "Do not know how to custom lower FP_ROUND for non-f16 type"); 4204
4205 SDValue Src = Op.getOperand(0); 4206 EVT SrcVT = Src.getValueType(); 4207 if (SrcVT != MVT::f64) 4208 return Op; 4209
4210 SDLoc DL(Op); 4211
4212 SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src); 4213 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16); 4214 return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc); 4215 } 4216
4217 SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op, 4218 SelectionDAG &DAG) const { 4219 EVT VT = Op.getValueType(); 4220 const MachineFunction &MF = DAG.getMachineFunction(); 4221 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 4222 bool IsIEEEMode = Info->getMode().IEEE; 4223
4224 // FIXME: Assert during selection that this is only selected for 4225 // ieee_mode. Currently a combine can produce the ieee version for non-ieee 4226 // mode functions, but this happens to be OK since it's only done in cases 4227 // where there is known to be no sNaN. 4228 if (IsIEEEMode) 4229 return expandFMINNUM_FMAXNUM(Op.getNode(), DAG); 4230
4231 if (VT == MVT::v4f16) 4232 return splitBinaryVectorOp(Op, DAG); 4233 return Op; 4234 } 4235
4236 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const { 4237 SDLoc SL(Op); 4238 SDValue Chain = Op.getOperand(0); 4239
4240 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa || 4241 !Subtarget->isTrapHandlerEnabled()) 4242 return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain); 4243
4244 MachineFunction &MF = DAG.getMachineFunction(); 4245 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 4246 unsigned UserSGPR = Info->getQueuePtrUserSGPR(); 4247 assert(UserSGPR != AMDGPU::NoRegister); 4248 SDValue QueuePtr = CreateLiveInRegister( 4249 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); 4250 SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64); 4251 SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01, 4252 QueuePtr, SDValue()); 4253 SDValue Ops[] = { 4254 ToReg, 4255 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16), 4256 SGPR01, 4257 ToReg.getValue(1) 4258 }; 4259 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); 4260 } 4261
4262 SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const { 4263 SDLoc SL(Op); 4264 SDValue Chain = Op.getOperand(0); 4265 MachineFunction &MF = DAG.getMachineFunction(); 4266
4267 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa || 4268 !Subtarget->isTrapHandlerEnabled()) { 4269 DiagnosticInfoUnsupported NoTrap(MF.getFunction(), 4270 "debugtrap handler not supported", 4271 Op.getDebugLoc(), 4272 DS_Warning); 4273 LLVMContext &Ctx = MF.getFunction().getContext(); 4274 Ctx.diagnose(NoTrap); 4275 return Chain; 4276 } 4277
4278 SDValue Ops[] = { 4279 Chain, 4280 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16) 4281 }; 4282 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); 4283 } 4284
4285 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL, 4286 SelectionDAG &DAG) const { 4287 // FIXME: Use inline constants (src_{shared, private}_base) instead. 4288 if (Subtarget->hasApertureRegs()) { 4289 unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
4290 AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE : 4291 AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE; 4292 unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ? 4293 AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE : 4294 AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE; 4295 unsigned Encoding = 4296 AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ | 4297 Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ | 4298 WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_; 4299 4300 SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16); 4301 SDValue ApertureReg = SDValue( 4302 DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0); 4303 SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32); 4304 return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount); 4305 } 4306 4307 MachineFunction &MF = DAG.getMachineFunction(); 4308 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 4309 unsigned UserSGPR = Info->getQueuePtrUserSGPR(); 4310 assert(UserSGPR != AMDGPU::NoRegister); 4311 4312 SDValue QueuePtr = CreateLiveInRegister( 4313 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); 4314 4315 // Offset into amd_queue_t for group_segment_aperture_base_hi / 4316 // private_segment_aperture_base_hi. 4317 uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44; 4318 4319 SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset); 4320 4321 // TODO: Use custom target PseudoSourceValue. 4322 // TODO: We should use the value from the IR intrinsic call, but it might not 4323 // be available and how do we get it? 4324 Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()), 4325 AMDGPUAS::CONSTANT_ADDRESS)); 4326 4327 MachinePointerInfo PtrInfo(V, StructOffset); 4328 return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo, 4329 MinAlign(64, StructOffset), 4330 MachineMemOperand::MODereferenceable | 4331 MachineMemOperand::MOInvariant); 4332 } 4333 4334 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, 4335 SelectionDAG &DAG) const { 4336 SDLoc SL(Op); 4337 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op); 4338 4339 SDValue Src = ASC->getOperand(0); 4340 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64); 4341 4342 const AMDGPUTargetMachine &TM = 4343 static_cast<const AMDGPUTargetMachine &>(getTargetMachine()); 4344 4345 // flat -> local/private 4346 if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { 4347 unsigned DestAS = ASC->getDestAddressSpace(); 4348 4349 if (DestAS == AMDGPUAS::LOCAL_ADDRESS || 4350 DestAS == AMDGPUAS::PRIVATE_ADDRESS) { 4351 unsigned NullVal = TM.getNullPointerValue(DestAS); 4352 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); 4353 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE); 4354 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); 4355 4356 return DAG.getNode(ISD::SELECT, SL, MVT::i32, 4357 NonNull, Ptr, SegmentNullPtr); 4358 } 4359 } 4360 4361 // local/private -> flat 4362 if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { 4363 unsigned SrcAS = ASC->getSrcAddressSpace(); 4364 4365 if (SrcAS == AMDGPUAS::LOCAL_ADDRESS || 4366 SrcAS == AMDGPUAS::PRIVATE_ADDRESS) { 4367 unsigned NullVal = TM.getNullPointerValue(SrcAS); 4368 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); 4369 4370 SDValue NonNull 4371 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE); 4372 4373 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG); 4374 SDValue CvtPtr 4375 = 
DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture); 4376 4377 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, 4378 DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr), 4379 FlatNullPtr); 4380 } 4381 } 4382 4383 // global <-> flat are no-ops and never emitted. 4384 4385 const MachineFunction &MF = DAG.getMachineFunction(); 4386 DiagnosticInfoUnsupported InvalidAddrSpaceCast( 4387 MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc()); 4388 DAG.getContext()->diagnose(InvalidAddrSpaceCast); 4389 4390 return DAG.getUNDEF(ASC->getValueType(0)); 4391 } 4392 4393 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, 4394 SelectionDAG &DAG) const { 4395 SDValue Vec = Op.getOperand(0); 4396 SDValue InsVal = Op.getOperand(1); 4397 SDValue Idx = Op.getOperand(2); 4398 EVT VecVT = Vec.getValueType(); 4399 EVT EltVT = VecVT.getVectorElementType(); 4400 unsigned VecSize = VecVT.getSizeInBits(); 4401 unsigned EltSize = EltVT.getSizeInBits(); 4402 4403 4404 assert(VecSize <= 64); 4405 4406 unsigned NumElts = VecVT.getVectorNumElements(); 4407 SDLoc SL(Op); 4408 auto KIdx = dyn_cast<ConstantSDNode>(Idx); 4409 4410 if (NumElts == 4 && EltSize == 16 && KIdx) { 4411 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec); 4412 4413 SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec, 4414 DAG.getConstant(0, SL, MVT::i32)); 4415 SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec, 4416 DAG.getConstant(1, SL, MVT::i32)); 4417 4418 SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf); 4419 SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf); 4420 4421 unsigned Idx = KIdx->getZExtValue(); 4422 bool InsertLo = Idx < 2; 4423 SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16, 4424 InsertLo ? LoVec : HiVec, 4425 DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal), 4426 DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32)); 4427 4428 InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf); 4429 4430 SDValue Concat = InsertLo ? 4431 DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) : 4432 DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf }); 4433 4434 return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat); 4435 } 4436 4437 if (isa<ConstantSDNode>(Idx)) 4438 return SDValue(); 4439 4440 MVT IntVT = MVT::getIntegerVT(VecSize); 4441 4442 // Avoid stack access for dynamic indexing. 4443 // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec 4444 4445 // Create a congruent vector with the target value in each element so that 4446 // the required element can be masked and ORed into the target vector. 4447 SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT, 4448 DAG.getSplatBuildVector(VecVT, SL, InsVal)); 4449 4450 assert(isPowerOf2_32(EltSize)); 4451 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32); 4452 4453 // Convert vector index to bit-index. 
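// Worked example (illustrative, for a v4i16 vector so EltSize == 16): a
// dynamic index of 2 is shifted left by log2(16) == 4 to give a bit-index of
// 32, BFM forms the mask 0xffff << 32, and the AND/OR sequence below merges
// the splatted value into just those 16 bits of the bitcast vector while
// keeping the remaining lanes from the original vector.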
4454 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor); 4455 4456 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec); 4457 SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT, 4458 DAG.getConstant(0xffff, SL, IntVT), 4459 ScaledIdx); 4460 4461 SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal); 4462 SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT, 4463 DAG.getNOT(SL, BFM, IntVT), BCVec); 4464 4465 SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS); 4466 return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI); 4467 } 4468 4469 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, 4470 SelectionDAG &DAG) const { 4471 SDLoc SL(Op); 4472 4473 EVT ResultVT = Op.getValueType(); 4474 SDValue Vec = Op.getOperand(0); 4475 SDValue Idx = Op.getOperand(1); 4476 EVT VecVT = Vec.getValueType(); 4477 unsigned VecSize = VecVT.getSizeInBits(); 4478 EVT EltVT = VecVT.getVectorElementType(); 4479 assert(VecSize <= 64); 4480 4481 DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr); 4482 4483 // Make sure we do any optimizations that will make it easier to fold 4484 // source modifiers before obscuring it with bit operations. 4485 4486 // XXX - Why doesn't this get called when vector_shuffle is expanded? 4487 if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI)) 4488 return Combined; 4489 4490 unsigned EltSize = EltVT.getSizeInBits(); 4491 assert(isPowerOf2_32(EltSize)); 4492 4493 MVT IntVT = MVT::getIntegerVT(VecSize); 4494 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32); 4495 4496 // Convert vector index to bit-index (* EltSize) 4497 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor); 4498 4499 SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec); 4500 SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx); 4501 4502 if (ResultVT == MVT::f16) { 4503 SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt); 4504 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result); 4505 } 4506 4507 return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT); 4508 } 4509 4510 SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op, 4511 SelectionDAG &DAG) const { 4512 SDLoc SL(Op); 4513 EVT VT = Op.getValueType(); 4514 4515 if (VT == MVT::v4i16 || VT == MVT::v4f16) { 4516 EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2); 4517 4518 // Turn into pair of packed build_vectors. 4519 // TODO: Special case for constants that can be materialized with s_mov_b64. 4520 SDValue Lo = DAG.getBuildVector(HalfVT, SL, 4521 { Op.getOperand(0), Op.getOperand(1) }); 4522 SDValue Hi = DAG.getBuildVector(HalfVT, SL, 4523 { Op.getOperand(2), Op.getOperand(3) }); 4524 4525 SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo); 4526 SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi); 4527 4528 SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi }); 4529 return DAG.getNode(ISD::BITCAST, SL, VT, Blend); 4530 } 4531 4532 assert(VT == MVT::v2f16 || VT == MVT::v2i16); 4533 assert(!Subtarget->hasVOP3PInsts() && "this should be legal"); 4534 4535 SDValue Lo = Op.getOperand(0); 4536 SDValue Hi = Op.getOperand(1); 4537 4538 // Avoid adding defined bits with the zero_extend. 
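// For a fully defined Lo and Hi, the code below builds (a DAG-level sketch):
//   (bitcast VT (or (zext i32 (bitcast i16 Lo)),
//                   (shl (zext i32 (bitcast i16 Hi)), 16)))
// When Hi is undef, Lo is only any_extended (no zero_extend of undefined
// bits); when Lo is undef, just the shifted Hi is used.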
4539 if (Hi.isUndef()) { 4540 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo); 4541 SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo); 4542 return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo); 4543 } 4544 4545 Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi); 4546 Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi); 4547 4548 SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi, 4549 DAG.getConstant(16, SL, MVT::i32)); 4550 if (Lo.isUndef()) 4551 return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi); 4552 4553 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo); 4554 Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo); 4555 4556 SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi); 4557 return DAG.getNode(ISD::BITCAST, SL, VT, Or); 4558 } 4559 4560 bool 4561 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 4562 // We can fold offsets for anything that doesn't require a GOT relocation. 4563 return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS || 4564 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || 4565 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) && 4566 !shouldEmitGOTReloc(GA->getGlobal()); 4567 } 4568 4569 static SDValue 4570 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV, 4571 const SDLoc &DL, unsigned Offset, EVT PtrVT, 4572 unsigned GAFlags = SIInstrInfo::MO_NONE) { 4573 // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is 4574 // lowered to the following code sequence: 4575 // 4576 // For constant address space: 4577 // s_getpc_b64 s[0:1] 4578 // s_add_u32 s0, s0, $symbol 4579 // s_addc_u32 s1, s1, 0 4580 // 4581 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 4582 // a fixup or relocation is emitted to replace $symbol with a literal 4583 // constant, which is a pc-relative offset from the encoding of the $symbol 4584 // operand to the global variable. 4585 // 4586 // For global address space: 4587 // s_getpc_b64 s[0:1] 4588 // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo 4589 // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi 4590 // 4591 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 4592 // fixups or relocations are emitted to replace $symbol@*@lo and 4593 // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant, 4594 // which is a 64-bit pc-relative offset from the encoding of the $symbol 4595 // operand to the global variable. 4596 // 4597 // What we want here is an offset from the value returned by s_getpc 4598 // (which is the address of the s_add_u32 instruction) to the global 4599 // variable, but since the encoding of $symbol starts 4 bytes after the start 4600 // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too 4601 // small. This requires us to add 4 to the global variable offset in order to 4602 // compute the correct address. 4603 SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, 4604 GAFlags); 4605 SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, 4606 GAFlags == SIInstrInfo::MO_NONE ? 
4607 GAFlags : GAFlags + 1); 4608 return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi); 4609 } 4610 4611 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, 4612 SDValue Op, 4613 SelectionDAG &DAG) const { 4614 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op); 4615 const GlobalValue *GV = GSD->getGlobal(); 4616 if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || 4617 GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS || 4618 GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) 4619 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG); 4620 4621 SDLoc DL(GSD); 4622 EVT PtrVT = Op.getValueType(); 4623 4624 // FIXME: Should not make address space based decisions here. 4625 if (shouldEmitFixup(GV)) 4626 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT); 4627 else if (shouldEmitPCReloc(GV)) 4628 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT, 4629 SIInstrInfo::MO_REL32); 4630 4631 SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT, 4632 SIInstrInfo::MO_GOTPCREL32); 4633 4634 Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext()); 4635 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); 4636 const DataLayout &DataLayout = DAG.getDataLayout(); 4637 unsigned Align = DataLayout.getABITypeAlignment(PtrTy); 4638 MachinePointerInfo PtrInfo 4639 = MachinePointerInfo::getGOT(DAG.getMachineFunction()); 4640 4641 return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align, 4642 MachineMemOperand::MODereferenceable | 4643 MachineMemOperand::MOInvariant); 4644 } 4645 4646 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, 4647 const SDLoc &DL, SDValue V) const { 4648 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as 4649 // the destination register. 4650 // 4651 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions, 4652 // so we will end up with redundant moves to m0. 4653 // 4654 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result. 4655 4656 // A Null SDValue creates a glue result. 4657 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue, 4658 V, Chain); 4659 return SDValue(M0, 0); 4660 } 4661 4662 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, 4663 SDValue Op, 4664 MVT VT, 4665 unsigned Offset) const { 4666 SDLoc SL(Op); 4667 SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL, 4668 DAG.getEntryNode(), Offset, 4, false); 4669 // The local size values will have the hi 16-bits as zero. 
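// For instance (illustrative), with VT == MVT::i16 the node returned below is
//   (AssertZext (i32 kernarg load), ValueType:i16)
// which records that bits [31:16] are zero without emitting an explicit mask.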
  return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
                     DAG.getValueType(VT));
}

static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
                                        EVT VT) {
  DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
                                      "non-hsa intrinsic with hsa target",
                                      DL.getDebugLoc());
  DAG.getContext()->diagnose(BadIntrin);
  return DAG.getUNDEF(VT);
}

static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
                                         EVT VT) {
  DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
                                      "intrinsic not supported on subtarget",
                                      DL.getDebugLoc());
  DAG.getContext()->diagnose(BadIntrin);
  return DAG.getUNDEF(VT);
}

static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
                                    ArrayRef<SDValue> Elts) {
  assert(!Elts.empty());
  MVT Type;
  unsigned NumElts;

  if (Elts.size() == 1) {
    Type = MVT::f32;
    NumElts = 1;
  } else if (Elts.size() == 2) {
    Type = MVT::v2f32;
    NumElts = 2;
  } else if (Elts.size() <= 4) {
    Type = MVT::v4f32;
    NumElts = 4;
  } else if (Elts.size() <= 8) {
    Type = MVT::v8f32;
    NumElts = 8;
  } else {
    assert(Elts.size() <= 16);
    Type = MVT::v16f32;
    NumElts = 16;
  }

  SmallVector<SDValue, 16> VecElts(NumElts);
  for (unsigned i = 0; i < Elts.size(); ++i) {
    SDValue Elt = Elts[i];
    if (Elt.getValueType() != MVT::f32)
      Elt = DAG.getBitcast(MVT::f32, Elt);
    VecElts[i] = Elt;
  }
  for (unsigned i = Elts.size(); i < NumElts; ++i)
    VecElts[i] = DAG.getUNDEF(MVT::f32);

  if (NumElts == 1)
    return VecElts[0];
  return DAG.getBuildVector(Type, DL, VecElts);
}

static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG,
                             SDValue *GLC, SDValue *SLC, SDValue *DLC) {
  auto CachePolicyConst = cast<ConstantSDNode>(CachePolicy.getNode());

  uint64_t Value = CachePolicyConst->getZExtValue();
  SDLoc DL(CachePolicy);
  if (GLC) {
    *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
    Value &= ~(uint64_t)0x1;
  }
  if (SLC) {
    *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
    Value &= ~(uint64_t)0x2;
  }
  if (DLC) {
    *DLC = DAG.getTargetConstant((Value & 0x4) ? 1 : 0, DL, MVT::i32);
    Value &= ~(uint64_t)0x4;
  }

  return Value == 0;
}

// Re-construct the required return value for an image load intrinsic.
// This is more complicated due to the optional use of TexFailCtrl, which means
// the required return type is an aggregate.
static SDValue constructRetValue(SelectionDAG &DAG,
                                 MachineSDNode *Result,
                                 ArrayRef<EVT> ResultTypes,
                                 bool IsTexFail, bool Unpacked, bool IsD16,
                                 int DMaskPop, int NumVDataDwords,
                                 const SDLoc &DL, LLVMContext &Context) {
  // Determine the required return type. This is the same regardless of the
  // IsTexFail flag.
  EVT ReqRetVT = ResultTypes[0];
  EVT ReqRetEltVT = ReqRetVT.isVector() ? ReqRetVT.getVectorElementType() : ReqRetVT;
  int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1;
  EVT AdjEltVT = Unpacked && IsD16 ? MVT::i32 : ReqRetEltVT;
  EVT AdjVT = Unpacked ? ReqRetNumElts > 1 ?
EVT::getVectorVT(Context, AdjEltVT, ReqRetNumElts) 4768 : AdjEltVT 4769 : ReqRetVT; 4770 4771 // Extract data part of the result 4772 // Bitcast the result to the same type as the required return type 4773 int NumElts; 4774 if (IsD16 && !Unpacked) 4775 NumElts = NumVDataDwords << 1; 4776 else 4777 NumElts = NumVDataDwords; 4778 4779 EVT CastVT = NumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, NumElts) 4780 : AdjEltVT; 4781 4782 // Special case for v6f16. Rather than add support for this, use v3i32 to 4783 // extract the data elements 4784 bool V6F16Special = false; 4785 if (NumElts == 6) { 4786 CastVT = EVT::getVectorVT(Context, MVT::i32, NumElts / 2); 4787 DMaskPop >>= 1; 4788 ReqRetNumElts >>= 1; 4789 V6F16Special = true; 4790 AdjVT = MVT::v2i32; 4791 } 4792 4793 SDValue N = SDValue(Result, 0); 4794 SDValue CastRes = DAG.getNode(ISD::BITCAST, DL, CastVT, N); 4795 4796 // Iterate over the result 4797 SmallVector<SDValue, 4> BVElts; 4798 4799 if (CastVT.isVector()) { 4800 DAG.ExtractVectorElements(CastRes, BVElts, 0, DMaskPop); 4801 } else { 4802 BVElts.push_back(CastRes); 4803 } 4804 int ExtraElts = ReqRetNumElts - DMaskPop; 4805 while(ExtraElts--) 4806 BVElts.push_back(DAG.getUNDEF(AdjEltVT)); 4807 4808 SDValue PreTFCRes; 4809 if (ReqRetNumElts > 1) { 4810 SDValue NewVec = DAG.getBuildVector(AdjVT, DL, BVElts); 4811 if (IsD16 && Unpacked) 4812 PreTFCRes = adjustLoadValueTypeImpl(NewVec, ReqRetVT, DL, DAG, Unpacked); 4813 else 4814 PreTFCRes = NewVec; 4815 } else { 4816 PreTFCRes = BVElts[0]; 4817 } 4818 4819 if (V6F16Special) 4820 PreTFCRes = DAG.getNode(ISD::BITCAST, DL, MVT::v4f16, PreTFCRes); 4821 4822 if (!IsTexFail) { 4823 if (Result->getNumValues() > 1) 4824 return DAG.getMergeValues({PreTFCRes, SDValue(Result, 1)}, DL); 4825 else 4826 return PreTFCRes; 4827 } 4828 4829 // Extract the TexFail result and insert into aggregate return 4830 SmallVector<SDValue, 1> TFCElt; 4831 DAG.ExtractVectorElements(N, TFCElt, DMaskPop, 1); 4832 SDValue TFCRes = DAG.getNode(ISD::BITCAST, DL, ResultTypes[1], TFCElt[0]); 4833 return DAG.getMergeValues({PreTFCRes, TFCRes, SDValue(Result, 1)}, DL); 4834 } 4835 4836 static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE, 4837 SDValue *LWE, bool &IsTexFail) { 4838 auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode()); 4839 4840 uint64_t Value = TexFailCtrlConst->getZExtValue(); 4841 if (Value) { 4842 IsTexFail = true; 4843 } 4844 4845 SDLoc DL(TexFailCtrlConst); 4846 *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32); 4847 Value &= ~(uint64_t)0x1; 4848 *LWE = DAG.getTargetConstant((Value & 0x2) ? 
1 : 0, DL, MVT::i32); 4849 Value &= ~(uint64_t)0x2; 4850 4851 return Value == 0; 4852 } 4853 4854 SDValue SITargetLowering::lowerImage(SDValue Op, 4855 const AMDGPU::ImageDimIntrinsicInfo *Intr, 4856 SelectionDAG &DAG) const { 4857 SDLoc DL(Op); 4858 MachineFunction &MF = DAG.getMachineFunction(); 4859 const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>(); 4860 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 4861 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); 4862 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim); 4863 const AMDGPU::MIMGLZMappingInfo *LZMappingInfo = 4864 AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode); 4865 const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo = 4866 AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode); 4867 unsigned IntrOpcode = Intr->BaseOpcode; 4868 bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10; 4869 4870 SmallVector<EVT, 3> ResultTypes(Op->value_begin(), Op->value_end()); 4871 SmallVector<EVT, 3> OrigResultTypes(Op->value_begin(), Op->value_end()); 4872 bool IsD16 = false; 4873 bool IsA16 = false; 4874 SDValue VData; 4875 int NumVDataDwords; 4876 bool AdjustRetType = false; 4877 4878 unsigned AddrIdx; // Index of first address argument 4879 unsigned DMask; 4880 unsigned DMaskLanes = 0; 4881 4882 if (BaseOpcode->Atomic) { 4883 VData = Op.getOperand(2); 4884 4885 bool Is64Bit = VData.getValueType() == MVT::i64; 4886 if (BaseOpcode->AtomicX2) { 4887 SDValue VData2 = Op.getOperand(3); 4888 VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL, 4889 {VData, VData2}); 4890 if (Is64Bit) 4891 VData = DAG.getBitcast(MVT::v4i32, VData); 4892 4893 ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32; 4894 DMask = Is64Bit ? 0xf : 0x3; 4895 NumVDataDwords = Is64Bit ? 4 : 2; 4896 AddrIdx = 4; 4897 } else { 4898 DMask = Is64Bit ? 0x3 : 0x1; 4899 NumVDataDwords = Is64Bit ? 2 : 1; 4900 AddrIdx = 3; 4901 } 4902 } else { 4903 unsigned DMaskIdx = BaseOpcode->Store ? 3 : isa<MemSDNode>(Op) ? 2 : 1; 4904 auto DMaskConst = cast<ConstantSDNode>(Op.getOperand(DMaskIdx)); 4905 DMask = DMaskConst->getZExtValue(); 4906 DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask); 4907 4908 if (BaseOpcode->Store) { 4909 VData = Op.getOperand(2); 4910 4911 MVT StoreVT = VData.getSimpleValueType(); 4912 if (StoreVT.getScalarType() == MVT::f16) { 4913 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS || 4914 !BaseOpcode->HasD16) 4915 return Op; // D16 is unsupported for this instruction 4916 4917 IsD16 = true; 4918 VData = handleD16VData(VData, DAG); 4919 } 4920 4921 NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32; 4922 } else { 4923 // Work out the num dwords based on the dmask popcount and underlying type 4924 // and whether packing is supported. 
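      // Illustrative example (numbers chosen here, not taken from a particular
      // case), for a non-gather4 load:
      //   dmask = 0b1011                     -> DMaskLanes = 3
      //   f16 data, packed d16 memory path   -> NumVDataDwords = (3 + 1) / 2 = 2
      //   32-bit data (or unpacked d16)      -> NumVDataDwords = DMaskLanes = 3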
4925 MVT LoadVT = ResultTypes[0].getSimpleVT(); 4926 if (LoadVT.getScalarType() == MVT::f16) { 4927 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS || 4928 !BaseOpcode->HasD16) 4929 return Op; // D16 is unsupported for this instruction 4930 4931 IsD16 = true; 4932 } 4933 4934 // Confirm that the return type is large enough for the dmask specified 4935 if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) || 4936 (!LoadVT.isVector() && DMaskLanes > 1)) 4937 return Op; 4938 4939 if (IsD16 && !Subtarget->hasUnpackedD16VMem()) 4940 NumVDataDwords = (DMaskLanes + 1) / 2; 4941 else 4942 NumVDataDwords = DMaskLanes; 4943 4944 AdjustRetType = true; 4945 } 4946 4947 AddrIdx = DMaskIdx + 1; 4948 } 4949 4950 unsigned NumGradients = BaseOpcode->Gradients ? DimInfo->NumGradients : 0; 4951 unsigned NumCoords = BaseOpcode->Coordinates ? DimInfo->NumCoords : 0; 4952 unsigned NumLCM = BaseOpcode->LodOrClampOrMip ? 1 : 0; 4953 unsigned NumVAddrs = BaseOpcode->NumExtraArgs + NumGradients + 4954 NumCoords + NumLCM; 4955 unsigned NumMIVAddrs = NumVAddrs; 4956 4957 SmallVector<SDValue, 4> VAddrs; 4958 4959 // Optimize _L to _LZ when _L is zero 4960 if (LZMappingInfo) { 4961 if (auto ConstantLod = 4962 dyn_cast<ConstantFPSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) { 4963 if (ConstantLod->isZero() || ConstantLod->isNegative()) { 4964 IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l 4965 NumMIVAddrs--; // remove 'lod' 4966 } 4967 } 4968 } 4969 4970 // Optimize _mip away, when 'lod' is zero 4971 if (MIPMappingInfo) { 4972 if (auto ConstantLod = 4973 dyn_cast<ConstantSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) { 4974 if (ConstantLod->isNullValue()) { 4975 IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip 4976 NumMIVAddrs--; // remove 'lod' 4977 } 4978 } 4979 } 4980 4981 // Check for 16 bit addresses and pack if true. 4982 unsigned DimIdx = AddrIdx + BaseOpcode->NumExtraArgs; 4983 MVT VAddrVT = Op.getOperand(DimIdx).getSimpleValueType(); 4984 const MVT VAddrScalarVT = VAddrVT.getScalarType(); 4985 if (((VAddrScalarVT == MVT::f16) || (VAddrScalarVT == MVT::i16)) && 4986 ST->hasFeature(AMDGPU::FeatureR128A16)) { 4987 IsA16 = true; 4988 const MVT VectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16; 4989 for (unsigned i = AddrIdx; i < (AddrIdx + NumMIVAddrs); ++i) { 4990 SDValue AddrLo, AddrHi; 4991 // Push back extra arguments. 4992 if (i < DimIdx) { 4993 AddrLo = Op.getOperand(i); 4994 } else { 4995 AddrLo = Op.getOperand(i); 4996 // Dz/dh, dz/dv and the last odd coord are packed with undef. Also, 4997 // in 1D, derivatives dx/dh and dx/dv are packed with undef. 
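        // Sketch of the packing done below: a pair of f16 coordinates (s, t)
        // becomes one packed pair bitcast to a single i32 address dword, and a
        // leftover odd coordinate r is paired with undef into its own dword.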
4998 if (((i + 1) >= (AddrIdx + NumMIVAddrs)) || 4999 ((NumGradients / 2) % 2 == 1 && 5000 (i == DimIdx + (NumGradients / 2) - 1 || 5001 i == DimIdx + NumGradients - 1))) { 5002 AddrHi = DAG.getUNDEF(MVT::f16); 5003 } else { 5004 AddrHi = Op.getOperand(i + 1); 5005 i++; 5006 } 5007 AddrLo = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VectorVT, 5008 {AddrLo, AddrHi}); 5009 AddrLo = DAG.getBitcast(MVT::i32, AddrLo); 5010 } 5011 VAddrs.push_back(AddrLo); 5012 } 5013 } else { 5014 for (unsigned i = 0; i < NumMIVAddrs; ++i) 5015 VAddrs.push_back(Op.getOperand(AddrIdx + i)); 5016 } 5017 5018 // If the register allocator cannot place the address registers contiguously 5019 // without introducing moves, then using the non-sequential address encoding 5020 // is always preferable, since it saves VALU instructions and is usually a 5021 // wash in terms of code size or even better. 5022 // 5023 // However, we currently have no way of hinting to the register allocator that 5024 // MIMG addresses should be placed contiguously when it is possible to do so, 5025 // so force non-NSA for the common 2-address case as a heuristic. 5026 // 5027 // SIShrinkInstructions will convert NSA encodings to non-NSA after register 5028 // allocation when possible. 5029 bool UseNSA = 5030 ST->hasFeature(AMDGPU::FeatureNSAEncoding) && VAddrs.size() >= 3; 5031 SDValue VAddr; 5032 if (!UseNSA) 5033 VAddr = getBuildDwordsVector(DAG, DL, VAddrs); 5034 5035 SDValue True = DAG.getTargetConstant(1, DL, MVT::i1); 5036 SDValue False = DAG.getTargetConstant(0, DL, MVT::i1); 5037 unsigned CtrlIdx; // Index of texfailctrl argument 5038 SDValue Unorm; 5039 if (!BaseOpcode->Sampler) { 5040 Unorm = True; 5041 CtrlIdx = AddrIdx + NumVAddrs + 1; 5042 } else { 5043 auto UnormConst = 5044 cast<ConstantSDNode>(Op.getOperand(AddrIdx + NumVAddrs + 2)); 5045 5046 Unorm = UnormConst->getZExtValue() ? True : False; 5047 CtrlIdx = AddrIdx + NumVAddrs + 3; 5048 } 5049 5050 SDValue TFE; 5051 SDValue LWE; 5052 SDValue TexFail = Op.getOperand(CtrlIdx); 5053 bool IsTexFail = false; 5054 if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail)) 5055 return Op; 5056 5057 if (IsTexFail) { 5058 if (!DMaskLanes) { 5059 // Expecting to get an error flag since TFC is on - and dmask is 0 5060 // Force dmask to be at least 1 otherwise the instruction will fail 5061 DMask = 0x1; 5062 DMaskLanes = 1; 5063 NumVDataDwords = 1; 5064 } 5065 NumVDataDwords += 1; 5066 AdjustRetType = true; 5067 } 5068 5069 // Has something earlier tagged that the return type needs adjusting 5070 // This happens if the instruction is a load or has set TexFailCtrl flags 5071 if (AdjustRetType) { 5072 // NumVDataDwords reflects the true number of dwords required in the return type 5073 if (DMaskLanes == 0 && !BaseOpcode->Store) { 5074 // This is a no-op load. This can be eliminated 5075 SDValue Undef = DAG.getUNDEF(Op.getValueType()); 5076 if (isa<MemSDNode>(Op)) 5077 return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL); 5078 return Undef; 5079 } 5080 5081 EVT NewVT = NumVDataDwords > 1 ? 5082 EVT::getVectorVT(*DAG.getContext(), MVT::f32, NumVDataDwords) 5083 : MVT::f32; 5084 5085 ResultTypes[0] = NewVT; 5086 if (ResultTypes.size() == 3) { 5087 // Original result was aggregate type used for TexFailCtrl results 5088 // The actual instruction returns as a vector type which has now been 5089 // created. Remove the aggregate result. 
5090 ResultTypes.erase(&ResultTypes[1]); 5091 } 5092 } 5093 5094 SDValue GLC; 5095 SDValue SLC; 5096 SDValue DLC; 5097 if (BaseOpcode->Atomic) { 5098 GLC = True; // TODO no-return optimization 5099 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, nullptr, &SLC, 5100 IsGFX10 ? &DLC : nullptr)) 5101 return Op; 5102 } else { 5103 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, &GLC, &SLC, 5104 IsGFX10 ? &DLC : nullptr)) 5105 return Op; 5106 } 5107 5108 SmallVector<SDValue, 26> Ops; 5109 if (BaseOpcode->Store || BaseOpcode->Atomic) 5110 Ops.push_back(VData); // vdata 5111 if (UseNSA) { 5112 for (const SDValue &Addr : VAddrs) 5113 Ops.push_back(Addr); 5114 } else { 5115 Ops.push_back(VAddr); 5116 } 5117 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs)); // rsrc 5118 if (BaseOpcode->Sampler) 5119 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs + 1)); // sampler 5120 Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32)); 5121 if (IsGFX10) 5122 Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32)); 5123 Ops.push_back(Unorm); 5124 if (IsGFX10) 5125 Ops.push_back(DLC); 5126 Ops.push_back(GLC); 5127 Ops.push_back(SLC); 5128 Ops.push_back(IsA16 && // a16 or r128 5129 ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False); 5130 Ops.push_back(TFE); // tfe 5131 Ops.push_back(LWE); // lwe 5132 if (!IsGFX10) 5133 Ops.push_back(DimInfo->DA ? True : False); 5134 if (BaseOpcode->HasD16) 5135 Ops.push_back(IsD16 ? True : False); 5136 if (isa<MemSDNode>(Op)) 5137 Ops.push_back(Op.getOperand(0)); // chain 5138 5139 int NumVAddrDwords = 5140 UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32; 5141 int Opcode = -1; 5142 5143 if (IsGFX10) { 5144 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, 5145 UseNSA ? AMDGPU::MIMGEncGfx10NSA 5146 : AMDGPU::MIMGEncGfx10Default, 5147 NumVDataDwords, NumVAddrDwords); 5148 } else { 5149 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 5150 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8, 5151 NumVDataDwords, NumVAddrDwords); 5152 if (Opcode == -1) 5153 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6, 5154 NumVDataDwords, NumVAddrDwords); 5155 } 5156 assert(Opcode != -1); 5157 5158 MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops); 5159 if (auto MemOp = dyn_cast<MemSDNode>(Op)) { 5160 MachineMemOperand *MemRef = MemOp->getMemOperand(); 5161 DAG.setNodeMemRefs(NewNode, {MemRef}); 5162 } 5163 5164 if (BaseOpcode->AtomicX2) { 5165 SmallVector<SDValue, 1> Elt; 5166 DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1); 5167 return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL); 5168 } else if (!BaseOpcode->Store) { 5169 return constructRetValue(DAG, NewNode, 5170 OrigResultTypes, IsTexFail, 5171 Subtarget->hasUnpackedD16VMem(), IsD16, 5172 DMaskLanes, NumVDataDwords, DL, 5173 *DAG.getContext()); 5174 } 5175 5176 return SDValue(NewNode, 0); 5177 } 5178 5179 SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc, 5180 SDValue Offset, SDValue GLC, 5181 SelectionDAG &DAG) const { 5182 MachineFunction &MF = DAG.getMachineFunction(); 5183 MachineMemOperand *MMO = MF.getMachineMemOperand( 5184 MachinePointerInfo(), 5185 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | 5186 MachineMemOperand::MOInvariant, 5187 VT.getStoreSize(), VT.getStoreSize()); 5188 5189 if (!Offset->isDivergent()) { 5190 SDValue Ops[] = { 5191 Rsrc, 5192 Offset, // Offset 5193 GLC // glc 5194 }; 5195 return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL, 
5196 DAG.getVTList(VT), Ops, VT, MMO); 5197 } 5198 5199 // We have a divergent offset. Emit a MUBUF buffer load instead. We can 5200 // assume that the buffer is unswizzled. 5201 SmallVector<SDValue, 4> Loads; 5202 unsigned NumLoads = 1; 5203 MVT LoadVT = VT.getSimpleVT(); 5204 unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1; 5205 assert((LoadVT.getScalarType() == MVT::i32 || 5206 LoadVT.getScalarType() == MVT::f32) && 5207 isPowerOf2_32(NumElts)); 5208 5209 if (NumElts == 8 || NumElts == 16) { 5210 NumLoads = NumElts == 16 ? 4 : 2; 5211 LoadVT = MVT::v4i32; 5212 } 5213 5214 SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue}); 5215 unsigned CachePolicy = cast<ConstantSDNode>(GLC)->getZExtValue(); 5216 SDValue Ops[] = { 5217 DAG.getEntryNode(), // Chain 5218 Rsrc, // rsrc 5219 DAG.getConstant(0, DL, MVT::i32), // vindex 5220 {}, // voffset 5221 {}, // soffset 5222 {}, // offset 5223 DAG.getConstant(CachePolicy, DL, MVT::i32), // cachepolicy 5224 DAG.getConstant(0, DL, MVT::i1), // idxen 5225 }; 5226 5227 // Use the alignment to ensure that the required offsets will fit into the 5228 // immediate offsets. 5229 setBufferOffsets(Offset, DAG, &Ops[3], NumLoads > 1 ? 16 * NumLoads : 4); 5230 5231 uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue(); 5232 for (unsigned i = 0; i < NumLoads; ++i) { 5233 Ops[5] = DAG.getConstant(InstOffset + 16 * i, DL, MVT::i32); 5234 Loads.push_back(DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, 5235 Ops, LoadVT, MMO)); 5236 } 5237 5238 if (VT == MVT::v8i32 || VT == MVT::v16i32) 5239 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads); 5240 5241 return Loads[0]; 5242 } 5243 5244 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 5245 SelectionDAG &DAG) const { 5246 MachineFunction &MF = DAG.getMachineFunction(); 5247 auto MFI = MF.getInfo<SIMachineFunctionInfo>(); 5248 5249 EVT VT = Op.getValueType(); 5250 SDLoc DL(Op); 5251 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 5252 5253 // TODO: Should this propagate fast-math-flags? 5254 5255 switch (IntrinsicID) { 5256 case Intrinsic::amdgcn_implicit_buffer_ptr: { 5257 if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction())) 5258 return emitNonHSAIntrinsicError(DAG, DL, VT); 5259 return getPreloadedValue(DAG, *MFI, VT, 5260 AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR); 5261 } 5262 case Intrinsic::amdgcn_dispatch_ptr: 5263 case Intrinsic::amdgcn_queue_ptr: { 5264 if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) { 5265 DiagnosticInfoUnsupported BadIntrin( 5266 MF.getFunction(), "unsupported hsa intrinsic without hsa target", 5267 DL.getDebugLoc()); 5268 DAG.getContext()->diagnose(BadIntrin); 5269 return DAG.getUNDEF(VT); 5270 } 5271 5272 auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ? 
5273 AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR; 5274 return getPreloadedValue(DAG, *MFI, VT, RegID); 5275 } 5276 case Intrinsic::amdgcn_implicitarg_ptr: { 5277 if (MFI->isEntryFunction()) 5278 return getImplicitArgPtr(DAG, DL); 5279 return getPreloadedValue(DAG, *MFI, VT, 5280 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); 5281 } 5282 case Intrinsic::amdgcn_kernarg_segment_ptr: { 5283 return getPreloadedValue(DAG, *MFI, VT, 5284 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); 5285 } 5286 case Intrinsic::amdgcn_dispatch_id: { 5287 return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID); 5288 } 5289 case Intrinsic::amdgcn_rcp: 5290 return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1)); 5291 case Intrinsic::amdgcn_rsq: 5292 return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 5293 case Intrinsic::amdgcn_rsq_legacy: 5294 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 5295 return emitRemovedIntrinsicError(DAG, DL, VT); 5296 5297 return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1)); 5298 case Intrinsic::amdgcn_rcp_legacy: 5299 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 5300 return emitRemovedIntrinsicError(DAG, DL, VT); 5301 return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1)); 5302 case Intrinsic::amdgcn_rsq_clamp: { 5303 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) 5304 return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1)); 5305 5306 Type *Type = VT.getTypeForEVT(*DAG.getContext()); 5307 APFloat Max = APFloat::getLargest(Type->getFltSemantics()); 5308 APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true); 5309 5310 SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 5311 SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq, 5312 DAG.getConstantFP(Max, DL, VT)); 5313 return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp, 5314 DAG.getConstantFP(Min, DL, VT)); 5315 } 5316 case Intrinsic::r600_read_ngroups_x: 5317 if (Subtarget->isAmdHsaOS()) 5318 return emitNonHSAIntrinsicError(DAG, DL, VT); 5319 5320 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 5321 SI::KernelInputOffsets::NGROUPS_X, 4, false); 5322 case Intrinsic::r600_read_ngroups_y: 5323 if (Subtarget->isAmdHsaOS()) 5324 return emitNonHSAIntrinsicError(DAG, DL, VT); 5325 5326 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 5327 SI::KernelInputOffsets::NGROUPS_Y, 4, false); 5328 case Intrinsic::r600_read_ngroups_z: 5329 if (Subtarget->isAmdHsaOS()) 5330 return emitNonHSAIntrinsicError(DAG, DL, VT); 5331 5332 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 5333 SI::KernelInputOffsets::NGROUPS_Z, 4, false); 5334 case Intrinsic::r600_read_global_size_x: 5335 if (Subtarget->isAmdHsaOS()) 5336 return emitNonHSAIntrinsicError(DAG, DL, VT); 5337 5338 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 5339 SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false); 5340 case Intrinsic::r600_read_global_size_y: 5341 if (Subtarget->isAmdHsaOS()) 5342 return emitNonHSAIntrinsicError(DAG, DL, VT); 5343 5344 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 5345 SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false); 5346 case Intrinsic::r600_read_global_size_z: 5347 if (Subtarget->isAmdHsaOS()) 5348 return emitNonHSAIntrinsicError(DAG, DL, VT); 5349 5350 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 5351 SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false); 5352 
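  // The local size queries below differ from the ngroups / global size queries
  // above: they go through lowerImplicitZextParam, which loads the value as
  // i32 and asserts that the high 16 bits are zero (the values fit in i16),
  // rather than being lowered as plain i32 kernarg loads.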
case Intrinsic::r600_read_local_size_x: 5353 if (Subtarget->isAmdHsaOS()) 5354 return emitNonHSAIntrinsicError(DAG, DL, VT); 5355 5356 return lowerImplicitZextParam(DAG, Op, MVT::i16, 5357 SI::KernelInputOffsets::LOCAL_SIZE_X); 5358 case Intrinsic::r600_read_local_size_y: 5359 if (Subtarget->isAmdHsaOS()) 5360 return emitNonHSAIntrinsicError(DAG, DL, VT); 5361 5362 return lowerImplicitZextParam(DAG, Op, MVT::i16, 5363 SI::KernelInputOffsets::LOCAL_SIZE_Y); 5364 case Intrinsic::r600_read_local_size_z: 5365 if (Subtarget->isAmdHsaOS()) 5366 return emitNonHSAIntrinsicError(DAG, DL, VT); 5367 5368 return lowerImplicitZextParam(DAG, Op, MVT::i16, 5369 SI::KernelInputOffsets::LOCAL_SIZE_Z); 5370 case Intrinsic::amdgcn_workgroup_id_x: 5371 case Intrinsic::r600_read_tgid_x: 5372 return getPreloadedValue(DAG, *MFI, VT, 5373 AMDGPUFunctionArgInfo::WORKGROUP_ID_X); 5374 case Intrinsic::amdgcn_workgroup_id_y: 5375 case Intrinsic::r600_read_tgid_y: 5376 return getPreloadedValue(DAG, *MFI, VT, 5377 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y); 5378 case Intrinsic::amdgcn_workgroup_id_z: 5379 case Intrinsic::r600_read_tgid_z: 5380 return getPreloadedValue(DAG, *MFI, VT, 5381 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z); 5382 case Intrinsic::amdgcn_workitem_id_x: 5383 case Intrinsic::r600_read_tidig_x: 5384 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, 5385 SDLoc(DAG.getEntryNode()), 5386 MFI->getArgInfo().WorkItemIDX); 5387 case Intrinsic::amdgcn_workitem_id_y: 5388 case Intrinsic::r600_read_tidig_y: 5389 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, 5390 SDLoc(DAG.getEntryNode()), 5391 MFI->getArgInfo().WorkItemIDY); 5392 case Intrinsic::amdgcn_workitem_id_z: 5393 case Intrinsic::r600_read_tidig_z: 5394 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, 5395 SDLoc(DAG.getEntryNode()), 5396 MFI->getArgInfo().WorkItemIDZ); 5397 case Intrinsic::amdgcn_s_buffer_load: { 5398 unsigned Cache = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); 5399 return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2), 5400 DAG.getTargetConstant(Cache & 1, DL, MVT::i1), DAG); 5401 } 5402 case Intrinsic::amdgcn_fdiv_fast: 5403 return lowerFDIV_FAST(Op, DAG); 5404 case Intrinsic::amdgcn_interp_mov: { 5405 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); 5406 SDValue Glue = M0.getValue(1); 5407 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1), 5408 Op.getOperand(2), Op.getOperand(3), Glue); 5409 } 5410 case Intrinsic::amdgcn_interp_p1: { 5411 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); 5412 SDValue Glue = M0.getValue(1); 5413 return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1), 5414 Op.getOperand(2), Op.getOperand(3), Glue); 5415 } 5416 case Intrinsic::amdgcn_interp_p2: { 5417 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5)); 5418 SDValue Glue = SDValue(M0.getNode(), 1); 5419 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1), 5420 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4), 5421 Glue); 5422 } 5423 case Intrinsic::amdgcn_interp_p1_f16: { 5424 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5)); 5425 SDValue Glue = M0.getValue(1); 5426 if (getSubtarget()->getLDSBankCount() == 16) { 5427 // 16 bank LDS 5428 SDValue S = DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, 5429 DAG.getConstant(2, DL, MVT::i32), // P0 5430 Op.getOperand(2), // Attrchan 5431 Op.getOperand(3), // Attr 5432 Glue); 5433 SDValue Ops[] = { 5434 
Op.getOperand(1), // Src0 5435 Op.getOperand(2), // Attrchan 5436 Op.getOperand(3), // Attr 5437 DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers 5438 S, // Src2 - holds two f16 values selected by high 5439 DAG.getConstant(0, DL, MVT::i32), // $src2_modifiers 5440 Op.getOperand(4), // high 5441 DAG.getConstant(0, DL, MVT::i1), // $clamp 5442 DAG.getConstant(0, DL, MVT::i32) // $omod 5443 }; 5444 return DAG.getNode(AMDGPUISD::INTERP_P1LV_F16, DL, MVT::f32, Ops); 5445 } else { 5446 // 32 bank LDS 5447 SDValue Ops[] = { 5448 Op.getOperand(1), // Src0 5449 Op.getOperand(2), // Attrchan 5450 Op.getOperand(3), // Attr 5451 DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers 5452 Op.getOperand(4), // high 5453 DAG.getConstant(0, DL, MVT::i1), // $clamp 5454 DAG.getConstant(0, DL, MVT::i32), // $omod 5455 Glue 5456 }; 5457 return DAG.getNode(AMDGPUISD::INTERP_P1LL_F16, DL, MVT::f32, Ops); 5458 } 5459 } 5460 case Intrinsic::amdgcn_interp_p2_f16: { 5461 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(6)); 5462 SDValue Glue = SDValue(M0.getNode(), 1); 5463 SDValue Ops[] = { 5464 Op.getOperand(2), // Src0 5465 Op.getOperand(3), // Attrchan 5466 Op.getOperand(4), // Attr 5467 DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers 5468 Op.getOperand(1), // Src2 5469 DAG.getConstant(0, DL, MVT::i32), // $src2_modifiers 5470 Op.getOperand(5), // high 5471 DAG.getConstant(0, DL, MVT::i1), // $clamp 5472 Glue 5473 }; 5474 return DAG.getNode(AMDGPUISD::INTERP_P2_F16, DL, MVT::f16, Ops); 5475 } 5476 case Intrinsic::amdgcn_sin: 5477 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1)); 5478 5479 case Intrinsic::amdgcn_cos: 5480 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1)); 5481 5482 case Intrinsic::amdgcn_log_clamp: { 5483 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) 5484 return SDValue(); 5485 5486 DiagnosticInfoUnsupported BadIntrin( 5487 MF.getFunction(), "intrinsic not supported on subtarget", 5488 DL.getDebugLoc()); 5489 DAG.getContext()->diagnose(BadIntrin); 5490 return DAG.getUNDEF(VT); 5491 } 5492 case Intrinsic::amdgcn_ldexp: 5493 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, 5494 Op.getOperand(1), Op.getOperand(2)); 5495 5496 case Intrinsic::amdgcn_fract: 5497 return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1)); 5498 5499 case Intrinsic::amdgcn_class: 5500 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT, 5501 Op.getOperand(1), Op.getOperand(2)); 5502 case Intrinsic::amdgcn_div_fmas: 5503 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT, 5504 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), 5505 Op.getOperand(4)); 5506 5507 case Intrinsic::amdgcn_div_fixup: 5508 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT, 5509 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 5510 5511 case Intrinsic::amdgcn_trig_preop: 5512 return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT, 5513 Op.getOperand(1), Op.getOperand(2)); 5514 case Intrinsic::amdgcn_div_scale: { 5515 const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3)); 5516 5517 // Translate to the operands expected by the machine instruction. The 5518 // first parameter must be the same as the first instruction. 5519 SDValue Numerator = Op.getOperand(1); 5520 SDValue Denominator = Op.getOperand(2); 5521 5522 // Note this order is opposite of the machine instruction's operations, 5523 // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The 5524 // intrinsic has the numerator as the first operand to match a normal 5525 // division operation. 
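    // Illustrative mapping of the selection below onto the DIV_SCALE node
    // operands (Src0, Denominator, Numerator):
    //   flag operand all-ones: Src0 = Numerator   -> DIV_SCALE num, den, num
    //   flag operand zero:     Src0 = Denominator -> DIV_SCALE den, den, num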
5526 5527 SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator; 5528 5529 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0, 5530 Denominator, Numerator); 5531 } 5532 case Intrinsic::amdgcn_icmp: { 5533 // There is a Pat that handles this variant, so return it as-is. 5534 if (Op.getOperand(1).getValueType() == MVT::i1 && 5535 Op.getConstantOperandVal(2) == 0 && 5536 Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE) 5537 return Op; 5538 return lowerICMPIntrinsic(*this, Op.getNode(), DAG); 5539 } 5540 case Intrinsic::amdgcn_fcmp: { 5541 return lowerFCMPIntrinsic(*this, Op.getNode(), DAG); 5542 } 5543 case Intrinsic::amdgcn_fmed3: 5544 return DAG.getNode(AMDGPUISD::FMED3, DL, VT, 5545 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 5546 case Intrinsic::amdgcn_fdot2: 5547 return DAG.getNode(AMDGPUISD::FDOT2, DL, VT, 5548 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), 5549 Op.getOperand(4)); 5550 case Intrinsic::amdgcn_fmul_legacy: 5551 return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT, 5552 Op.getOperand(1), Op.getOperand(2)); 5553 case Intrinsic::amdgcn_sffbh: 5554 return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1)); 5555 case Intrinsic::amdgcn_sbfe: 5556 return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT, 5557 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 5558 case Intrinsic::amdgcn_ubfe: 5559 return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT, 5560 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 5561 case Intrinsic::amdgcn_cvt_pkrtz: 5562 case Intrinsic::amdgcn_cvt_pknorm_i16: 5563 case Intrinsic::amdgcn_cvt_pknorm_u16: 5564 case Intrinsic::amdgcn_cvt_pk_i16: 5565 case Intrinsic::amdgcn_cvt_pk_u16: { 5566 // FIXME: Stop adding cast if v2f16/v2i16 are legal. 5567 EVT VT = Op.getValueType(); 5568 unsigned Opcode; 5569 5570 if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz) 5571 Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32; 5572 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16) 5573 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; 5574 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16) 5575 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; 5576 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16) 5577 Opcode = AMDGPUISD::CVT_PK_I16_I32; 5578 else 5579 Opcode = AMDGPUISD::CVT_PK_U16_U32; 5580 5581 if (isTypeLegal(VT)) 5582 return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2)); 5583 5584 SDValue Node = DAG.getNode(Opcode, DL, MVT::i32, 5585 Op.getOperand(1), Op.getOperand(2)); 5586 return DAG.getNode(ISD::BITCAST, DL, VT, Node); 5587 } 5588 case Intrinsic::amdgcn_wqm: { 5589 SDValue Src = Op.getOperand(1); 5590 return SDValue(DAG.getMachineNode(AMDGPU::WQM, DL, Src.getValueType(), Src), 5591 0); 5592 } 5593 case Intrinsic::amdgcn_wwm: { 5594 SDValue Src = Op.getOperand(1); 5595 return SDValue(DAG.getMachineNode(AMDGPU::WWM, DL, Src.getValueType(), Src), 5596 0); 5597 } 5598 case Intrinsic::amdgcn_fmad_ftz: 5599 return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1), 5600 Op.getOperand(2), Op.getOperand(3)); 5601 default: 5602 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = 5603 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) 5604 return lowerImage(Op, ImageDimIntr, DAG); 5605 5606 return Op; 5607 } 5608 } 5609 5610 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, 5611 SelectionDAG &DAG) const { 5612 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 5613 SDLoc DL(Op); 5614 5615 switch (IntrID) { 5616 case Intrinsic::amdgcn_ds_ordered_add: 5617 case 
Intrinsic::amdgcn_ds_ordered_swap: { 5618 MemSDNode *M = cast<MemSDNode>(Op); 5619 SDValue Chain = M->getOperand(0); 5620 SDValue M0 = M->getOperand(2); 5621 SDValue Value = M->getOperand(3); 5622 unsigned OrderedCountIndex = M->getConstantOperandVal(7); 5623 unsigned WaveRelease = M->getConstantOperandVal(8); 5624 unsigned WaveDone = M->getConstantOperandVal(9); 5625 unsigned ShaderType; 5626 unsigned Instruction; 5627 5628 switch (IntrID) { 5629 case Intrinsic::amdgcn_ds_ordered_add: 5630 Instruction = 0; 5631 break; 5632 case Intrinsic::amdgcn_ds_ordered_swap: 5633 Instruction = 1; 5634 break; 5635 } 5636 5637 if (WaveDone && !WaveRelease) 5638 report_fatal_error("ds_ordered_count: wave_done requires wave_release"); 5639 5640 switch (DAG.getMachineFunction().getFunction().getCallingConv()) { 5641 case CallingConv::AMDGPU_CS: 5642 case CallingConv::AMDGPU_KERNEL: 5643 ShaderType = 0; 5644 break; 5645 case CallingConv::AMDGPU_PS: 5646 ShaderType = 1; 5647 break; 5648 case CallingConv::AMDGPU_VS: 5649 ShaderType = 2; 5650 break; 5651 case CallingConv::AMDGPU_GS: 5652 ShaderType = 3; 5653 break; 5654 default: 5655 report_fatal_error("ds_ordered_count unsupported for this calling conv"); 5656 } 5657 5658 unsigned Offset0 = OrderedCountIndex << 2; 5659 unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) | 5660 (Instruction << 4); 5661 unsigned Offset = Offset0 | (Offset1 << 8); 5662 5663 SDValue Ops[] = { 5664 Chain, 5665 Value, 5666 DAG.getTargetConstant(Offset, DL, MVT::i16), 5667 copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue 5668 }; 5669 return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL, 5670 M->getVTList(), Ops, M->getMemoryVT(), 5671 M->getMemOperand()); 5672 } 5673 case Intrinsic::amdgcn_ds_fadd: { 5674 MemSDNode *M = cast<MemSDNode>(Op); 5675 unsigned Opc; 5676 switch (IntrID) { 5677 case Intrinsic::amdgcn_ds_fadd: 5678 Opc = ISD::ATOMIC_LOAD_FADD; 5679 break; 5680 } 5681 5682 return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(), 5683 M->getOperand(0), M->getOperand(2), M->getOperand(3), 5684 M->getMemOperand()); 5685 } 5686 case Intrinsic::amdgcn_atomic_inc: 5687 case Intrinsic::amdgcn_atomic_dec: 5688 case Intrinsic::amdgcn_ds_fmin: 5689 case Intrinsic::amdgcn_ds_fmax: { 5690 MemSDNode *M = cast<MemSDNode>(Op); 5691 unsigned Opc; 5692 switch (IntrID) { 5693 case Intrinsic::amdgcn_atomic_inc: 5694 Opc = AMDGPUISD::ATOMIC_INC; 5695 break; 5696 case Intrinsic::amdgcn_atomic_dec: 5697 Opc = AMDGPUISD::ATOMIC_DEC; 5698 break; 5699 case Intrinsic::amdgcn_ds_fmin: 5700 Opc = AMDGPUISD::ATOMIC_LOAD_FMIN; 5701 break; 5702 case Intrinsic::amdgcn_ds_fmax: 5703 Opc = AMDGPUISD::ATOMIC_LOAD_FMAX; 5704 break; 5705 default: 5706 llvm_unreachable("Unknown intrinsic!"); 5707 } 5708 SDValue Ops[] = { 5709 M->getOperand(0), // Chain 5710 M->getOperand(2), // Ptr 5711 M->getOperand(3) // Value 5712 }; 5713 5714 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops, 5715 M->getMemoryVT(), M->getMemOperand()); 5716 } 5717 case Intrinsic::amdgcn_buffer_load: 5718 case Intrinsic::amdgcn_buffer_load_format: { 5719 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); 5720 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); 5721 unsigned IdxEn = 1; 5722 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3))) 5723 IdxEn = Idx->getZExtValue() != 0; 5724 SDValue Ops[] = { 5725 Op.getOperand(0), // Chain 5726 Op.getOperand(2), // rsrc 5727 Op.getOperand(3), // vindex 5728 SDValue(), // voffset -- will be set by setBufferOffsets 
5729 SDValue(), // soffset -- will be set by setBufferOffsets 5730 SDValue(), // offset -- will be set by setBufferOffsets 5731 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy 5732 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen 5733 }; 5734 5735 setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]); 5736 unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ? 5737 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; 5738 5739 EVT VT = Op.getValueType(); 5740 EVT IntVT = VT.changeTypeToInteger(); 5741 auto *M = cast<MemSDNode>(Op); 5742 EVT LoadVT = Op.getValueType(); 5743 5744 if (LoadVT.getScalarType() == MVT::f16) 5745 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, 5746 M, DAG, Ops); 5747 5748 // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics 5749 if (LoadVT.getScalarType() == MVT::i8 || 5750 LoadVT.getScalarType() == MVT::i16) 5751 return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M); 5752 5753 return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, 5754 M->getMemOperand(), DAG); 5755 } 5756 case Intrinsic::amdgcn_raw_buffer_load: 5757 case Intrinsic::amdgcn_raw_buffer_load_format: { 5758 auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG); 5759 SDValue Ops[] = { 5760 Op.getOperand(0), // Chain 5761 Op.getOperand(2), // rsrc 5762 DAG.getConstant(0, DL, MVT::i32), // vindex 5763 Offsets.first, // voffset 5764 Op.getOperand(4), // soffset 5765 Offsets.second, // offset 5766 Op.getOperand(5), // cachepolicy 5767 DAG.getConstant(0, DL, MVT::i1), // idxen 5768 }; 5769 5770 unsigned Opc = (IntrID == Intrinsic::amdgcn_raw_buffer_load) ? 5771 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; 5772 5773 EVT VT = Op.getValueType(); 5774 EVT IntVT = VT.changeTypeToInteger(); 5775 auto *M = cast<MemSDNode>(Op); 5776 EVT LoadVT = Op.getValueType(); 5777 5778 if (LoadVT.getScalarType() == MVT::f16) 5779 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, 5780 M, DAG, Ops); 5781 5782 // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics 5783 if (LoadVT.getScalarType() == MVT::i8 || 5784 LoadVT.getScalarType() == MVT::i16) 5785 return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M); 5786 5787 return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, 5788 M->getMemOperand(), DAG); 5789 } 5790 case Intrinsic::amdgcn_struct_buffer_load: 5791 case Intrinsic::amdgcn_struct_buffer_load_format: { 5792 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); 5793 SDValue Ops[] = { 5794 Op.getOperand(0), // Chain 5795 Op.getOperand(2), // rsrc 5796 Op.getOperand(3), // vindex 5797 Offsets.first, // voffset 5798 Op.getOperand(5), // soffset 5799 Offsets.second, // offset 5800 Op.getOperand(6), // cachepolicy 5801 DAG.getConstant(1, DL, MVT::i1), // idxen 5802 }; 5803 5804 unsigned Opc = (IntrID == Intrinsic::amdgcn_struct_buffer_load) ? 
5805 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; 5806 5807 EVT VT = Op.getValueType(); 5808 EVT IntVT = VT.changeTypeToInteger(); 5809 auto *M = cast<MemSDNode>(Op); 5810 EVT LoadVT = Op.getValueType(); 5811 5812 if (LoadVT.getScalarType() == MVT::f16) 5813 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, 5814 M, DAG, Ops); 5815 5816 // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics 5817 if (LoadVT.getScalarType() == MVT::i8 || 5818 LoadVT.getScalarType() == MVT::i16) 5819 return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M); 5820 5821 return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, 5822 M->getMemOperand(), DAG); 5823 } 5824 case Intrinsic::amdgcn_tbuffer_load: { 5825 MemSDNode *M = cast<MemSDNode>(Op); 5826 EVT LoadVT = Op.getValueType(); 5827 5828 unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); 5829 unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue(); 5830 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue(); 5831 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue(); 5832 unsigned IdxEn = 1; 5833 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3))) 5834 IdxEn = Idx->getZExtValue() != 0; 5835 SDValue Ops[] = { 5836 Op.getOperand(0), // Chain 5837 Op.getOperand(2), // rsrc 5838 Op.getOperand(3), // vindex 5839 Op.getOperand(4), // voffset 5840 Op.getOperand(5), // soffset 5841 Op.getOperand(6), // offset 5842 DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format 5843 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy 5844 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen 5845 }; 5846 5847 if (LoadVT.getScalarType() == MVT::f16) 5848 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, 5849 M, DAG, Ops); 5850 return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, 5851 Op->getVTList(), Ops, LoadVT, M->getMemOperand(), 5852 DAG); 5853 } 5854 case Intrinsic::amdgcn_raw_tbuffer_load: { 5855 MemSDNode *M = cast<MemSDNode>(Op); 5856 EVT LoadVT = Op.getValueType(); 5857 auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG); 5858 5859 SDValue Ops[] = { 5860 Op.getOperand(0), // Chain 5861 Op.getOperand(2), // rsrc 5862 DAG.getConstant(0, DL, MVT::i32), // vindex 5863 Offsets.first, // voffset 5864 Op.getOperand(4), // soffset 5865 Offsets.second, // offset 5866 Op.getOperand(5), // format 5867 Op.getOperand(6), // cachepolicy 5868 DAG.getConstant(0, DL, MVT::i1), // idxen 5869 }; 5870 5871 if (LoadVT.getScalarType() == MVT::f16) 5872 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, 5873 M, DAG, Ops); 5874 return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, 5875 Op->getVTList(), Ops, LoadVT, M->getMemOperand(), 5876 DAG); 5877 } 5878 case Intrinsic::amdgcn_struct_tbuffer_load: { 5879 MemSDNode *M = cast<MemSDNode>(Op); 5880 EVT LoadVT = Op.getValueType(); 5881 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); 5882 5883 SDValue Ops[] = { 5884 Op.getOperand(0), // Chain 5885 Op.getOperand(2), // rsrc 5886 Op.getOperand(3), // vindex 5887 Offsets.first, // voffset 5888 Op.getOperand(5), // soffset 5889 Offsets.second, // offset 5890 Op.getOperand(6), // format 5891 Op.getOperand(7), // cachepolicy 5892 DAG.getConstant(1, DL, MVT::i1), // idxen 5893 }; 5894 5895 if (LoadVT.getScalarType() == MVT::f16) 5896 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, 5897 M, DAG, Ops); 5898 return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, 5899 Op->getVTList(), Ops, LoadVT, 
M->getMemOperand(), 5900 DAG); 5901 } 5902 case Intrinsic::amdgcn_buffer_atomic_swap: 5903 case Intrinsic::amdgcn_buffer_atomic_add: 5904 case Intrinsic::amdgcn_buffer_atomic_sub: 5905 case Intrinsic::amdgcn_buffer_atomic_smin: 5906 case Intrinsic::amdgcn_buffer_atomic_umin: 5907 case Intrinsic::amdgcn_buffer_atomic_smax: 5908 case Intrinsic::amdgcn_buffer_atomic_umax: 5909 case Intrinsic::amdgcn_buffer_atomic_and: 5910 case Intrinsic::amdgcn_buffer_atomic_or: 5911 case Intrinsic::amdgcn_buffer_atomic_xor: { 5912 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); 5913 unsigned IdxEn = 1; 5914 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4))) 5915 IdxEn = Idx->getZExtValue() != 0; 5916 SDValue Ops[] = { 5917 Op.getOperand(0), // Chain 5918 Op.getOperand(2), // vdata 5919 Op.getOperand(3), // rsrc 5920 Op.getOperand(4), // vindex 5921 SDValue(), // voffset -- will be set by setBufferOffsets 5922 SDValue(), // soffset -- will be set by setBufferOffsets 5923 SDValue(), // offset -- will be set by setBufferOffsets 5924 DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy 5925 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen 5926 }; 5927 setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]); 5928 EVT VT = Op.getValueType(); 5929 5930 auto *M = cast<MemSDNode>(Op); 5931 unsigned Opcode = 0; 5932 5933 switch (IntrID) { 5934 case Intrinsic::amdgcn_buffer_atomic_swap: 5935 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP; 5936 break; 5937 case Intrinsic::amdgcn_buffer_atomic_add: 5938 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD; 5939 break; 5940 case Intrinsic::amdgcn_buffer_atomic_sub: 5941 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB; 5942 break; 5943 case Intrinsic::amdgcn_buffer_atomic_smin: 5944 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN; 5945 break; 5946 case Intrinsic::amdgcn_buffer_atomic_umin: 5947 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN; 5948 break; 5949 case Intrinsic::amdgcn_buffer_atomic_smax: 5950 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX; 5951 break; 5952 case Intrinsic::amdgcn_buffer_atomic_umax: 5953 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX; 5954 break; 5955 case Intrinsic::amdgcn_buffer_atomic_and: 5956 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND; 5957 break; 5958 case Intrinsic::amdgcn_buffer_atomic_or: 5959 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR; 5960 break; 5961 case Intrinsic::amdgcn_buffer_atomic_xor: 5962 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR; 5963 break; 5964 default: 5965 llvm_unreachable("unhandled atomic opcode"); 5966 } 5967 5968 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, 5969 M->getMemOperand()); 5970 } 5971 case Intrinsic::amdgcn_raw_buffer_atomic_swap: 5972 case Intrinsic::amdgcn_raw_buffer_atomic_add: 5973 case Intrinsic::amdgcn_raw_buffer_atomic_sub: 5974 case Intrinsic::amdgcn_raw_buffer_atomic_smin: 5975 case Intrinsic::amdgcn_raw_buffer_atomic_umin: 5976 case Intrinsic::amdgcn_raw_buffer_atomic_smax: 5977 case Intrinsic::amdgcn_raw_buffer_atomic_umax: 5978 case Intrinsic::amdgcn_raw_buffer_atomic_and: 5979 case Intrinsic::amdgcn_raw_buffer_atomic_or: 5980 case Intrinsic::amdgcn_raw_buffer_atomic_xor: { 5981 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); 5982 SDValue Ops[] = { 5983 Op.getOperand(0), // Chain 5984 Op.getOperand(2), // vdata 5985 Op.getOperand(3), // rsrc 5986 DAG.getConstant(0, DL, MVT::i32), // vindex 5987 Offsets.first, // voffset 5988 Op.getOperand(5), // soffset 5989 Offsets.second, // offset 5990 Op.getOperand(6), // cachepolicy 5991 DAG.getConstant(0, DL, MVT::i1), // idxen 5992 }; 5993 EVT VT = Op.getValueType(); 5994 
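    // Note the raw-vs-struct operand convention used in Ops above and in the
    // struct cases below: the raw forms pass a constant zero vindex with
    // idxen = 0, while the struct forms pass the intrinsic's vindex operand
    // with idxen = 1.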
5995 auto *M = cast<MemSDNode>(Op); 5996 unsigned Opcode = 0; 5997 5998 switch (IntrID) { 5999 case Intrinsic::amdgcn_raw_buffer_atomic_swap: 6000 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP; 6001 break; 6002 case Intrinsic::amdgcn_raw_buffer_atomic_add: 6003 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD; 6004 break; 6005 case Intrinsic::amdgcn_raw_buffer_atomic_sub: 6006 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB; 6007 break; 6008 case Intrinsic::amdgcn_raw_buffer_atomic_smin: 6009 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN; 6010 break; 6011 case Intrinsic::amdgcn_raw_buffer_atomic_umin: 6012 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN; 6013 break; 6014 case Intrinsic::amdgcn_raw_buffer_atomic_smax: 6015 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX; 6016 break; 6017 case Intrinsic::amdgcn_raw_buffer_atomic_umax: 6018 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX; 6019 break; 6020 case Intrinsic::amdgcn_raw_buffer_atomic_and: 6021 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND; 6022 break; 6023 case Intrinsic::amdgcn_raw_buffer_atomic_or: 6024 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR; 6025 break; 6026 case Intrinsic::amdgcn_raw_buffer_atomic_xor: 6027 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR; 6028 break; 6029 default: 6030 llvm_unreachable("unhandled atomic opcode"); 6031 } 6032 6033 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, 6034 M->getMemOperand()); 6035 } 6036 case Intrinsic::amdgcn_struct_buffer_atomic_swap: 6037 case Intrinsic::amdgcn_struct_buffer_atomic_add: 6038 case Intrinsic::amdgcn_struct_buffer_atomic_sub: 6039 case Intrinsic::amdgcn_struct_buffer_atomic_smin: 6040 case Intrinsic::amdgcn_struct_buffer_atomic_umin: 6041 case Intrinsic::amdgcn_struct_buffer_atomic_smax: 6042 case Intrinsic::amdgcn_struct_buffer_atomic_umax: 6043 case Intrinsic::amdgcn_struct_buffer_atomic_and: 6044 case Intrinsic::amdgcn_struct_buffer_atomic_or: 6045 case Intrinsic::amdgcn_struct_buffer_atomic_xor: { 6046 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); 6047 SDValue Ops[] = { 6048 Op.getOperand(0), // Chain 6049 Op.getOperand(2), // vdata 6050 Op.getOperand(3), // rsrc 6051 Op.getOperand(4), // vindex 6052 Offsets.first, // voffset 6053 Op.getOperand(6), // soffset 6054 Offsets.second, // offset 6055 Op.getOperand(7), // cachepolicy 6056 DAG.getConstant(1, DL, MVT::i1), // idxen 6057 }; 6058 EVT VT = Op.getValueType(); 6059 6060 auto *M = cast<MemSDNode>(Op); 6061 unsigned Opcode = 0; 6062 6063 switch (IntrID) { 6064 case Intrinsic::amdgcn_struct_buffer_atomic_swap: 6065 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP; 6066 break; 6067 case Intrinsic::amdgcn_struct_buffer_atomic_add: 6068 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD; 6069 break; 6070 case Intrinsic::amdgcn_struct_buffer_atomic_sub: 6071 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB; 6072 break; 6073 case Intrinsic::amdgcn_struct_buffer_atomic_smin: 6074 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN; 6075 break; 6076 case Intrinsic::amdgcn_struct_buffer_atomic_umin: 6077 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN; 6078 break; 6079 case Intrinsic::amdgcn_struct_buffer_atomic_smax: 6080 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX; 6081 break; 6082 case Intrinsic::amdgcn_struct_buffer_atomic_umax: 6083 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX; 6084 break; 6085 case Intrinsic::amdgcn_struct_buffer_atomic_and: 6086 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND; 6087 break; 6088 case Intrinsic::amdgcn_struct_buffer_atomic_or: 6089 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR; 6090 break; 6091 case Intrinsic::amdgcn_struct_buffer_atomic_xor: 6092 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR; 6093 break; 6094 default: 6095 
llvm_unreachable("unhandled atomic opcode"); 6096 } 6097 6098 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, 6099 M->getMemOperand()); 6100 } 6101 case Intrinsic::amdgcn_buffer_atomic_cmpswap: { 6102 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); 6103 unsigned IdxEn = 1; 6104 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(5))) 6105 IdxEn = Idx->getZExtValue() != 0; 6106 SDValue Ops[] = { 6107 Op.getOperand(0), // Chain 6108 Op.getOperand(2), // src 6109 Op.getOperand(3), // cmp 6110 Op.getOperand(4), // rsrc 6111 Op.getOperand(5), // vindex 6112 SDValue(), // voffset -- will be set by setBufferOffsets 6113 SDValue(), // soffset -- will be set by setBufferOffsets 6114 SDValue(), // offset -- will be set by setBufferOffsets 6115 DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy 6116 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen 6117 }; 6118 setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]); 6119 EVT VT = Op.getValueType(); 6120 auto *M = cast<MemSDNode>(Op); 6121 6122 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, 6123 Op->getVTList(), Ops, VT, M->getMemOperand()); 6124 } 6125 case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: { 6126 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); 6127 SDValue Ops[] = { 6128 Op.getOperand(0), // Chain 6129 Op.getOperand(2), // src 6130 Op.getOperand(3), // cmp 6131 Op.getOperand(4), // rsrc 6132 DAG.getConstant(0, DL, MVT::i32), // vindex 6133 Offsets.first, // voffset 6134 Op.getOperand(6), // soffset 6135 Offsets.second, // offset 6136 Op.getOperand(7), // cachepolicy 6137 DAG.getConstant(0, DL, MVT::i1), // idxen 6138 }; 6139 EVT VT = Op.getValueType(); 6140 auto *M = cast<MemSDNode>(Op); 6141 6142 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, 6143 Op->getVTList(), Ops, VT, M->getMemOperand()); 6144 } 6145 case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: { 6146 auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG); 6147 SDValue Ops[] = { 6148 Op.getOperand(0), // Chain 6149 Op.getOperand(2), // src 6150 Op.getOperand(3), // cmp 6151 Op.getOperand(4), // rsrc 6152 Op.getOperand(5), // vindex 6153 Offsets.first, // voffset 6154 Op.getOperand(7), // soffset 6155 Offsets.second, // offset 6156 Op.getOperand(8), // cachepolicy 6157 DAG.getConstant(1, DL, MVT::i1), // idxen 6158 }; 6159 EVT VT = Op.getValueType(); 6160 auto *M = cast<MemSDNode>(Op); 6161 6162 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, 6163 Op->getVTList(), Ops, VT, M->getMemOperand()); 6164 } 6165 6166 default: 6167 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = 6168 AMDGPU::getImageDimIntrinsicInfo(IntrID)) 6169 return lowerImage(Op, ImageDimIntr, DAG); 6170 6171 return SDValue(); 6172 } 6173 } 6174 6175 // Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to 6176 // dwordx4 if on SI. 
6177 SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL, 6178 SDVTList VTList, 6179 ArrayRef<SDValue> Ops, EVT MemVT, 6180 MachineMemOperand *MMO, 6181 SelectionDAG &DAG) const { 6182 EVT VT = VTList.VTs[0]; 6183 EVT WidenedVT = VT; 6184 EVT WidenedMemVT = MemVT; 6185 if (!Subtarget->hasDwordx3LoadStores() && 6186 (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) { 6187 WidenedVT = EVT::getVectorVT(*DAG.getContext(), 6188 WidenedVT.getVectorElementType(), 4); 6189 WidenedMemVT = EVT::getVectorVT(*DAG.getContext(), 6190 WidenedMemVT.getVectorElementType(), 4); 6191 MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16); 6192 } 6193 6194 assert(VTList.NumVTs == 2); 6195 SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]); 6196 6197 auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops, 6198 WidenedMemVT, MMO); 6199 if (WidenedVT != VT) { 6200 auto Extract = DAG.getNode( 6201 ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp, 6202 DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout()))); 6203 NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL); 6204 } 6205 return NewOp; 6206 } 6207 6208 SDValue SITargetLowering::handleD16VData(SDValue VData, 6209 SelectionDAG &DAG) const { 6210 EVT StoreVT = VData.getValueType(); 6211 6212 // No change for f16 and legal vector D16 types. 6213 if (!StoreVT.isVector()) 6214 return VData; 6215 6216 SDLoc DL(VData); 6217 assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16"); 6218 6219 if (Subtarget->hasUnpackedD16VMem()) { 6220 // We need to unpack the packed data to store. 6221 EVT IntStoreVT = StoreVT.changeTypeToInteger(); 6222 SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); 6223 6224 EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, 6225 StoreVT.getVectorNumElements()); 6226 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData); 6227 return DAG.UnrollVectorOp(ZExt.getNode()); 6228 } 6229 6230 assert(isTypeLegal(StoreVT)); 6231 return VData; 6232 } 6233 6234 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, 6235 SelectionDAG &DAG) const { 6236 SDLoc DL(Op); 6237 SDValue Chain = Op.getOperand(0); 6238 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 6239 MachineFunction &MF = DAG.getMachineFunction(); 6240 6241 switch (IntrinsicID) { 6242 case Intrinsic::amdgcn_exp: { 6243 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2)); 6244 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3)); 6245 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8)); 6246 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9)); 6247 6248 const SDValue Ops[] = { 6249 Chain, 6250 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt 6251 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en 6252 Op.getOperand(4), // src0 6253 Op.getOperand(5), // src1 6254 Op.getOperand(6), // src2 6255 Op.getOperand(7), // src3 6256 DAG.getTargetConstant(0, DL, MVT::i1), // compr 6257 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1) 6258 }; 6259 6260 unsigned Opc = Done->isNullValue() ? 
6261 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE; 6262 return DAG.getNode(Opc, DL, Op->getVTList(), Ops); 6263 } 6264 case Intrinsic::amdgcn_exp_compr: { 6265 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2)); 6266 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3)); 6267 SDValue Src0 = Op.getOperand(4); 6268 SDValue Src1 = Op.getOperand(5); 6269 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6)); 6270 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7)); 6271 6272 SDValue Undef = DAG.getUNDEF(MVT::f32); 6273 const SDValue Ops[] = { 6274 Chain, 6275 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt 6276 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en 6277 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), 6278 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), 6279 Undef, // src2 6280 Undef, // src3 6281 DAG.getTargetConstant(1, DL, MVT::i1), // compr 6282 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1) 6283 }; 6284 6285 unsigned Opc = Done->isNullValue() ? 6286 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE; 6287 return DAG.getNode(Opc, DL, Op->getVTList(), Ops); 6288 } 6289 case Intrinsic::amdgcn_s_sendmsg: 6290 case Intrinsic::amdgcn_s_sendmsghalt: { 6291 unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ? 6292 AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT; 6293 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3)); 6294 SDValue Glue = Chain.getValue(1); 6295 return DAG.getNode(NodeOp, DL, MVT::Other, Chain, 6296 Op.getOperand(2), Glue); 6297 } 6298 case Intrinsic::amdgcn_init_exec: { 6299 return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain, 6300 Op.getOperand(2)); 6301 } 6302 case Intrinsic::amdgcn_init_exec_from_input: { 6303 return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain, 6304 Op.getOperand(2), Op.getOperand(3)); 6305 } 6306 case Intrinsic::amdgcn_s_barrier: { 6307 if (getTargetMachine().getOptLevel() > CodeGenOpt::None) { 6308 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 6309 unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second; 6310 if (WGSize <= ST.getWavefrontSize()) 6311 return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other, 6312 Op.getOperand(0)), 0); 6313 } 6314 return SDValue(); 6315 }; 6316 case Intrinsic::amdgcn_tbuffer_store: { 6317 SDValue VData = Op.getOperand(2); 6318 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 6319 if (IsD16) 6320 VData = handleD16VData(VData, DAG); 6321 unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue(); 6322 unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue(); 6323 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue(); 6324 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue(); 6325 unsigned IdxEn = 1; 6326 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4))) 6327 IdxEn = Idx->getZExtValue() != 0; 6328 SDValue Ops[] = { 6329 Chain, 6330 VData, // vdata 6331 Op.getOperand(3), // rsrc 6332 Op.getOperand(4), // vindex 6333 Op.getOperand(5), // voffset 6334 Op.getOperand(6), // soffset 6335 Op.getOperand(7), // offset 6336 DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format 6337 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy 6338 DAG.getConstant(IdxEn, DL, MVT::i1), // idexen 6339 }; 6340 unsigned Opc = IsD16 ? 
AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : 6341 AMDGPUISD::TBUFFER_STORE_FORMAT; 6342 MemSDNode *M = cast<MemSDNode>(Op); 6343 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 6344 M->getMemoryVT(), M->getMemOperand()); 6345 } 6346 6347 case Intrinsic::amdgcn_struct_tbuffer_store: { 6348 SDValue VData = Op.getOperand(2); 6349 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 6350 if (IsD16) 6351 VData = handleD16VData(VData, DAG); 6352 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); 6353 SDValue Ops[] = { 6354 Chain, 6355 VData, // vdata 6356 Op.getOperand(3), // rsrc 6357 Op.getOperand(4), // vindex 6358 Offsets.first, // voffset 6359 Op.getOperand(6), // soffset 6360 Offsets.second, // offset 6361 Op.getOperand(7), // format 6362 Op.getOperand(8), // cachepolicy 6363 DAG.getConstant(1, DL, MVT::i1), // idexen 6364 }; 6365 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : 6366 AMDGPUISD::TBUFFER_STORE_FORMAT; 6367 MemSDNode *M = cast<MemSDNode>(Op); 6368 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 6369 M->getMemoryVT(), M->getMemOperand()); 6370 } 6371 6372 case Intrinsic::amdgcn_raw_tbuffer_store: { 6373 SDValue VData = Op.getOperand(2); 6374 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 6375 if (IsD16) 6376 VData = handleD16VData(VData, DAG); 6377 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); 6378 SDValue Ops[] = { 6379 Chain, 6380 VData, // vdata 6381 Op.getOperand(3), // rsrc 6382 DAG.getConstant(0, DL, MVT::i32), // vindex 6383 Offsets.first, // voffset 6384 Op.getOperand(5), // soffset 6385 Offsets.second, // offset 6386 Op.getOperand(6), // format 6387 Op.getOperand(7), // cachepolicy 6388 DAG.getConstant(0, DL, MVT::i1), // idexen 6389 }; 6390 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : 6391 AMDGPUISD::TBUFFER_STORE_FORMAT; 6392 MemSDNode *M = cast<MemSDNode>(Op); 6393 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 6394 M->getMemoryVT(), M->getMemOperand()); 6395 } 6396 6397 case Intrinsic::amdgcn_buffer_store: 6398 case Intrinsic::amdgcn_buffer_store_format: { 6399 SDValue VData = Op.getOperand(2); 6400 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 6401 if (IsD16) 6402 VData = handleD16VData(VData, DAG); 6403 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); 6404 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); 6405 unsigned IdxEn = 1; 6406 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4))) 6407 IdxEn = Idx->getZExtValue() != 0; 6408 SDValue Ops[] = { 6409 Chain, 6410 VData, 6411 Op.getOperand(3), // rsrc 6412 Op.getOperand(4), // vindex 6413 SDValue(), // voffset -- will be set by setBufferOffsets 6414 SDValue(), // soffset -- will be set by setBufferOffsets 6415 SDValue(), // offset -- will be set by setBufferOffsets 6416 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy 6417 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen 6418 }; 6419 setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]); 6420 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ? 6421 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; 6422 Opc = IsD16 ? 
AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; 6423 MemSDNode *M = cast<MemSDNode>(Op); 6424 6425 // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics 6426 EVT VDataType = VData.getValueType().getScalarType(); 6427 if (VDataType == MVT::i8 || VDataType == MVT::i16) 6428 return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M); 6429 6430 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 6431 M->getMemoryVT(), M->getMemOperand()); 6432 } 6433 6434 case Intrinsic::amdgcn_raw_buffer_store: 6435 case Intrinsic::amdgcn_raw_buffer_store_format: { 6436 SDValue VData = Op.getOperand(2); 6437 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 6438 if (IsD16) 6439 VData = handleD16VData(VData, DAG); 6440 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); 6441 SDValue Ops[] = { 6442 Chain, 6443 VData, 6444 Op.getOperand(3), // rsrc 6445 DAG.getConstant(0, DL, MVT::i32), // vindex 6446 Offsets.first, // voffset 6447 Op.getOperand(5), // soffset 6448 Offsets.second, // offset 6449 Op.getOperand(6), // cachepolicy 6450 DAG.getConstant(0, DL, MVT::i1), // idxen 6451 }; 6452 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_raw_buffer_store ? 6453 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; 6454 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; 6455 MemSDNode *M = cast<MemSDNode>(Op); 6456 6457 // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics 6458 EVT VDataType = VData.getValueType().getScalarType(); 6459 if (VDataType == MVT::i8 || VDataType == MVT::i16) 6460 return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M); 6461 6462 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 6463 M->getMemoryVT(), M->getMemOperand()); 6464 } 6465 6466 case Intrinsic::amdgcn_struct_buffer_store: 6467 case Intrinsic::amdgcn_struct_buffer_store_format: { 6468 SDValue VData = Op.getOperand(2); 6469 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 6470 if (IsD16) 6471 VData = handleD16VData(VData, DAG); 6472 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); 6473 SDValue Ops[] = { 6474 Chain, 6475 VData, 6476 Op.getOperand(3), // rsrc 6477 Op.getOperand(4), // vindex 6478 Offsets.first, // voffset 6479 Op.getOperand(6), // soffset 6480 Offsets.second, // offset 6481 Op.getOperand(7), // cachepolicy 6482 DAG.getConstant(1, DL, MVT::i1), // idxen 6483 }; 6484 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ? 6485 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; 6486 Opc = IsD16 ? 
AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; 6487 MemSDNode *M = cast<MemSDNode>(Op); 6488 6489 // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics 6490 EVT VDataType = VData.getValueType().getScalarType(); 6491 if (VDataType == MVT::i8 || VDataType == MVT::i16) 6492 return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M); 6493 6494 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 6495 M->getMemoryVT(), M->getMemOperand()); 6496 } 6497 6498 default: { 6499 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = 6500 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) 6501 return lowerImage(Op, ImageDimIntr, DAG); 6502 6503 return Op; 6504 } 6505 } 6506 } 6507 6508 // The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args: 6509 // offset (the offset that is included in bounds checking and swizzling, to be 6510 // split between the instruction's voffset and immoffset fields) and soffset 6511 // (the offset that is excluded from bounds checking and swizzling, to go in 6512 // the instruction's soffset field). This function takes the first kind of 6513 // offset and figures out how to split it between voffset and immoffset. 6514 std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets( 6515 SDValue Offset, SelectionDAG &DAG) const { 6516 SDLoc DL(Offset); 6517 const unsigned MaxImm = 4095; 6518 SDValue N0 = Offset; 6519 ConstantSDNode *C1 = nullptr; 6520 6521 if ((C1 = dyn_cast<ConstantSDNode>(N0))) 6522 N0 = SDValue(); 6523 else if (DAG.isBaseWithConstantOffset(N0)) { 6524 C1 = cast<ConstantSDNode>(N0.getOperand(1)); 6525 N0 = N0.getOperand(0); 6526 } 6527 6528 if (C1) { 6529 unsigned ImmOffset = C1->getZExtValue(); 6530 // If the immediate value is too big for the immoffset field, put the value 6531 // and -4096 into the immoffset field so that the value that is copied/added 6532 // for the voffset field is a multiple of 4096, and it stands more chance 6533 // of being CSEd with the copy/add for another similar load/store. 6534 // However, do not do that rounding down to a multiple of 4096 if that is a 6535 // negative number, as it appears to be illegal to have a negative offset 6536 // in the vgpr, even if adding the immediate offset makes it positive. 6537 unsigned Overflow = ImmOffset & ~MaxImm; 6538 ImmOffset -= Overflow; 6539 if ((int32_t)Overflow < 0) { 6540 Overflow += ImmOffset; 6541 ImmOffset = 0; 6542 } 6543 C1 = cast<ConstantSDNode>(DAG.getConstant(ImmOffset, DL, MVT::i32)); 6544 if (Overflow) { 6545 auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32); 6546 if (!N0) 6547 N0 = OverflowVal; 6548 else { 6549 SDValue Ops[] = { N0, OverflowVal }; 6550 N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops); 6551 } 6552 } 6553 } 6554 if (!N0) 6555 N0 = DAG.getConstant(0, DL, MVT::i32); 6556 if (!C1) 6557 C1 = cast<ConstantSDNode>(DAG.getConstant(0, DL, MVT::i32)); 6558 return {N0, SDValue(C1, 0)}; 6559 } 6560 6561 // Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the 6562 // three offsets (voffset, soffset and instoffset) into the SDValue[3] array 6563 // pointed to by Offsets. 
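//
// A minimal illustration of the cases handled below (assuming the default
// Align argument): a plain constant offset such as 40 becomes
// {voffset = 0, soffset = 0, instoffset = 40}; a base-plus-constant offset
// such as (add %x, 40) becomes {voffset = %x, soffset = 0, instoffset = 40}
// when AMDGPU::splitMUBUFOffset accepts the constant; anything else falls
// back to {voffset = CombinedOffset, soffset = 0, instoffset = 0}.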
6564 void SITargetLowering::setBufferOffsets(SDValue CombinedOffset, 6565 SelectionDAG &DAG, SDValue *Offsets, 6566 unsigned Align) const { 6567 SDLoc DL(CombinedOffset); 6568 if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) { 6569 uint32_t Imm = C->getZExtValue(); 6570 uint32_t SOffset, ImmOffset; 6571 if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, Align)) { 6572 Offsets[0] = DAG.getConstant(0, DL, MVT::i32); 6573 Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32); 6574 Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32); 6575 return; 6576 } 6577 } 6578 if (DAG.isBaseWithConstantOffset(CombinedOffset)) { 6579 SDValue N0 = CombinedOffset.getOperand(0); 6580 SDValue N1 = CombinedOffset.getOperand(1); 6581 uint32_t SOffset, ImmOffset; 6582 int Offset = cast<ConstantSDNode>(N1)->getSExtValue(); 6583 if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset, 6584 Subtarget, Align)) { 6585 Offsets[0] = N0; 6586 Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32); 6587 Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32); 6588 return; 6589 } 6590 } 6591 Offsets[0] = CombinedOffset; 6592 Offsets[1] = DAG.getConstant(0, DL, MVT::i32); 6593 Offsets[2] = DAG.getConstant(0, DL, MVT::i32); 6594 } 6595 6596 // Handle 8 bit and 16 bit buffer loads 6597 SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG, 6598 EVT LoadVT, SDLoc DL, 6599 ArrayRef<SDValue> Ops, 6600 MemSDNode *M) const { 6601 EVT IntVT = LoadVT.changeTypeToInteger(); 6602 unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ? 6603 AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT; 6604 6605 SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other); 6606 SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList, 6607 Ops, IntVT, 6608 M->getMemOperand()); 6609 SDValue BufferLoadTrunc = DAG.getNode(ISD::TRUNCATE, DL, 6610 LoadVT.getScalarType(), BufferLoad); 6611 return DAG.getMergeValues({BufferLoadTrunc, BufferLoad.getValue(1)}, DL); 6612 } 6613 6614 // Handle 8 bit and 16 bit buffer stores 6615 SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG, 6616 EVT VDataType, SDLoc DL, 6617 SDValue Ops[], 6618 MemSDNode *M) const { 6619 SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]); 6620 Ops[1] = BufferStoreExt; 6621 unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE : 6622 AMDGPUISD::BUFFER_STORE_SHORT; 6623 ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9); 6624 return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType, 6625 M->getMemOperand()); 6626 } 6627 6628 static SDValue getLoadExtOrTrunc(SelectionDAG &DAG, 6629 ISD::LoadExtType ExtType, SDValue Op, 6630 const SDLoc &SL, EVT VT) { 6631 if (VT.bitsLT(Op.getValueType())) 6632 return DAG.getNode(ISD::TRUNCATE, SL, VT, Op); 6633 6634 switch (ExtType) { 6635 case ISD::SEXTLOAD: 6636 return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op); 6637 case ISD::ZEXTLOAD: 6638 return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op); 6639 case ISD::EXTLOAD: 6640 return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op); 6641 case ISD::NON_EXTLOAD: 6642 return Op; 6643 } 6644 6645 llvm_unreachable("invalid ext type"); 6646 } 6647 6648 SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const { 6649 SelectionDAG &DAG = DCI.DAG; 6650 if (Ld->getAlignment() < 4 || Ld->isDivergent()) 6651 return SDValue(); 6652 6653 // FIXME: Constant loads should all be marked invariant. 
6654 unsigned AS = Ld->getAddressSpace(); 6655 if (AS != AMDGPUAS::CONSTANT_ADDRESS && 6656 AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT && 6657 (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant())) 6658 return SDValue(); 6659 6660 // Don't do this early, since it may interfere with adjacent load merging for 6661 // illegal types. We can avoid losing alignment information for exotic types 6662 // pre-legalize. 6663 EVT MemVT = Ld->getMemoryVT(); 6664 if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) || 6665 MemVT.getSizeInBits() >= 32) 6666 return SDValue(); 6667 6668 SDLoc SL(Ld); 6669 6670 assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) && 6671 "unexpected vector extload"); 6672 6673 // TODO: Drop only high part of range. 6674 SDValue Ptr = Ld->getBasePtr(); 6675 SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, 6676 MVT::i32, SL, Ld->getChain(), Ptr, 6677 Ld->getOffset(), 6678 Ld->getPointerInfo(), MVT::i32, 6679 Ld->getAlignment(), 6680 Ld->getMemOperand()->getFlags(), 6681 Ld->getAAInfo(), 6682 nullptr); // Drop ranges 6683 6684 EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()); 6685 if (MemVT.isFloatingPoint()) { 6686 assert(Ld->getExtensionType() == ISD::NON_EXTLOAD && 6687 "unexpected fp extload"); 6688 TruncVT = MemVT.changeTypeToInteger(); 6689 } 6690 6691 SDValue Cvt = NewLoad; 6692 if (Ld->getExtensionType() == ISD::SEXTLOAD) { 6693 Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad, 6694 DAG.getValueType(TruncVT)); 6695 } else if (Ld->getExtensionType() == ISD::ZEXTLOAD || 6696 Ld->getExtensionType() == ISD::NON_EXTLOAD) { 6697 Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT); 6698 } else { 6699 assert(Ld->getExtensionType() == ISD::EXTLOAD); 6700 } 6701 6702 EVT VT = Ld->getValueType(0); 6703 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 6704 6705 DCI.AddToWorklist(Cvt.getNode()); 6706 6707 // We may need to handle exotic cases, such as i16->i64 extloads, so insert 6708 // the appropriate extension from the 32-bit load. 6709 Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT); 6710 DCI.AddToWorklist(Cvt.getNode()); 6711 6712 // Handle conversion back to floating point if necessary. 6713 Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt); 6714 6715 return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL); 6716 } 6717 6718 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 6719 SDLoc DL(Op); 6720 LoadSDNode *Load = cast<LoadSDNode>(Op); 6721 ISD::LoadExtType ExtType = Load->getExtensionType(); 6722 EVT MemVT = Load->getMemoryVT(); 6723 6724 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) { 6725 if (MemVT == MVT::i16 && isTypeLegal(MVT::i16)) 6726 return SDValue(); 6727 6728 // FIXME: Copied from PPC 6729 // First, load into 32 bits, then truncate to 1 bit. 6730 6731 SDValue Chain = Load->getChain(); 6732 SDValue BasePtr = Load->getBasePtr(); 6733 MachineMemOperand *MMO = Load->getMemOperand(); 6734 6735 EVT RealMemVT = (MemVT == MVT::i1) ? 
MVT::i8 : MVT::i16; 6736 6737 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, 6738 BasePtr, RealMemVT, MMO); 6739 6740 if (!MemVT.isVector()) { 6741 SDValue Ops[] = { 6742 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD), 6743 NewLD.getValue(1) 6744 }; 6745 6746 return DAG.getMergeValues(Ops, DL); 6747 } 6748 6749 SmallVector<SDValue, 3> Elts; 6750 for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) { 6751 SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD, 6752 DAG.getConstant(I, DL, MVT::i32)); 6753 6754 Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt)); 6755 } 6756 6757 SDValue Ops[] = { 6758 DAG.getBuildVector(MemVT, DL, Elts), 6759 NewLD.getValue(1) 6760 }; 6761 6762 return DAG.getMergeValues(Ops, DL); 6763 } 6764 6765 if (!MemVT.isVector()) 6766 return SDValue(); 6767 6768 assert(Op.getValueType().getVectorElementType() == MVT::i32 && 6769 "Custom lowering for non-i32 vectors hasn't been implemented."); 6770 6771 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT, 6772 *Load->getMemOperand())) { 6773 SDValue Ops[2]; 6774 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG); 6775 return DAG.getMergeValues(Ops, DL); 6776 } 6777 6778 unsigned Alignment = Load->getAlignment(); 6779 unsigned AS = Load->getAddressSpace(); 6780 if (Subtarget->hasLDSMisalignedBug() && 6781 AS == AMDGPUAS::FLAT_ADDRESS && 6782 Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) { 6783 return SplitVectorLoad(Op, DAG); 6784 } 6785 6786 MachineFunction &MF = DAG.getMachineFunction(); 6787 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 6788 // If there is a possibilty that flat instruction access scratch memory 6789 // then we need to use the same legalization rules we use for private. 6790 if (AS == AMDGPUAS::FLAT_ADDRESS) 6791 AS = MFI->hasFlatScratchInit() ? 6792 AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; 6793 6794 unsigned NumElements = MemVT.getVectorNumElements(); 6795 6796 if (AS == AMDGPUAS::CONSTANT_ADDRESS || 6797 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) { 6798 if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) { 6799 if (MemVT.isPow2VectorType()) 6800 return SDValue(); 6801 if (NumElements == 3) 6802 return WidenVectorLoad(Op, DAG); 6803 return SplitVectorLoad(Op, DAG); 6804 } 6805 // Non-uniform loads will be selected to MUBUF instructions, so they 6806 // have the same legalization requirements as global and private 6807 // loads. 6808 // 6809 } 6810 6811 if (AS == AMDGPUAS::CONSTANT_ADDRESS || 6812 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || 6813 AS == AMDGPUAS::GLOBAL_ADDRESS) { 6814 if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() && 6815 !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) && 6816 Alignment >= 4 && NumElements < 32) { 6817 if (MemVT.isPow2VectorType()) 6818 return SDValue(); 6819 if (NumElements == 3) 6820 return WidenVectorLoad(Op, DAG); 6821 return SplitVectorLoad(Op, DAG); 6822 } 6823 // Non-uniform loads will be selected to MUBUF instructions, so they 6824 // have the same legalization requirements as global and private 6825 // loads. 6826 // 6827 } 6828 if (AS == AMDGPUAS::CONSTANT_ADDRESS || 6829 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || 6830 AS == AMDGPUAS::GLOBAL_ADDRESS || 6831 AS == AMDGPUAS::FLAT_ADDRESS) { 6832 if (NumElements > 4) 6833 return SplitVectorLoad(Op, DAG); 6834 // v3 loads not supported on SI. 
6835 if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) 6836 return WidenVectorLoad(Op, DAG); 6837 // v3 and v4 loads are supported for private and global memory. 6838 return SDValue(); 6839 } 6840 if (AS == AMDGPUAS::PRIVATE_ADDRESS) { 6841 // Depending on the setting of the private_element_size field in the 6842 // resource descriptor, we can only make private accesses up to a certain 6843 // size. 6844 switch (Subtarget->getMaxPrivateElementSize()) { 6845 case 4: 6846 return scalarizeVectorLoad(Load, DAG); 6847 case 8: 6848 if (NumElements > 2) 6849 return SplitVectorLoad(Op, DAG); 6850 return SDValue(); 6851 case 16: 6852 // Same as global/flat 6853 if (NumElements > 4) 6854 return SplitVectorLoad(Op, DAG); 6855 // v3 loads not supported on SI. 6856 if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) 6857 return WidenVectorLoad(Op, DAG); 6858 return SDValue(); 6859 default: 6860 llvm_unreachable("unsupported private_element_size"); 6861 } 6862 } else if (AS == AMDGPUAS::LOCAL_ADDRESS) { 6863 // Use ds_read_b128 if possible. 6864 if (Subtarget->useDS128() && Load->getAlignment() >= 16 && 6865 MemVT.getStoreSize() == 16) 6866 return SDValue(); 6867 6868 if (NumElements > 2) 6869 return SplitVectorLoad(Op, DAG); 6870 6871 // SI has a hardware bug in the LDS / GDS boounds checking: if the base 6872 // address is negative, then the instruction is incorrectly treated as 6873 // out-of-bounds even if base + offsets is in bounds. Split vectorized 6874 // loads here to avoid emitting ds_read2_b32. We may re-combine the 6875 // load later in the SILoadStoreOptimizer. 6876 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && 6877 NumElements == 2 && MemVT.getStoreSize() == 8 && 6878 Load->getAlignment() < 8) { 6879 return SplitVectorLoad(Op, DAG); 6880 } 6881 } 6882 return SDValue(); 6883 } 6884 6885 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 6886 EVT VT = Op.getValueType(); 6887 assert(VT.getSizeInBits() == 64); 6888 6889 SDLoc DL(Op); 6890 SDValue Cond = Op.getOperand(0); 6891 6892 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 6893 SDValue One = DAG.getConstant(1, DL, MVT::i32); 6894 6895 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); 6896 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); 6897 6898 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); 6899 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); 6900 6901 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); 6902 6903 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); 6904 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); 6905 6906 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); 6907 6908 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi}); 6909 return DAG.getNode(ISD::BITCAST, DL, VT, Res); 6910 } 6911 6912 // Catch division cases where we can use shortcuts with rcp and rsq 6913 // instructions. 
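//
// Illustrative examples of the shortcuts applied below (assuming the
// unsafe-math / allow-reciprocal conditions checked in the code hold):
//
//   fdiv 1.0, (fsqrt x)  ->  (AMDGPUISD::RSQ x)
//   fdiv 1.0, x          ->  (AMDGPUISD::RCP x)
//   fdiv -1.0, x         ->  (AMDGPUISD::RCP (fneg x))
//   fdiv a, b            ->  (fmul a, (AMDGPUISD::RCP b))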
6914 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op, 6915 SelectionDAG &DAG) const { 6916 SDLoc SL(Op); 6917 SDValue LHS = Op.getOperand(0); 6918 SDValue RHS = Op.getOperand(1); 6919 EVT VT = Op.getValueType(); 6920 const SDNodeFlags Flags = Op->getFlags(); 6921 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath || Flags.hasAllowReciprocal(); 6922 6923 if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals()) 6924 return SDValue(); 6925 6926 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { 6927 if (Unsafe || VT == MVT::f32 || VT == MVT::f16) { 6928 if (CLHS->isExactlyValue(1.0)) { 6929 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to 6930 // the CI documentation has a worst case error of 1 ulp. 6931 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to 6932 // use it as long as we aren't trying to use denormals. 6933 // 6934 // v_rcp_f16 and v_rsq_f16 DO support denormals. 6935 6936 // 1.0 / sqrt(x) -> rsq(x) 6937 6938 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP 6939 // error seems really high at 2^29 ULP. 6940 if (RHS.getOpcode() == ISD::FSQRT) 6941 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); 6942 6943 // 1.0 / x -> rcp(x) 6944 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 6945 } 6946 6947 // Same as for 1.0, but expand the sign out of the constant. 6948 if (CLHS->isExactlyValue(-1.0)) { 6949 // -1.0 / x -> rcp (fneg x) 6950 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 6951 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS); 6952 } 6953 } 6954 } 6955 6956 if (Unsafe) { 6957 // Turn into multiply by the reciprocal. 6958 // x / y -> x * (1.0 / y) 6959 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 6960 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags); 6961 } 6962 6963 return SDValue(); 6964 } 6965 6966 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, 6967 EVT VT, SDValue A, SDValue B, SDValue GlueChain) { 6968 if (GlueChain->getNumValues() <= 1) { 6969 return DAG.getNode(Opcode, SL, VT, A, B); 6970 } 6971 6972 assert(GlueChain->getNumValues() == 3); 6973 6974 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); 6975 switch (Opcode) { 6976 default: llvm_unreachable("no chain equivalent for opcode"); 6977 case ISD::FMUL: 6978 Opcode = AMDGPUISD::FMUL_W_CHAIN; 6979 break; 6980 } 6981 6982 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, 6983 GlueChain.getValue(2)); 6984 } 6985 6986 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, 6987 EVT VT, SDValue A, SDValue B, SDValue C, 6988 SDValue GlueChain) { 6989 if (GlueChain->getNumValues() <= 1) { 6990 return DAG.getNode(Opcode, SL, VT, A, B, C); 6991 } 6992 6993 assert(GlueChain->getNumValues() == 3); 6994 6995 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); 6996 switch (Opcode) { 6997 default: llvm_unreachable("no chain equivalent for opcode"); 6998 case ISD::FMA: 6999 Opcode = AMDGPUISD::FMA_W_CHAIN; 7000 break; 7001 } 7002 7003 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C, 7004 GlueChain.getValue(2)); 7005 } 7006 7007 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const { 7008 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 7009 return FastLowered; 7010 7011 SDLoc SL(Op); 7012 SDValue Src0 = Op.getOperand(0); 7013 SDValue Src1 = Op.getOperand(1); 7014 7015 SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); 7016 SDValue 
CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); 7017 7018 SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1); 7019 SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1); 7020 7021 SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32); 7022 SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag); 7023 7024 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0); 7025 } 7026 7027 // Faster 2.5 ULP division that does not support denormals. 7028 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const { 7029 SDLoc SL(Op); 7030 SDValue LHS = Op.getOperand(1); 7031 SDValue RHS = Op.getOperand(2); 7032 7033 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); 7034 7035 const APFloat K0Val(BitsToFloat(0x6f800000)); 7036 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32); 7037 7038 const APFloat K1Val(BitsToFloat(0x2f800000)); 7039 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32); 7040 7041 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 7042 7043 EVT SetCCVT = 7044 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); 7045 7046 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); 7047 7048 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); 7049 7050 // TODO: Should this propagate fast-math-flags? 7051 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); 7052 7053 // rcp does not support denormals. 7054 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); 7055 7056 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); 7057 7058 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); 7059 } 7060 7061 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { 7062 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 7063 return FastLowered; 7064 7065 SDLoc SL(Op); 7066 SDValue LHS = Op.getOperand(0); 7067 SDValue RHS = Op.getOperand(1); 7068 7069 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 7070 7071 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1); 7072 7073 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, 7074 RHS, RHS, LHS); 7075 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, 7076 LHS, RHS, LHS); 7077 7078 // Denominator is scaled to not be denormal, so using rcp is ok. 
7079 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, 7080 DenominatorScaled); 7081 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, 7082 DenominatorScaled); 7083 7084 const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE | 7085 (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) | 7086 (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_); 7087 7088 const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16); 7089 7090 if (!Subtarget->hasFP32Denormals()) { 7091 SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); 7092 const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE, 7093 SL, MVT::i32); 7094 SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs, 7095 DAG.getEntryNode(), 7096 EnableDenormValue, BitField); 7097 SDValue Ops[3] = { 7098 NegDivScale0, 7099 EnableDenorm.getValue(0), 7100 EnableDenorm.getValue(1) 7101 }; 7102 7103 NegDivScale0 = DAG.getMergeValues(Ops, SL); 7104 } 7105 7106 SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, 7107 ApproxRcp, One, NegDivScale0); 7108 7109 SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, 7110 ApproxRcp, Fma0); 7111 7112 SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled, 7113 Fma1, Fma1); 7114 7115 SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, 7116 NumeratorScaled, Mul); 7117 7118 SDValue Fma3 = getFPTernOp(DAG, ISD::FMA,SL, MVT::f32, Fma2, Fma1, Mul, Fma2); 7119 7120 SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, 7121 NumeratorScaled, Fma3); 7122 7123 if (!Subtarget->hasFP32Denormals()) { 7124 const SDValue DisableDenormValue = 7125 DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32); 7126 SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other, 7127 Fma4.getValue(1), 7128 DisableDenormValue, 7129 BitField, 7130 Fma4.getValue(2)); 7131 7132 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, 7133 DisableDenorm, DAG.getRoot()); 7134 DAG.setRoot(OutputChain); 7135 } 7136 7137 SDValue Scale = NumeratorScaled.getValue(1); 7138 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32, 7139 Fma4, Fma1, Fma3, Scale); 7140 7141 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS); 7142 } 7143 7144 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { 7145 if (DAG.getTarget().Options.UnsafeFPMath) 7146 return lowerFastUnsafeFDIV(Op, DAG); 7147 7148 SDLoc SL(Op); 7149 SDValue X = Op.getOperand(0); 7150 SDValue Y = Op.getOperand(1); 7151 7152 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); 7153 7154 SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1); 7155 7156 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X); 7157 7158 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0); 7159 7160 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0); 7161 7162 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One); 7163 7164 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp); 7165 7166 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One); 7167 7168 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X); 7169 7170 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1); 7171 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3); 7172 7173 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64, 7174 NegDivScale0, Mul, DivScale1); 7175 7176 SDValue Scale; 
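  // A sketch of what the sequence above computes (not an exact error
  // analysis): with b' = div_scale(Y, Y, X) and a' = div_scale(X, Y, X),
  // Rcp approximates 1/b', Fma1 and Fma3 are two Newton-Raphson refinements
  // of it (r -> r + r * (1 - b' * r)), Mul = a' * Fma3 is the scaled quotient
  // estimate, and Fma4 = a' - b' * Mul is its residual. div_fmas below
  // consumes these once the scale predicate has been chosen.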
7177 7178 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) { 7179 // Workaround a hardware bug on SI where the condition output from div_scale 7180 // is not usable. 7181 7182 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32); 7183 7184 // Figure out if the scale to use for div_fmas. 7185 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); 7186 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y); 7187 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0); 7188 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1); 7189 7190 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi); 7191 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi); 7192 7193 SDValue Scale0Hi 7194 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi); 7195 SDValue Scale1Hi 7196 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi); 7197 7198 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ); 7199 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ); 7200 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen); 7201 } else { 7202 Scale = DivScale1.getValue(1); 7203 } 7204 7205 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64, 7206 Fma4, Fma3, Mul, Scale); 7207 7208 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X); 7209 } 7210 7211 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { 7212 EVT VT = Op.getValueType(); 7213 7214 if (VT == MVT::f32) 7215 return LowerFDIV32(Op, DAG); 7216 7217 if (VT == MVT::f64) 7218 return LowerFDIV64(Op, DAG); 7219 7220 if (VT == MVT::f16) 7221 return LowerFDIV16(Op, DAG); 7222 7223 llvm_unreachable("Unexpected type for fdiv"); 7224 } 7225 7226 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 7227 SDLoc DL(Op); 7228 StoreSDNode *Store = cast<StoreSDNode>(Op); 7229 EVT VT = Store->getMemoryVT(); 7230 7231 if (VT == MVT::i1) { 7232 return DAG.getTruncStore(Store->getChain(), DL, 7233 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32), 7234 Store->getBasePtr(), MVT::i1, Store->getMemOperand()); 7235 } 7236 7237 assert(VT.isVector() && 7238 Store->getValue().getValueType().getScalarType() == MVT::i32); 7239 7240 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT, 7241 *Store->getMemOperand())) { 7242 return expandUnalignedStore(Store, DAG); 7243 } 7244 7245 unsigned AS = Store->getAddressSpace(); 7246 if (Subtarget->hasLDSMisalignedBug() && 7247 AS == AMDGPUAS::FLAT_ADDRESS && 7248 Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) { 7249 return SplitVectorStore(Op, DAG); 7250 } 7251 7252 MachineFunction &MF = DAG.getMachineFunction(); 7253 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 7254 // If there is a possibilty that flat instruction access scratch memory 7255 // then we need to use the same legalization rules we use for private. 7256 if (AS == AMDGPUAS::FLAT_ADDRESS) 7257 AS = MFI->hasFlatScratchInit() ? 7258 AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; 7259 7260 unsigned NumElements = VT.getVectorNumElements(); 7261 if (AS == AMDGPUAS::GLOBAL_ADDRESS || 7262 AS == AMDGPUAS::FLAT_ADDRESS) { 7263 if (NumElements > 4) 7264 return SplitVectorStore(Op, DAG); 7265 // v3 stores not supported on SI. 
if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) 7267 return SplitVectorStore(Op, DAG); 7268 return SDValue(); 7269 } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { 7270 switch (Subtarget->getMaxPrivateElementSize()) { 7271 case 4: 7272 return scalarizeVectorStore(Store, DAG); 7273 case 8: 7274 if (NumElements > 2) 7275 return SplitVectorStore(Op, DAG); 7276 return SDValue(); 7277 case 16: 7278 if (NumElements > 4 || NumElements == 3) 7279 return SplitVectorStore(Op, DAG); 7280 return SDValue(); 7281 default: 7282 llvm_unreachable("unsupported private_element_size"); 7283 } 7284 } else if (AS == AMDGPUAS::LOCAL_ADDRESS) { 7285 // Use ds_write_b128 if possible. 7286 if (Subtarget->useDS128() && Store->getAlignment() >= 16 && 7287 VT.getStoreSize() == 16 && NumElements != 3) 7288 return SDValue(); 7289 7290 if (NumElements > 2) 7291 return SplitVectorStore(Op, DAG); 7292 7293 // SI has a hardware bug in the LDS / GDS bounds checking: if the base 7294 // address is negative, then the instruction is incorrectly treated as 7295 // out-of-bounds even if base + offsets is in bounds. Split vectorized 7296 // stores here to avoid emitting ds_write2_b32. We may re-combine the 7297 // store later in the SILoadStoreOptimizer. 7298 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && 7299 NumElements == 2 && VT.getStoreSize() == 8 && 7300 Store->getAlignment() < 8) { 7301 return SplitVectorStore(Op, DAG); 7302 } 7303 7304 return SDValue(); 7305 } else { 7306 llvm_unreachable("unhandled address space"); 7307 } 7308 } 7309 7310 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { 7311 SDLoc DL(Op); 7312 EVT VT = Op.getValueType(); 7313 SDValue Arg = Op.getOperand(0); 7314 SDValue TrigVal; 7315 7316 // TODO: Should this propagate fast-math-flags?
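  // Roughly (a sketch of the lowering below): the hardware sin/cos consume
  // their input in revolutions rather than radians, so the argument is scaled
  // by 0.5/PI first; on subtargets with a reduced trig input range the scaled
  // value is additionally wrapped into [0, 1) with FRACT:
  //
  //   fsin x -> (SIN_HW (FRACT (fmul x, 0.5/PI)))   if hasTrigReducedRange()
  //   fsin x -> (SIN_HW (fmul x, 0.5/PI))           otherwise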
7317 7318 SDValue OneOver2Pi = DAG.getConstantFP(0.5 / M_PI, DL, VT); 7319 7320 if (Subtarget->hasTrigReducedRange()) { 7321 SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi); 7322 TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal); 7323 } else { 7324 TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi); 7325 } 7326 7327 switch (Op.getOpcode()) { 7328 case ISD::FCOS: 7329 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal); 7330 case ISD::FSIN: 7331 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal); 7332 default: 7333 llvm_unreachable("Wrong trig opcode"); 7334 } 7335 } 7336 7337 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const { 7338 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op); 7339 assert(AtomicNode->isCompareAndSwap()); 7340 unsigned AS = AtomicNode->getAddressSpace(); 7341 7342 // No custom lowering required for local address space 7343 if (!isFlatGlobalAddrSpace(AS)) 7344 return Op; 7345 7346 // Non-local address space requires custom lowering for atomic compare 7347 // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2 7348 SDLoc DL(Op); 7349 SDValue ChainIn = Op.getOperand(0); 7350 SDValue Addr = Op.getOperand(1); 7351 SDValue Old = Op.getOperand(2); 7352 SDValue New = Op.getOperand(3); 7353 EVT VT = Op.getValueType(); 7354 MVT SimpleVT = VT.getSimpleVT(); 7355 MVT VecType = MVT::getVectorVT(SimpleVT, 2); 7356 7357 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old}); 7358 SDValue Ops[] = { ChainIn, Addr, NewOld }; 7359 7360 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(), 7361 Ops, VT, AtomicNode->getMemOperand()); 7362 } 7363 7364 //===----------------------------------------------------------------------===// 7365 // Custom DAG optimizations 7366 //===----------------------------------------------------------------------===// 7367 7368 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, 7369 DAGCombinerInfo &DCI) const { 7370 EVT VT = N->getValueType(0); 7371 EVT ScalarVT = VT.getScalarType(); 7372 if (ScalarVT != MVT::f32) 7373 return SDValue(); 7374 7375 SelectionDAG &DAG = DCI.DAG; 7376 SDLoc DL(N); 7377 7378 SDValue Src = N->getOperand(0); 7379 EVT SrcVT = Src.getValueType(); 7380 7381 // TODO: We could try to match extracting the higher bytes, which would be 7382 // easier if i8 vectors weren't promoted to i32 vectors, particularly after 7383 // types are legalized. v4i8 -> v4f32 is probably the only case to worry 7384 // about in practice. 7385 if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) { 7386 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { 7387 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src); 7388 DCI.AddToWorklist(Cvt.getNode()); 7389 return Cvt; 7390 } 7391 } 7392 7393 return SDValue(); 7394 } 7395 7396 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2) 7397 7398 // This is a variant of 7399 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2), 7400 // 7401 // The normal DAG combiner will do this, but only if the add has one use since 7402 // that would increase the number of instructions. 7403 // 7404 // This prevents us from seeing a constant offset that can be folded into a 7405 // memory instruction's addressing mode. If we know the resulting add offset of 7406 // a pointer can be folded into an addressing offset, we can replace the pointer 7407 // operand with the add of new constant offset. 
This eliminates one of the uses, 7408 // and may allow the remaining use to also be simplified. 7409 // 7410 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N, 7411 unsigned AddrSpace, 7412 EVT MemVT, 7413 DAGCombinerInfo &DCI) const { 7414 SDValue N0 = N->getOperand(0); 7415 SDValue N1 = N->getOperand(1); 7416 7417 // We only do this to handle cases where it's profitable when there are 7418 // multiple uses of the add, so defer to the standard combine. 7419 if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) || 7420 N0->hasOneUse()) 7421 return SDValue(); 7422 7423 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1); 7424 if (!CN1) 7425 return SDValue(); 7426 7427 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 7428 if (!CAdd) 7429 return SDValue(); 7430 7431 // If the resulting offset is too large, we can't fold it into the addressing 7432 // mode offset. 7433 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); 7434 Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext()); 7435 7436 AddrMode AM; 7437 AM.HasBaseReg = true; 7438 AM.BaseOffs = Offset.getSExtValue(); 7439 if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace)) 7440 return SDValue(); 7441 7442 SelectionDAG &DAG = DCI.DAG; 7443 SDLoc SL(N); 7444 EVT VT = N->getValueType(0); 7445 7446 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); 7447 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32); 7448 7449 SDNodeFlags Flags; 7450 Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() && 7451 (N0.getOpcode() == ISD::OR || 7452 N0->getFlags().hasNoUnsignedWrap())); 7453 7454 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags); 7455 } 7456 7457 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N, 7458 DAGCombinerInfo &DCI) const { 7459 SDValue Ptr = N->getBasePtr(); 7460 SelectionDAG &DAG = DCI.DAG; 7461 SDLoc SL(N); 7462 7463 // TODO: We could also do this for multiplies. 7464 if (Ptr.getOpcode() == ISD::SHL) { 7465 SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(), 7466 N->getMemoryVT(), DCI); 7467 if (NewPtr) { 7468 SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end()); 7469 7470 NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr; 7471 return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0); 7472 } 7473 } 7474 7475 return SDValue(); 7476 } 7477 7478 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) { 7479 return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) || 7480 (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) || 7481 (Opc == ISD::XOR && Val == 0); 7482 } 7483 7484 // Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This 7485 // will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit 7486 // integer combine opportunities since most 64-bit operations are decomposed 7487 // this way. TODO: We won't want this for SALU especially if it is an inline 7488 // immediate. 
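//
// An illustrative example of the split (what splitBinaryBitConstantOpImpl in
// the common AMDGPU lowering roughly produces): given
//
//   (and i64:x, 0x00000000ffffffff)
//
// the constant is broken into halves (lo = 0xffffffff, hi = 0x0), the
// operation is performed as two i32 ands on the halves of x, and the results
// are recombined into an i64; the half combined with a reducible constant
// (here the high half anded with 0) then folds away.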
SDValue SITargetLowering::splitBinaryBitConstantOp( 7490 DAGCombinerInfo &DCI, 7491 const SDLoc &SL, 7492 unsigned Opc, SDValue LHS, 7493 const ConstantSDNode *CRHS) const { 7494 uint64_t Val = CRHS->getZExtValue(); 7495 uint32_t ValLo = Lo_32(Val); 7496 uint32_t ValHi = Hi_32(Val); 7497 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 7498 7499 if ((bitOpWithConstantIsReducible(Opc, ValLo) || 7500 bitOpWithConstantIsReducible(Opc, ValHi)) || 7501 (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) { 7502 // If we need to materialize a 64-bit immediate, it will be split up later 7503 // anyway. Avoid creating the harder to understand 64-bit immediate 7504 // materialization. 7505 return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi); 7506 } 7507 7508 return SDValue(); 7509 } 7510 7511 // Returns true if the argument is a boolean value which is not serialized into 7512 // memory or an argument and does not require v_cndmask_b32 to be deserialized. 7513 static bool isBoolSGPR(SDValue V) { 7514 if (V.getValueType() != MVT::i1) 7515 return false; 7516 switch (V.getOpcode()) { 7517 default: break; 7518 case ISD::SETCC: 7519 case ISD::AND: 7520 case ISD::OR: 7521 case ISD::XOR: 7522 case AMDGPUISD::FP_CLASS: 7523 return true; 7524 } 7525 return false; 7526 } 7527 7528 // If a constant has all zeroes or all ones within each byte return it. 7529 // Otherwise return 0. 7530 static uint32_t getConstantPermuteMask(uint32_t C) { 7531 // 0xff for any zero byte in the mask 7532 uint32_t ZeroByteMask = 0; 7533 if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff; 7534 if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00; 7535 if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000; 7536 if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000; 7537 uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte 7538 if ((NonZeroByteMask & C) != NonZeroByteMask) 7539 return 0; // Partial bytes selected. 7540 return C; 7541 } 7542 7543 // Check if a node selects whole bytes from its operand 0 starting at a byte 7544 // boundary while masking the rest. Returns the select mask as used by 7545 // v_perm_b32, or ~0 if it did not succeed. 7546 // Note byte select encoding: 7547 // value 0-3 selects corresponding source byte; 7548 // value 0xc selects zero; 7549 // value 0xff selects 0xff.
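//
// A few illustrative inputs and the masks derived for them below (byte
// selectors as described above):
//
//   (and x, 0x0000ffff)  ->  0x0c0c0100  (keep bytes 0-1 of x, zero bytes 2-3)
//   (or  x, 0xffff0000)  ->  0xffff0100  (bytes 0-1 from x, bytes 2-3 are 0xff)
//   (shl x, 16)          ->  0x01000c0c  (bytes 0-1 of x move to bytes 2-3)
//   (srl x, 8)           ->  0x0c030201  (bytes 1-3 of x move to bytes 0-2)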
7550 static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) { 7551 assert(V.getValueSizeInBits() == 32); 7552 7553 if (V.getNumOperands() != 2) 7554 return ~0; 7555 7556 ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1)); 7557 if (!N1) 7558 return ~0; 7559 7560 uint32_t C = N1->getZExtValue(); 7561 7562 switch (V.getOpcode()) { 7563 default: 7564 break; 7565 case ISD::AND: 7566 if (uint32_t ConstMask = getConstantPermuteMask(C)) { 7567 return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask); 7568 } 7569 break; 7570 7571 case ISD::OR: 7572 if (uint32_t ConstMask = getConstantPermuteMask(C)) { 7573 return (0x03020100 & ~ConstMask) | ConstMask; 7574 } 7575 break; 7576 7577 case ISD::SHL: 7578 if (C % 8) 7579 return ~0; 7580 7581 return uint32_t((0x030201000c0c0c0cull << C) >> 32); 7582 7583 case ISD::SRL: 7584 if (C % 8) 7585 return ~0; 7586 7587 return uint32_t(0x0c0c0c0c03020100ull >> C); 7588 } 7589 7590 return ~0; 7591 } 7592 7593 SDValue SITargetLowering::performAndCombine(SDNode *N, 7594 DAGCombinerInfo &DCI) const { 7595 if (DCI.isBeforeLegalize()) 7596 return SDValue(); 7597 7598 SelectionDAG &DAG = DCI.DAG; 7599 EVT VT = N->getValueType(0); 7600 SDValue LHS = N->getOperand(0); 7601 SDValue RHS = N->getOperand(1); 7602 7603 7604 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); 7605 if (VT == MVT::i64 && CRHS) { 7606 if (SDValue Split 7607 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS)) 7608 return Split; 7609 } 7610 7611 if (CRHS && VT == MVT::i32) { 7612 // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb 7613 // nb = number of trailing zeroes in mask 7614 // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass, 7615 // given that we are selecting 8 or 16 bit fields starting at byte boundary. 7616 uint64_t Mask = CRHS->getZExtValue(); 7617 unsigned Bits = countPopulation(Mask); 7618 if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL && 7619 (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) { 7620 if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) { 7621 unsigned Shift = CShift->getZExtValue(); 7622 unsigned NB = CRHS->getAPIntValue().countTrailingZeros(); 7623 unsigned Offset = NB + Shift; 7624 if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary. 
7625 SDLoc SL(N); 7626 SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32, 7627 LHS->getOperand(0), 7628 DAG.getConstant(Offset, SL, MVT::i32), 7629 DAG.getConstant(Bits, SL, MVT::i32)); 7630 EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits); 7631 SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE, 7632 DAG.getValueType(NarrowVT)); 7633 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext, 7634 DAG.getConstant(NB, SDLoc(CRHS), MVT::i32)); 7635 return Shl; 7636 } 7637 } 7638 } 7639 7640 // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2) 7641 if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM && 7642 isa<ConstantSDNode>(LHS.getOperand(2))) { 7643 uint32_t Sel = getConstantPermuteMask(Mask); 7644 if (!Sel) 7645 return SDValue(); 7646 7647 // Select 0xc for all zero bytes 7648 Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c); 7649 SDLoc DL(N); 7650 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0), 7651 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32)); 7652 } 7653 } 7654 7655 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> 7656 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) 7657 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) { 7658 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 7659 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); 7660 7661 SDValue X = LHS.getOperand(0); 7662 SDValue Y = RHS.getOperand(0); 7663 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) 7664 return SDValue(); 7665 7666 if (LCC == ISD::SETO) { 7667 if (X != LHS.getOperand(1)) 7668 return SDValue(); 7669 7670 if (RCC == ISD::SETUNE) { 7671 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); 7672 if (!C1 || !C1->isInfinity() || C1->isNegative()) 7673 return SDValue(); 7674 7675 const uint32_t Mask = SIInstrFlags::N_NORMAL | 7676 SIInstrFlags::N_SUBNORMAL | 7677 SIInstrFlags::N_ZERO | 7678 SIInstrFlags::P_ZERO | 7679 SIInstrFlags::P_SUBNORMAL | 7680 SIInstrFlags::P_NORMAL; 7681 7682 static_assert(((~(SIInstrFlags::S_NAN | 7683 SIInstrFlags::Q_NAN | 7684 SIInstrFlags::N_INFINITY | 7685 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, 7686 "mask not equal"); 7687 7688 SDLoc DL(N); 7689 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 7690 X, DAG.getConstant(Mask, DL, MVT::i32)); 7691 } 7692 } 7693 } 7694 7695 if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS) 7696 std::swap(LHS, RHS); 7697 7698 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS && 7699 RHS.hasOneUse()) { 7700 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 7701 // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan) 7702 // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan) 7703 const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 7704 if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask && 7705 (RHS.getOperand(0) == LHS.getOperand(0) && 7706 LHS.getOperand(0) == LHS.getOperand(1))) { 7707 const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN; 7708 unsigned NewMask = LCC == ISD::SETO ? 
Mask->getZExtValue() & ~OrdMask : 7710 Mask->getZExtValue() & OrdMask; 7711 7712 SDLoc DL(N); 7713 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0), 7714 DAG.getConstant(NewMask, DL, MVT::i32)); 7715 } 7716 } 7717 7718 if (VT == MVT::i32 && 7719 (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) { 7720 // and x, (sext cc from i1) => select cc, x, 0 7721 if (RHS.getOpcode() != ISD::SIGN_EXTEND) 7722 std::swap(LHS, RHS); 7723 if (isBoolSGPR(RHS.getOperand(0))) 7724 return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0), 7725 LHS, DAG.getConstant(0, SDLoc(N), MVT::i32)); 7726 } 7727 7728 // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2) 7729 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 7730 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() && 7731 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) { 7732 uint32_t LHSMask = getPermuteMask(DAG, LHS); 7733 uint32_t RHSMask = getPermuteMask(DAG, RHS); 7734 if (LHSMask != ~0u && RHSMask != ~0u) { 7735 // Canonicalize the expression in an attempt to have fewer unique masks 7736 // and therefore fewer registers used to hold the masks. 7737 if (LHSMask > RHSMask) { 7738 std::swap(LHSMask, RHSMask); 7739 std::swap(LHS, RHS); 7740 } 7741 7742 // Select 0xc for each lane used from a source operand. A zero byte has the 7743 // 0xc mask set, a 0xff byte has 0xff in the mask, actual lanes are in the 0-3 range. 7744 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; 7745 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; 7746 7747 // Check if we need to combine values from two sources within a byte. 7748 if (!(LHSUsedLanes & RHSUsedLanes) && 7749 // If we select high and lower word keep it for SDWA. 7750 // TODO: teach SDWA to work with v_perm_b32 and remove the check. 7751 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) { 7752 // Each byte in each mask is either a selector mask 0-3, or has higher 7753 // bits set in either of the masks, which can be 0xff for 0xff or 0x0c for 7754 // zero. If 0x0c is in either mask it shall always be 0x0c. Otherwise the 7755 // mask which is not 0xff wins. By anding both masks we have a correct 7756 // result, except that where 0x0c was selected it must be corrected back to exactly 0x0c. 7757 uint32_t Mask = LHSMask & RHSMask; 7758 for (unsigned I = 0; I < 32; I += 8) { 7759 uint32_t ByteSel = 0xff << I; 7760 if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c) 7761 Mask &= (0x0c << I) & 0xffffffff; 7762 } 7763 7764 // Add 4 to each active LHS lane. It will not affect any existing 0xff 7765 // or 0x0c.
7766 uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404); 7767 SDLoc DL(N); 7768 7769 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, 7770 LHS.getOperand(0), RHS.getOperand(0), 7771 DAG.getConstant(Sel, DL, MVT::i32)); 7772 } 7773 } 7774 } 7775 7776 return SDValue(); 7777 } 7778 7779 SDValue SITargetLowering::performOrCombine(SDNode *N, 7780 DAGCombinerInfo &DCI) const { 7781 SelectionDAG &DAG = DCI.DAG; 7782 SDValue LHS = N->getOperand(0); 7783 SDValue RHS = N->getOperand(1); 7784 7785 EVT VT = N->getValueType(0); 7786 if (VT == MVT::i1) { 7787 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) 7788 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && 7789 RHS.getOpcode() == AMDGPUISD::FP_CLASS) { 7790 SDValue Src = LHS.getOperand(0); 7791 if (Src != RHS.getOperand(0)) 7792 return SDValue(); 7793 7794 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); 7795 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 7796 if (!CLHS || !CRHS) 7797 return SDValue(); 7798 7799 // Only 10 bits are used. 7800 static const uint32_t MaxMask = 0x3ff; 7801 7802 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; 7803 SDLoc DL(N); 7804 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 7805 Src, DAG.getConstant(NewMask, DL, MVT::i32)); 7806 } 7807 7808 return SDValue(); 7809 } 7810 7811 // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2) 7812 if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() && 7813 LHS.getOpcode() == AMDGPUISD::PERM && 7814 isa<ConstantSDNode>(LHS.getOperand(2))) { 7815 uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1)); 7816 if (!Sel) 7817 return SDValue(); 7818 7819 Sel |= LHS.getConstantOperandVal(2); 7820 SDLoc DL(N); 7821 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0), 7822 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32)); 7823 } 7824 7825 // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2) 7826 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 7827 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() && 7828 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) { 7829 uint32_t LHSMask = getPermuteMask(DAG, LHS); 7830 uint32_t RHSMask = getPermuteMask(DAG, RHS); 7831 if (LHSMask != ~0u && RHSMask != ~0u) { 7832 // Canonicalize the expression in an attempt to have fewer unique masks 7833 // and therefore fewer registers used to hold the masks. 7834 if (LHSMask > RHSMask) { 7835 std::swap(LHSMask, RHSMask); 7836 std::swap(LHS, RHS); 7837 } 7838 7839 // Select 0xc for each lane used from source operand. Zero has 0xc mask 7840 // set, 0xff have 0xff in the mask, actual lanes are in the 0-3 range. 7841 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; 7842 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; 7843 7844 // Check of we need to combine values from two sources within a byte. 7845 if (!(LHSUsedLanes & RHSUsedLanes) && 7846 // If we select high and lower word keep it for SDWA. 7847 // TODO: teach SDWA to work with v_perm_b32 and remove the check. 7848 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) { 7849 // Kill zero bytes selected by other mask. Zero value is 0xc. 
7850 LHSMask &= ~RHSUsedLanes; 7851 RHSMask &= ~LHSUsedLanes; 7852 // Add 4 to each active LHS lane 7853 LHSMask |= LHSUsedLanes & 0x04040404; 7854 // Combine masks 7855 uint32_t Sel = LHSMask | RHSMask; 7856 SDLoc DL(N); 7857 7858 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, 7859 LHS.getOperand(0), RHS.getOperand(0), 7860 DAG.getConstant(Sel, DL, MVT::i32)); 7861 } 7862 } 7863 } 7864 7865 if (VT != MVT::i64) 7866 return SDValue(); 7867 7868 // TODO: This could be a generic combine with a predicate for extracting the 7869 // high half of an integer being free. 7870 7871 // (or i64:x, (zero_extend i32:y)) -> 7872 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x))) 7873 if (LHS.getOpcode() == ISD::ZERO_EXTEND && 7874 RHS.getOpcode() != ISD::ZERO_EXTEND) 7875 std::swap(LHS, RHS); 7876 7877 if (RHS.getOpcode() == ISD::ZERO_EXTEND) { 7878 SDValue ExtSrc = RHS.getOperand(0); 7879 EVT SrcVT = ExtSrc.getValueType(); 7880 if (SrcVT == MVT::i32) { 7881 SDLoc SL(N); 7882 SDValue LowLHS, HiBits; 7883 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG); 7884 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc); 7885 7886 DCI.AddToWorklist(LowOr.getNode()); 7887 DCI.AddToWorklist(HiBits.getNode()); 7888 7889 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 7890 LowOr, HiBits); 7891 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 7892 } 7893 } 7894 7895 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 7896 if (CRHS) { 7897 if (SDValue Split 7898 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS)) 7899 return Split; 7900 } 7901 7902 return SDValue(); 7903 } 7904 7905 SDValue SITargetLowering::performXorCombine(SDNode *N, 7906 DAGCombinerInfo &DCI) const { 7907 EVT VT = N->getValueType(0); 7908 if (VT != MVT::i64) 7909 return SDValue(); 7910 7911 SDValue LHS = N->getOperand(0); 7912 SDValue RHS = N->getOperand(1); 7913 7914 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); 7915 if (CRHS) { 7916 if (SDValue Split 7917 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS)) 7918 return Split; 7919 } 7920 7921 return SDValue(); 7922 } 7923 7924 // Instructions that will be lowered with a final instruction that zeros the 7925 // high result bits. 7926 // XXX - probably only need to list legal operations. 7927 static bool fp16SrcZerosHighBits(unsigned Opc) { 7928 switch (Opc) { 7929 case ISD::FADD: 7930 case ISD::FSUB: 7931 case ISD::FMUL: 7932 case ISD::FDIV: 7933 case ISD::FREM: 7934 case ISD::FMA: 7935 case ISD::FMAD: 7936 case ISD::FCANONICALIZE: 7937 case ISD::FP_ROUND: 7938 case ISD::UINT_TO_FP: 7939 case ISD::SINT_TO_FP: 7940 case ISD::FABS: 7941 // Fabs is lowered to a bit operation, but it's an and which will clear the 7942 // high bits anyway. 
7943 case ISD::FSQRT: 7944 case ISD::FSIN: 7945 case ISD::FCOS: 7946 case ISD::FPOWI: 7947 case ISD::FPOW: 7948 case ISD::FLOG: 7949 case ISD::FLOG2: 7950 case ISD::FLOG10: 7951 case ISD::FEXP: 7952 case ISD::FEXP2: 7953 case ISD::FCEIL: 7954 case ISD::FTRUNC: 7955 case ISD::FRINT: 7956 case ISD::FNEARBYINT: 7957 case ISD::FROUND: 7958 case ISD::FFLOOR: 7959 case ISD::FMINNUM: 7960 case ISD::FMAXNUM: 7961 case AMDGPUISD::FRACT: 7962 case AMDGPUISD::CLAMP: 7963 case AMDGPUISD::COS_HW: 7964 case AMDGPUISD::SIN_HW: 7965 case AMDGPUISD::FMIN3: 7966 case AMDGPUISD::FMAX3: 7967 case AMDGPUISD::FMED3: 7968 case AMDGPUISD::FMAD_FTZ: 7969 case AMDGPUISD::RCP: 7970 case AMDGPUISD::RSQ: 7971 case AMDGPUISD::RCP_IFLAG: 7972 case AMDGPUISD::LDEXP: 7973 return true; 7974 default: 7975 // fcopysign, select and others may be lowered to 32-bit bit operations 7976 // which don't zero the high bits. 7977 return false; 7978 } 7979 } 7980 7981 SDValue SITargetLowering::performZeroExtendCombine(SDNode *N, 7982 DAGCombinerInfo &DCI) const { 7983 if (!Subtarget->has16BitInsts() || 7984 DCI.getDAGCombineLevel() < AfterLegalizeDAG) 7985 return SDValue(); 7986 7987 EVT VT = N->getValueType(0); 7988 if (VT != MVT::i32) 7989 return SDValue(); 7990 7991 SDValue Src = N->getOperand(0); 7992 if (Src.getValueType() != MVT::i16) 7993 return SDValue(); 7994 7995 // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src 7996 // FIXME: It is not universally true that the high bits are zeroed on gfx9. 7997 if (Src.getOpcode() == ISD::BITCAST) { 7998 SDValue BCSrc = Src.getOperand(0); 7999 if (BCSrc.getValueType() == MVT::f16 && 8000 fp16SrcZerosHighBits(BCSrc.getOpcode())) 8001 return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc); 8002 } 8003 8004 return SDValue(); 8005 } 8006 8007 SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N, 8008 DAGCombinerInfo &DCI) 8009 const { 8010 SDValue Src = N->getOperand(0); 8011 auto *VTSign = cast<VTSDNode>(N->getOperand(1)); 8012 8013 if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE && 8014 VTSign->getVT() == MVT::i8) || 8015 (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT && 8016 VTSign->getVT() == MVT::i16)) && 8017 Src.hasOneUse()) { 8018 auto *M = cast<MemSDNode>(Src); 8019 SDValue Ops[] = { 8020 Src.getOperand(0), // Chain 8021 Src.getOperand(1), // rsrc 8022 Src.getOperand(2), // vindex 8023 Src.getOperand(3), // voffset 8024 Src.getOperand(4), // soffset 8025 Src.getOperand(5), // offset 8026 Src.getOperand(6), 8027 Src.getOperand(7) 8028 }; 8029 // replace with BUFFER_LOAD_BYTE/SHORT 8030 SDVTList ResList = DCI.DAG.getVTList(MVT::i32, 8031 Src.getOperand(0).getValueType()); 8032 unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ? 
8033 AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT; 8034 SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N), 8035 ResList, 8036 Ops, M->getMemoryVT(), 8037 M->getMemOperand()); 8038 return DCI.DAG.getMergeValues({BufferLoadSignExt, 8039 BufferLoadSignExt.getValue(1)}, SDLoc(N)); 8040 } 8041 return SDValue(); 8042 } 8043 8044 SDValue SITargetLowering::performClassCombine(SDNode *N, 8045 DAGCombinerInfo &DCI) const { 8046 SelectionDAG &DAG = DCI.DAG; 8047 SDValue Mask = N->getOperand(1); 8048 8049 // fp_class x, 0 -> false 8050 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { 8051 if (CMask->isNullValue()) 8052 return DAG.getConstant(0, SDLoc(N), MVT::i1); 8053 } 8054 8055 if (N->getOperand(0).isUndef()) 8056 return DAG.getUNDEF(MVT::i1); 8057 8058 return SDValue(); 8059 } 8060 8061 SDValue SITargetLowering::performRcpCombine(SDNode *N, 8062 DAGCombinerInfo &DCI) const { 8063 EVT VT = N->getValueType(0); 8064 SDValue N0 = N->getOperand(0); 8065 8066 if (N0.isUndef()) 8067 return N0; 8068 8069 if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP || 8070 N0.getOpcode() == ISD::SINT_TO_FP)) { 8071 return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0, 8072 N->getFlags()); 8073 } 8074 8075 return AMDGPUTargetLowering::performRcpCombine(N, DCI); 8076 } 8077 8078 bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op, 8079 unsigned MaxDepth) const { 8080 unsigned Opcode = Op.getOpcode(); 8081 if (Opcode == ISD::FCANONICALIZE) 8082 return true; 8083 8084 if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) { 8085 auto F = CFP->getValueAPF(); 8086 if (F.isNaN() && F.isSignaling()) 8087 return false; 8088 return !F.isDenormal() || denormalsEnabledForType(Op.getValueType()); 8089 } 8090 8091 // If source is a result of another standard FP operation it is already in 8092 // canonical form. 8093 if (MaxDepth == 0) 8094 return false; 8095 8096 switch (Opcode) { 8097 // These will flush denorms if required. 8098 case ISD::FADD: 8099 case ISD::FSUB: 8100 case ISD::FMUL: 8101 case ISD::FCEIL: 8102 case ISD::FFLOOR: 8103 case ISD::FMA: 8104 case ISD::FMAD: 8105 case ISD::FSQRT: 8106 case ISD::FDIV: 8107 case ISD::FREM: 8108 case ISD::FP_ROUND: 8109 case ISD::FP_EXTEND: 8110 case AMDGPUISD::FMUL_LEGACY: 8111 case AMDGPUISD::FMAD_FTZ: 8112 case AMDGPUISD::RCP: 8113 case AMDGPUISD::RSQ: 8114 case AMDGPUISD::RSQ_CLAMP: 8115 case AMDGPUISD::RCP_LEGACY: 8116 case AMDGPUISD::RSQ_LEGACY: 8117 case AMDGPUISD::RCP_IFLAG: 8118 case AMDGPUISD::TRIG_PREOP: 8119 case AMDGPUISD::DIV_SCALE: 8120 case AMDGPUISD::DIV_FMAS: 8121 case AMDGPUISD::DIV_FIXUP: 8122 case AMDGPUISD::FRACT: 8123 case AMDGPUISD::LDEXP: 8124 case AMDGPUISD::CVT_PKRTZ_F16_F32: 8125 case AMDGPUISD::CVT_F32_UBYTE0: 8126 case AMDGPUISD::CVT_F32_UBYTE1: 8127 case AMDGPUISD::CVT_F32_UBYTE2: 8128 case AMDGPUISD::CVT_F32_UBYTE3: 8129 return true; 8130 8131 // It can/will be lowered or combined as a bit operation. 8132 // Need to check their input recursively to handle. 
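// For example, (fneg v) only flips the sign bit, so its result is canonical
// exactly when v itself is canonical.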
8133 case ISD::FNEG: 8134 case ISD::FABS: 8135 case ISD::FCOPYSIGN: 8136 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); 8137 8138 case ISD::FSIN: 8139 case ISD::FCOS: 8140 case ISD::FSINCOS: 8141 return Op.getValueType().getScalarType() != MVT::f16; 8142 8143 case ISD::FMINNUM: 8144 case ISD::FMAXNUM: 8145 case ISD::FMINNUM_IEEE: 8146 case ISD::FMAXNUM_IEEE: 8147 case AMDGPUISD::CLAMP: 8148 case AMDGPUISD::FMED3: 8149 case AMDGPUISD::FMAX3: 8150 case AMDGPUISD::FMIN3: { 8151 // FIXME: Shouldn't treat the generic operations different based these. 8152 // However, we aren't really required to flush the result from 8153 // minnum/maxnum.. 8154 8155 // snans will be quieted, so we only need to worry about denormals. 8156 if (Subtarget->supportsMinMaxDenormModes() || 8157 denormalsEnabledForType(Op.getValueType())) 8158 return true; 8159 8160 // Flushing may be required. 8161 // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms. For such 8162 // targets need to check their input recursively. 8163 8164 // FIXME: Does this apply with clamp? It's implemented with max. 8165 for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) { 8166 if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1)) 8167 return false; 8168 } 8169 8170 return true; 8171 } 8172 case ISD::SELECT: { 8173 return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) && 8174 isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1); 8175 } 8176 case ISD::BUILD_VECTOR: { 8177 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { 8178 SDValue SrcOp = Op.getOperand(i); 8179 if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1)) 8180 return false; 8181 } 8182 8183 return true; 8184 } 8185 case ISD::EXTRACT_VECTOR_ELT: 8186 case ISD::EXTRACT_SUBVECTOR: { 8187 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); 8188 } 8189 case ISD::INSERT_VECTOR_ELT: { 8190 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) && 8191 isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1); 8192 } 8193 case ISD::UNDEF: 8194 // Could be anything. 8195 return false; 8196 8197 case ISD::BITCAST: { 8198 // Hack round the mess we make when legalizing extract_vector_elt 8199 SDValue Src = Op.getOperand(0); 8200 if (Src.getValueType() == MVT::i16 && 8201 Src.getOpcode() == ISD::TRUNCATE) { 8202 SDValue TruncSrc = Src.getOperand(0); 8203 if (TruncSrc.getValueType() == MVT::i32 && 8204 TruncSrc.getOpcode() == ISD::BITCAST && 8205 TruncSrc.getOperand(0).getValueType() == MVT::v2f16) { 8206 return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1); 8207 } 8208 } 8209 8210 return false; 8211 } 8212 case ISD::INTRINSIC_WO_CHAIN: { 8213 unsigned IntrinsicID 8214 = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 8215 // TODO: Handle more intrinsics 8216 switch (IntrinsicID) { 8217 case Intrinsic::amdgcn_cvt_pkrtz: 8218 case Intrinsic::amdgcn_cubeid: 8219 case Intrinsic::amdgcn_frexp_mant: 8220 case Intrinsic::amdgcn_fdot2: 8221 return true; 8222 default: 8223 break; 8224 } 8225 8226 LLVM_FALLTHROUGH; 8227 } 8228 default: 8229 return denormalsEnabledForType(Op.getValueType()) && 8230 DAG.isKnownNeverSNaN(Op); 8231 } 8232 8233 llvm_unreachable("invalid operation"); 8234 } 8235 8236 // Constant fold canonicalize. 8237 SDValue SITargetLowering::getCanonicalConstantFP( 8238 SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const { 8239 // Flush denormals to 0 if not enabled. 
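// For instance, the smallest positive f16 subnormal (bit pattern 0x0001)
// constant-folds to +0.0 here when f16 denormals are not enabled.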
8240 if (C.isDenormal() && !denormalsEnabledForType(VT)) 8241 return DAG.getConstantFP(0.0, SL, VT); 8242 8243 if (C.isNaN()) { 8244 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics()); 8245 if (C.isSignaling()) { 8246 // Quiet a signaling NaN. 8247 // FIXME: Is this supposed to preserve payload bits? 8248 return DAG.getConstantFP(CanonicalQNaN, SL, VT); 8249 } 8250 8251 // Make sure it is the canonical NaN bitpattern. 8252 // 8253 // TODO: Can we use -1 as the canonical NaN value since it's an inline 8254 // immediate? 8255 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt()) 8256 return DAG.getConstantFP(CanonicalQNaN, SL, VT); 8257 } 8258 8259 // Already canonical. 8260 return DAG.getConstantFP(C, SL, VT); 8261 } 8262 8263 static bool vectorEltWillFoldAway(SDValue Op) { 8264 return Op.isUndef() || isa<ConstantFPSDNode>(Op); 8265 } 8266 8267 SDValue SITargetLowering::performFCanonicalizeCombine( 8268 SDNode *N, 8269 DAGCombinerInfo &DCI) const { 8270 SelectionDAG &DAG = DCI.DAG; 8271 SDValue N0 = N->getOperand(0); 8272 EVT VT = N->getValueType(0); 8273 8274 // fcanonicalize undef -> qnan 8275 if (N0.isUndef()) { 8276 APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT)); 8277 return DAG.getConstantFP(QNaN, SDLoc(N), VT); 8278 } 8279 8280 if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) { 8281 EVT VT = N->getValueType(0); 8282 return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF()); 8283 } 8284 8285 // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x), 8286 // (fcanonicalize k) 8287 // 8288 // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0 8289 8290 // TODO: This could be better with wider vectors that will be split to v2f16, 8291 // and to consider uses since there aren't that many packed operations. 8292 if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 && 8293 isTypeLegal(MVT::v2f16)) { 8294 SDLoc SL(N); 8295 SDValue NewElts[2]; 8296 SDValue Lo = N0.getOperand(0); 8297 SDValue Hi = N0.getOperand(1); 8298 EVT EltVT = Lo.getValueType(); 8299 8300 if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) { 8301 for (unsigned I = 0; I != 2; ++I) { 8302 SDValue Op = N0.getOperand(I); 8303 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) { 8304 NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT, 8305 CFP->getValueAPF()); 8306 } else if (Op.isUndef()) { 8307 // Handled below based on what the other operand is. 8308 NewElts[I] = Op; 8309 } else { 8310 NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op); 8311 } 8312 } 8313 8314 // If one half is undef, and one is constant, perfer a splat vector rather 8315 // than the normal qNaN. If it's a register, prefer 0.0 since that's 8316 // cheaper to use and may be free with a packed operation. 8317 if (NewElts[0].isUndef()) { 8318 if (isa<ConstantFPSDNode>(NewElts[1])) 8319 NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ? 8320 NewElts[1]: DAG.getConstantFP(0.0f, SL, EltVT); 8321 } 8322 8323 if (NewElts[1].isUndef()) { 8324 NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ? 8325 NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT); 8326 } 8327 8328 return DAG.getBuildVector(VT, SL, NewElts); 8329 } 8330 } 8331 8332 unsigned SrcOpc = N0.getOpcode(); 8333 8334 // If it's free to do so, push canonicalizes further up the source, which may 8335 // find a canonical source. 8336 // 8337 // TODO: More opcodes. Note this is unsafe for the the _ieee minnum/maxnum for 8338 // sNaNs. 
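// e.g. fcanonicalize (fminnum x, 2.0) -> fminnum (fcanonicalize x), 2.0,
// which lets the remaining canonicalize fold away once x is known to be
// canonical.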
8339 if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) { 8340 auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1)); 8341 if (CRHS && N0.hasOneUse()) { 8342 SDLoc SL(N); 8343 SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT, 8344 N0.getOperand(0)); 8345 SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF()); 8346 DCI.AddToWorklist(Canon0.getNode()); 8347 8348 return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1); 8349 } 8350 } 8351 8352 return isCanonicalized(DAG, N0) ? N0 : SDValue(); 8353 } 8354 8355 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { 8356 switch (Opc) { 8357 case ISD::FMAXNUM: 8358 case ISD::FMAXNUM_IEEE: 8359 return AMDGPUISD::FMAX3; 8360 case ISD::SMAX: 8361 return AMDGPUISD::SMAX3; 8362 case ISD::UMAX: 8363 return AMDGPUISD::UMAX3; 8364 case ISD::FMINNUM: 8365 case ISD::FMINNUM_IEEE: 8366 return AMDGPUISD::FMIN3; 8367 case ISD::SMIN: 8368 return AMDGPUISD::SMIN3; 8369 case ISD::UMIN: 8370 return AMDGPUISD::UMIN3; 8371 default: 8372 llvm_unreachable("Not a min/max opcode"); 8373 } 8374 } 8375 8376 SDValue SITargetLowering::performIntMed3ImmCombine( 8377 SelectionDAG &DAG, const SDLoc &SL, 8378 SDValue Op0, SDValue Op1, bool Signed) const { 8379 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1); 8380 if (!K1) 8381 return SDValue(); 8382 8383 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); 8384 if (!K0) 8385 return SDValue(); 8386 8387 if (Signed) { 8388 if (K0->getAPIntValue().sge(K1->getAPIntValue())) 8389 return SDValue(); 8390 } else { 8391 if (K0->getAPIntValue().uge(K1->getAPIntValue())) 8392 return SDValue(); 8393 } 8394 8395 EVT VT = K0->getValueType(0); 8396 unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3; 8397 if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) { 8398 return DAG.getNode(Med3Opc, SL, VT, 8399 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0)); 8400 } 8401 8402 // If there isn't a 16-bit med3 operation, convert to 32-bit. 8403 MVT NVT = MVT::i32; 8404 unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 8405 8406 SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0)); 8407 SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1)); 8408 SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1); 8409 8410 SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3); 8411 return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3); 8412 } 8413 8414 static ConstantFPSDNode *getSplatConstantFP(SDValue Op) { 8415 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) 8416 return C; 8417 8418 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) { 8419 if (ConstantFPSDNode *C = BV->getConstantFPSplatNode()) 8420 return C; 8421 } 8422 8423 return nullptr; 8424 } 8425 8426 SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG, 8427 const SDLoc &SL, 8428 SDValue Op0, 8429 SDValue Op1) const { 8430 ConstantFPSDNode *K1 = getSplatConstantFP(Op1); 8431 if (!K1) 8432 return SDValue(); 8433 8434 ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1)); 8435 if (!K0) 8436 return SDValue(); 8437 8438 // Ordered >= (although NaN inputs should have folded away by now). 8439 APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF()); 8440 if (Cmp == APFloat::cmpGreaterThan) 8441 return SDValue(); 8442 8443 const MachineFunction &MF = DAG.getMachineFunction(); 8444 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 8445 8446 // TODO: Check IEEE bit enabled? 
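// Two illustrative outcomes of the checks below (hypothetical inputs):
// fminnum (fmaxnum x, 0.0), 1.0 becomes AMDGPUISD::CLAMP x under dx10-clamp,
// and fminnum (fmaxnum x, 2.0), 4.0 becomes fmed3 x, 2.0, 4.0 when x is
// known not to be a signaling NaN.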
8447 EVT VT = Op0.getValueType(); 8448 if (Info->getMode().DX10Clamp) { 8449 // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the 8450 // hardware fmed3 behavior converting to a min. 8451 // FIXME: Should this be allowing -0.0? 8452 if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0)) 8453 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0)); 8454 } 8455 8456 // med3 for f16 is only available on gfx9+, and not available for v2f16. 8457 if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) { 8458 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a 8459 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would 8460 // then give the other result, which is different from med3 with a NaN 8461 // input. 8462 SDValue Var = Op0.getOperand(0); 8463 if (!DAG.isKnownNeverSNaN(Var)) 8464 return SDValue(); 8465 8466 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 8467 8468 if ((!K0->hasOneUse() || 8469 TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) && 8470 (!K1->hasOneUse() || 8471 TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) { 8472 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0), 8473 Var, SDValue(K0, 0), SDValue(K1, 0)); 8474 } 8475 } 8476 8477 return SDValue(); 8478 } 8479 8480 SDValue SITargetLowering::performMinMaxCombine(SDNode *N, 8481 DAGCombinerInfo &DCI) const { 8482 SelectionDAG &DAG = DCI.DAG; 8483 8484 EVT VT = N->getValueType(0); 8485 unsigned Opc = N->getOpcode(); 8486 SDValue Op0 = N->getOperand(0); 8487 SDValue Op1 = N->getOperand(1); 8488 8489 // Only do this if the inner op has one use since this will just increases 8490 // register pressure for no benefit. 8491 8492 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY && 8493 !VT.isVector() && 8494 (VT == MVT::i32 || VT == MVT::f32 || 8495 ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) { 8496 // max(max(a, b), c) -> max3(a, b, c) 8497 // min(min(a, b), c) -> min3(a, b, c) 8498 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { 8499 SDLoc DL(N); 8500 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 8501 DL, 8502 N->getValueType(0), 8503 Op0.getOperand(0), 8504 Op0.getOperand(1), 8505 Op1); 8506 } 8507 8508 // Try commuted. 
8509 // max(a, max(b, c)) -> max3(a, b, c) 8510 // min(a, min(b, c)) -> min3(a, b, c) 8511 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { 8512 SDLoc DL(N); 8513 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 8514 DL, 8515 N->getValueType(0), 8516 Op0, 8517 Op1.getOperand(0), 8518 Op1.getOperand(1)); 8519 } 8520 } 8521 8522 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1) 8523 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) { 8524 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true)) 8525 return Med3; 8526 } 8527 8528 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) { 8529 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false)) 8530 return Med3; 8531 } 8532 8533 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1) 8534 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) || 8535 (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) || 8536 (Opc == AMDGPUISD::FMIN_LEGACY && 8537 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) && 8538 (VT == MVT::f32 || VT == MVT::f64 || 8539 (VT == MVT::f16 && Subtarget->has16BitInsts()) || 8540 (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) && 8541 Op0.hasOneUse()) { 8542 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1)) 8543 return Res; 8544 } 8545 8546 return SDValue(); 8547 } 8548 8549 static bool isClampZeroToOne(SDValue A, SDValue B) { 8550 if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) { 8551 if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) { 8552 // FIXME: Should this be allowing -0.0? 8553 return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) || 8554 (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0)); 8555 } 8556 } 8557 8558 return false; 8559 } 8560 8561 // FIXME: Should only worry about snans for version with chain. 8562 SDValue SITargetLowering::performFMed3Combine(SDNode *N, 8563 DAGCombinerInfo &DCI) const { 8564 EVT VT = N->getValueType(0); 8565 // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and 8566 // NaNs. With a NaN input, the order of the operands may change the result. 8567 8568 SelectionDAG &DAG = DCI.DAG; 8569 SDLoc SL(N); 8570 8571 SDValue Src0 = N->getOperand(0); 8572 SDValue Src1 = N->getOperand(1); 8573 SDValue Src2 = N->getOperand(2); 8574 8575 if (isClampZeroToOne(Src0, Src1)) { 8576 // const_a, const_b, x -> clamp is safe in all cases including signaling 8577 // nans. 8578 // FIXME: Should this be allowing -0.0? 8579 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2); 8580 } 8581 8582 const MachineFunction &MF = DAG.getMachineFunction(); 8583 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 8584 8585 // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother 8586 // handling no dx10-clamp? 8587 if (Info->getMode().DX10Clamp) { 8588 // If NaNs is clamped to 0, we are free to reorder the inputs. 
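// The swaps below move constants toward operands 1 and 2 so the
// isClampZeroToOne check can fire, e.g. (illustrative)
// fmed3(0.0, x, 1.0) -> fmed3(x, 0.0, 1.0) -> clamp(x).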
8589 8590 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) 8591 std::swap(Src0, Src1); 8592 8593 if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2)) 8594 std::swap(Src1, Src2); 8595 8596 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) 8597 std::swap(Src0, Src1); 8598 8599 if (isClampZeroToOne(Src1, Src2)) 8600 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0); 8601 } 8602 8603 return SDValue(); 8604 } 8605 8606 SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N, 8607 DAGCombinerInfo &DCI) const { 8608 SDValue Src0 = N->getOperand(0); 8609 SDValue Src1 = N->getOperand(1); 8610 if (Src0.isUndef() && Src1.isUndef()) 8611 return DCI.DAG.getUNDEF(N->getValueType(0)); 8612 return SDValue(); 8613 } 8614 8615 SDValue SITargetLowering::performExtractVectorEltCombine( 8616 SDNode *N, DAGCombinerInfo &DCI) const { 8617 SDValue Vec = N->getOperand(0); 8618 SelectionDAG &DAG = DCI.DAG; 8619 8620 EVT VecVT = Vec.getValueType(); 8621 EVT EltVT = VecVT.getVectorElementType(); 8622 8623 if ((Vec.getOpcode() == ISD::FNEG || 8624 Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) { 8625 SDLoc SL(N); 8626 EVT EltVT = N->getValueType(0); 8627 SDValue Idx = N->getOperand(1); 8628 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 8629 Vec.getOperand(0), Idx); 8630 return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt); 8631 } 8632 8633 // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx) 8634 // => 8635 // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx) 8636 // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx) 8637 // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt 8638 if (Vec.hasOneUse() && DCI.isBeforeLegalize()) { 8639 SDLoc SL(N); 8640 EVT EltVT = N->getValueType(0); 8641 SDValue Idx = N->getOperand(1); 8642 unsigned Opc = Vec.getOpcode(); 8643 8644 switch(Opc) { 8645 default: 8646 break; 8647 // TODO: Support other binary operations. 8648 case ISD::FADD: 8649 case ISD::FSUB: 8650 case ISD::FMUL: 8651 case ISD::ADD: 8652 case ISD::UMIN: 8653 case ISD::UMAX: 8654 case ISD::SMIN: 8655 case ISD::SMAX: 8656 case ISD::FMAXNUM: 8657 case ISD::FMINNUM: 8658 case ISD::FMAXNUM_IEEE: 8659 case ISD::FMINNUM_IEEE: { 8660 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 8661 Vec.getOperand(0), Idx); 8662 SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 8663 Vec.getOperand(1), Idx); 8664 8665 DCI.AddToWorklist(Elt0.getNode()); 8666 DCI.AddToWorklist(Elt1.getNode()); 8667 return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags()); 8668 } 8669 } 8670 } 8671 8672 unsigned VecSize = VecVT.getSizeInBits(); 8673 unsigned EltSize = EltVT.getSizeInBits(); 8674 8675 // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx) 8676 // This elminates non-constant index and subsequent movrel or scratch access. 8677 // Sub-dword vectors of size 2 dword or less have better implementation. 8678 // Vectors of size bigger than 8 dwords would yield too many v_cndmask_b32 8679 // instructions. 
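// For example (sketch): a variable-index extract from a v4f32 expands below
// into four constant-index extracts combined by three compare+select pairs,
// instead of an indirect (movrel or scratch) access.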
8680 if (VecSize <= 256 && (VecSize > 64 || EltSize >= 32) && 8681 !isa<ConstantSDNode>(N->getOperand(1))) { 8682 SDLoc SL(N); 8683 SDValue Idx = N->getOperand(1); 8684 EVT IdxVT = Idx.getValueType(); 8685 SDValue V; 8686 for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) { 8687 SDValue IC = DAG.getConstant(I, SL, IdxVT); 8688 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC); 8689 if (I == 0) 8690 V = Elt; 8691 else 8692 V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ); 8693 } 8694 return V; 8695 } 8696 8697 if (!DCI.isBeforeLegalize()) 8698 return SDValue(); 8699 8700 // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit 8701 // elements. This exposes more load reduction opportunities by replacing 8702 // multiple small extract_vector_elements with a single 32-bit extract. 8703 auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1)); 8704 if (isa<MemSDNode>(Vec) && 8705 EltSize <= 16 && 8706 EltVT.isByteSized() && 8707 VecSize > 32 && 8708 VecSize % 32 == 0 && 8709 Idx) { 8710 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT); 8711 8712 unsigned BitIndex = Idx->getZExtValue() * EltSize; 8713 unsigned EltIdx = BitIndex / 32; 8714 unsigned LeftoverBitIdx = BitIndex % 32; 8715 SDLoc SL(N); 8716 8717 SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec); 8718 DCI.AddToWorklist(Cast.getNode()); 8719 8720 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast, 8721 DAG.getConstant(EltIdx, SL, MVT::i32)); 8722 DCI.AddToWorklist(Elt.getNode()); 8723 SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt, 8724 DAG.getConstant(LeftoverBitIdx, SL, MVT::i32)); 8725 DCI.AddToWorklist(Srl.getNode()); 8726 8727 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl); 8728 DCI.AddToWorklist(Trunc.getNode()); 8729 return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc); 8730 } 8731 8732 return SDValue(); 8733 } 8734 8735 SDValue 8736 SITargetLowering::performInsertVectorEltCombine(SDNode *N, 8737 DAGCombinerInfo &DCI) const { 8738 SDValue Vec = N->getOperand(0); 8739 SDValue Idx = N->getOperand(2); 8740 EVT VecVT = Vec.getValueType(); 8741 EVT EltVT = VecVT.getVectorElementType(); 8742 unsigned VecSize = VecVT.getSizeInBits(); 8743 unsigned EltSize = EltVT.getSizeInBits(); 8744 8745 // INSERT_VECTOR_ELT (<n x e>, var-idx) 8746 // => BUILD_VECTOR n x select (e, const-idx) 8747 // This elminates non-constant index and subsequent movrel or scratch access. 8748 // Sub-dword vectors of size 2 dword or less have better implementation. 8749 // Vectors of size bigger than 8 dwords would yield too many v_cndmask_b32 8750 // instructions. 8751 if (isa<ConstantSDNode>(Idx) || 8752 VecSize > 256 || (VecSize <= 64 && EltSize < 32)) 8753 return SDValue(); 8754 8755 SelectionDAG &DAG = DCI.DAG; 8756 SDLoc SL(N); 8757 SDValue Ins = N->getOperand(1); 8758 EVT IdxVT = Idx.getValueType(); 8759 8760 SmallVector<SDValue, 16> Ops; 8761 for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) { 8762 SDValue IC = DAG.getConstant(I, SL, IdxVT); 8763 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC); 8764 SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ); 8765 Ops.push_back(V); 8766 } 8767 8768 return DAG.getBuildVector(VecVT, SL, Ops); 8769 } 8770 8771 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG, 8772 const SDNode *N0, 8773 const SDNode *N1) const { 8774 EVT VT = N0->getValueType(0); 8775 8776 // Only do this if we are not trying to support denormals. 
v_mad_f32 does not 8777 // support denormals ever. 8778 if (((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) || 8779 (VT == MVT::f16 && !Subtarget->hasFP16Denormals() && 8780 getSubtarget()->hasMadF16())) && 8781 isOperationLegal(ISD::FMAD, VT)) 8782 return ISD::FMAD; 8783 8784 const TargetOptions &Options = DAG.getTarget().Options; 8785 if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || 8786 (N0->getFlags().hasAllowContract() && 8787 N1->getFlags().hasAllowContract())) && 8788 isFMAFasterThanFMulAndFAdd(VT)) { 8789 return ISD::FMA; 8790 } 8791 8792 return 0; 8793 } 8794 8795 // For a reassociatable opcode perform: 8796 // op x, (op y, z) -> op (op x, z), y, if x and z are uniform 8797 SDValue SITargetLowering::reassociateScalarOps(SDNode *N, 8798 SelectionDAG &DAG) const { 8799 EVT VT = N->getValueType(0); 8800 if (VT != MVT::i32 && VT != MVT::i64) 8801 return SDValue(); 8802 8803 unsigned Opc = N->getOpcode(); 8804 SDValue Op0 = N->getOperand(0); 8805 SDValue Op1 = N->getOperand(1); 8806 8807 if (!(Op0->isDivergent() ^ Op1->isDivergent())) 8808 return SDValue(); 8809 8810 if (Op0->isDivergent()) 8811 std::swap(Op0, Op1); 8812 8813 if (Op1.getOpcode() != Opc || !Op1.hasOneUse()) 8814 return SDValue(); 8815 8816 SDValue Op2 = Op1.getOperand(1); 8817 Op1 = Op1.getOperand(0); 8818 if (!(Op1->isDivergent() ^ Op2->isDivergent())) 8819 return SDValue(); 8820 8821 if (Op1->isDivergent()) 8822 std::swap(Op1, Op2); 8823 8824 // If either operand is constant this will conflict with 8825 // DAGCombiner::ReassociateOps(). 8826 if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) || 8827 DAG.isConstantIntBuildVectorOrConstantInt(Op1)) 8828 return SDValue(); 8829 8830 SDLoc SL(N); 8831 SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1); 8832 return DAG.getNode(Opc, SL, VT, Add1, Op2); 8833 } 8834 8835 static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL, 8836 EVT VT, 8837 SDValue N0, SDValue N1, SDValue N2, 8838 bool Signed) { 8839 unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32; 8840 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1); 8841 SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2); 8842 return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad); 8843 } 8844 8845 SDValue SITargetLowering::performAddCombine(SDNode *N, 8846 DAGCombinerInfo &DCI) const { 8847 SelectionDAG &DAG = DCI.DAG; 8848 EVT VT = N->getValueType(0); 8849 SDLoc SL(N); 8850 SDValue LHS = N->getOperand(0); 8851 SDValue RHS = N->getOperand(1); 8852 8853 if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL) 8854 && Subtarget->hasMad64_32() && 8855 !VT.isVector() && VT.getScalarSizeInBits() > 32 && 8856 VT.getScalarSizeInBits() <= 64) { 8857 if (LHS.getOpcode() != ISD::MUL) 8858 std::swap(LHS, RHS); 8859 8860 SDValue MulLHS = LHS.getOperand(0); 8861 SDValue MulRHS = LHS.getOperand(1); 8862 SDValue AddRHS = RHS; 8863 8864 // TODO: Maybe restrict if SGPR inputs. 
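// Illustrative case: for an i64 (add (mul a, b), c) where a and b are zexts
// from i32, both factors have at most 32 significant known bits, so the
// expression folds to MAD_U64_U32 below; the signed path requires strictly
// fewer than 32 significant bits in each factor.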
8865 if (numBitsUnsigned(MulLHS, DAG) <= 32 && 8866 numBitsUnsigned(MulRHS, DAG) <= 32) { 8867 MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32); 8868 MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32); 8869 AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64); 8870 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false); 8871 } 8872 8873 if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) { 8874 MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32); 8875 MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32); 8876 AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64); 8877 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true); 8878 } 8879 8880 return SDValue(); 8881 } 8882 8883 if (SDValue V = reassociateScalarOps(N, DAG)) { 8884 return V; 8885 } 8886 8887 if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG()) 8888 return SDValue(); 8889 8890 // add x, zext (setcc) => addcarry x, 0, setcc 8891 // add x, sext (setcc) => subcarry x, 0, setcc 8892 unsigned Opc = LHS.getOpcode(); 8893 if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND || 8894 Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY) 8895 std::swap(RHS, LHS); 8896 8897 Opc = RHS.getOpcode(); 8898 switch (Opc) { 8899 default: break; 8900 case ISD::ZERO_EXTEND: 8901 case ISD::SIGN_EXTEND: 8902 case ISD::ANY_EXTEND: { 8903 auto Cond = RHS.getOperand(0); 8904 if (!isBoolSGPR(Cond)) 8905 break; 8906 SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1); 8907 SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond }; 8908 Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY; 8909 return DAG.getNode(Opc, SL, VTList, Args); 8910 } 8911 case ISD::ADDCARRY: { 8912 // add x, (addcarry y, 0, cc) => addcarry x, y, cc 8913 auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 8914 if (!C || C->getZExtValue() != 0) break; 8915 SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) }; 8916 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args); 8917 } 8918 } 8919 return SDValue(); 8920 } 8921 8922 SDValue SITargetLowering::performSubCombine(SDNode *N, 8923 DAGCombinerInfo &DCI) const { 8924 SelectionDAG &DAG = DCI.DAG; 8925 EVT VT = N->getValueType(0); 8926 8927 if (VT != MVT::i32) 8928 return SDValue(); 8929 8930 SDLoc SL(N); 8931 SDValue LHS = N->getOperand(0); 8932 SDValue RHS = N->getOperand(1); 8933 8934 if (LHS.getOpcode() == ISD::SUBCARRY) { 8935 // sub (subcarry x, 0, cc), y => subcarry x, y, cc 8936 auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); 8937 if (!C || !C->isNullValue()) 8938 return SDValue(); 8939 SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) }; 8940 return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args); 8941 } 8942 return SDValue(); 8943 } 8944 8945 SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N, 8946 DAGCombinerInfo &DCI) const { 8947 8948 if (N->getValueType(0) != MVT::i32) 8949 return SDValue(); 8950 8951 auto C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 8952 if (!C || C->getZExtValue() != 0) 8953 return SDValue(); 8954 8955 SelectionDAG &DAG = DCI.DAG; 8956 SDValue LHS = N->getOperand(0); 8957 8958 // addcarry (add x, y), 0, cc => addcarry x, y, cc 8959 // subcarry (sub x, y), 0, cc => subcarry x, y, cc 8960 unsigned LHSOpc = LHS.getOpcode(); 8961 unsigned Opc = N->getOpcode(); 8962 if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) || 8963 (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) { 8964 SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) }; 8965 return DAG.getNode(Opc, SDLoc(N), N->getVTList(), 
Args); 8966 } 8967 return SDValue(); 8968 } 8969 8970 SDValue SITargetLowering::performFAddCombine(SDNode *N, 8971 DAGCombinerInfo &DCI) const { 8972 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 8973 return SDValue(); 8974 8975 SelectionDAG &DAG = DCI.DAG; 8976 EVT VT = N->getValueType(0); 8977 8978 SDLoc SL(N); 8979 SDValue LHS = N->getOperand(0); 8980 SDValue RHS = N->getOperand(1); 8981 8982 // These should really be instruction patterns, but writing patterns with 8983 // source modiifiers is a pain. 8984 8985 // fadd (fadd (a, a), b) -> mad 2.0, a, b 8986 if (LHS.getOpcode() == ISD::FADD) { 8987 SDValue A = LHS.getOperand(0); 8988 if (A == LHS.getOperand(1)) { 8989 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); 8990 if (FusedOp != 0) { 8991 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 8992 return DAG.getNode(FusedOp, SL, VT, A, Two, RHS); 8993 } 8994 } 8995 } 8996 8997 // fadd (b, fadd (a, a)) -> mad 2.0, a, b 8998 if (RHS.getOpcode() == ISD::FADD) { 8999 SDValue A = RHS.getOperand(0); 9000 if (A == RHS.getOperand(1)) { 9001 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); 9002 if (FusedOp != 0) { 9003 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 9004 return DAG.getNode(FusedOp, SL, VT, A, Two, LHS); 9005 } 9006 } 9007 } 9008 9009 return SDValue(); 9010 } 9011 9012 SDValue SITargetLowering::performFSubCombine(SDNode *N, 9013 DAGCombinerInfo &DCI) const { 9014 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 9015 return SDValue(); 9016 9017 SelectionDAG &DAG = DCI.DAG; 9018 SDLoc SL(N); 9019 EVT VT = N->getValueType(0); 9020 assert(!VT.isVector()); 9021 9022 // Try to get the fneg to fold into the source modifier. This undoes generic 9023 // DAG combines and folds them into the mad. 9024 // 9025 // Only do this if we are not trying to support denormals. v_mad_f32 does 9026 // not support denormals ever. 9027 SDValue LHS = N->getOperand(0); 9028 SDValue RHS = N->getOperand(1); 9029 if (LHS.getOpcode() == ISD::FADD) { 9030 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c) 9031 SDValue A = LHS.getOperand(0); 9032 if (A == LHS.getOperand(1)) { 9033 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); 9034 if (FusedOp != 0){ 9035 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 9036 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 9037 9038 return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS); 9039 } 9040 } 9041 } 9042 9043 if (RHS.getOpcode() == ISD::FADD) { 9044 // (fsub c, (fadd a, a)) -> mad -2.0, a, c 9045 9046 SDValue A = RHS.getOperand(0); 9047 if (A == RHS.getOperand(1)) { 9048 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); 9049 if (FusedOp != 0){ 9050 const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT); 9051 return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS); 9052 } 9053 } 9054 } 9055 9056 return SDValue(); 9057 } 9058 9059 SDValue SITargetLowering::performFMACombine(SDNode *N, 9060 DAGCombinerInfo &DCI) const { 9061 SelectionDAG &DAG = DCI.DAG; 9062 EVT VT = N->getValueType(0); 9063 SDLoc SL(N); 9064 9065 if (!Subtarget->hasDot2Insts() || VT != MVT::f32) 9066 return SDValue(); 9067 9068 // FMA((F32)S0.x, (F32)S1. 
x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
9069 // FDOT2((V2F16)S0, (V2F16)S1, (F32)z))
9070 SDValue Op1 = N->getOperand(0);
9071 SDValue Op2 = N->getOperand(1);
9072 SDValue FMA = N->getOperand(2);
9073
9074 if (FMA.getOpcode() != ISD::FMA ||
9075 Op1.getOpcode() != ISD::FP_EXTEND ||
9076 Op2.getOpcode() != ISD::FP_EXTEND)
9077 return SDValue();
9078
9079 // fdot2_f32_f16 always flushes fp32 denormal operands and output to zero,
9080 // regardless of the denorm mode setting. Therefore, unsafe-fp-math or
9081 // fp-contract is sufficient to allow generating fdot2.
9082 const TargetOptions &Options = DAG.getTarget().Options;
9083 if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
9084 (N->getFlags().hasAllowContract() &&
9085 FMA->getFlags().hasAllowContract())) {
9086 Op1 = Op1.getOperand(0);
9087 Op2 = Op2.getOperand(0);
9088 if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9089 Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9090 return SDValue();
9091
9092 SDValue Vec1 = Op1.getOperand(0);
9093 SDValue Idx1 = Op1.getOperand(1);
9094 SDValue Vec2 = Op2.getOperand(0);
9095
9096 SDValue FMAOp1 = FMA.getOperand(0);
9097 SDValue FMAOp2 = FMA.getOperand(1);
9098 SDValue FMAAcc = FMA.getOperand(2);
9099
9100 if (FMAOp1.getOpcode() != ISD::FP_EXTEND ||
9101 FMAOp2.getOpcode() != ISD::FP_EXTEND)
9102 return SDValue();
9103
9104 FMAOp1 = FMAOp1.getOperand(0);
9105 FMAOp2 = FMAOp2.getOperand(0);
9106 if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9107 FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9108 return SDValue();
9109
9110 SDValue Vec3 = FMAOp1.getOperand(0);
9111 SDValue Vec4 = FMAOp2.getOperand(0);
9112 SDValue Idx2 = FMAOp1.getOperand(1);
9113
9114 if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) ||
9115 // Idx1 and Idx2 cannot be the same.
9116 Idx1 == Idx2) 9117 return SDValue(); 9118 9119 if (Vec1 == Vec2 || Vec3 == Vec4) 9120 return SDValue(); 9121 9122 if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16) 9123 return SDValue(); 9124 9125 if ((Vec1 == Vec3 && Vec2 == Vec4) || 9126 (Vec1 == Vec4 && Vec2 == Vec3)) { 9127 return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc, 9128 DAG.getTargetConstant(0, SL, MVT::i1)); 9129 } 9130 } 9131 return SDValue(); 9132 } 9133 9134 SDValue SITargetLowering::performSetCCCombine(SDNode *N, 9135 DAGCombinerInfo &DCI) const { 9136 SelectionDAG &DAG = DCI.DAG; 9137 SDLoc SL(N); 9138 9139 SDValue LHS = N->getOperand(0); 9140 SDValue RHS = N->getOperand(1); 9141 EVT VT = LHS.getValueType(); 9142 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 9143 9144 auto CRHS = dyn_cast<ConstantSDNode>(RHS); 9145 if (!CRHS) { 9146 CRHS = dyn_cast<ConstantSDNode>(LHS); 9147 if (CRHS) { 9148 std::swap(LHS, RHS); 9149 CC = getSetCCSwappedOperands(CC); 9150 } 9151 } 9152 9153 if (CRHS) { 9154 if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND && 9155 isBoolSGPR(LHS.getOperand(0))) { 9156 // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1 9157 // setcc (sext from i1 cc), -1, eq|sle|uge) => cc 9158 // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1 9159 // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc 9160 if ((CRHS->isAllOnesValue() && 9161 (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) || 9162 (CRHS->isNullValue() && 9163 (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE))) 9164 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), 9165 DAG.getConstant(-1, SL, MVT::i1)); 9166 if ((CRHS->isAllOnesValue() && 9167 (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) || 9168 (CRHS->isNullValue() && 9169 (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT))) 9170 return LHS.getOperand(0); 9171 } 9172 9173 uint64_t CRHSVal = CRHS->getZExtValue(); 9174 if ((CC == ISD::SETEQ || CC == ISD::SETNE) && 9175 LHS.getOpcode() == ISD::SELECT && 9176 isa<ConstantSDNode>(LHS.getOperand(1)) && 9177 isa<ConstantSDNode>(LHS.getOperand(2)) && 9178 LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) && 9179 isBoolSGPR(LHS.getOperand(0))) { 9180 // Given CT != FT: 9181 // setcc (select cc, CT, CF), CF, eq => xor cc, -1 9182 // setcc (select cc, CT, CF), CF, ne => cc 9183 // setcc (select cc, CT, CF), CT, ne => xor cc, -1 9184 // setcc (select cc, CT, CF), CT, eq => cc 9185 uint64_t CT = LHS.getConstantOperandVal(1); 9186 uint64_t CF = LHS.getConstantOperandVal(2); 9187 9188 if ((CF == CRHSVal && CC == ISD::SETEQ) || 9189 (CT == CRHSVal && CC == ISD::SETNE)) 9190 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), 9191 DAG.getConstant(-1, SL, MVT::i1)); 9192 if ((CF == CRHSVal && CC == ISD::SETNE) || 9193 (CT == CRHSVal && CC == ISD::SETEQ)) 9194 return LHS.getOperand(0); 9195 } 9196 } 9197 9198 if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() && 9199 VT != MVT::f16)) 9200 return SDValue(); 9201 9202 // Match isinf/isfinite pattern 9203 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) 9204 // (fcmp one (fabs x), inf) -> (fp_class x, 9205 // (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero) 9206 if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) { 9207 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); 9208 if (!CRHS) 9209 return SDValue(); 9210 9211 const APFloat &APF = 
CRHS->getValueAPF(); 9212 if (APF.isInfinity() && !APF.isNegative()) { 9213 const unsigned IsInfMask = SIInstrFlags::P_INFINITY | 9214 SIInstrFlags::N_INFINITY; 9215 const unsigned IsFiniteMask = SIInstrFlags::N_ZERO | 9216 SIInstrFlags::P_ZERO | 9217 SIInstrFlags::N_NORMAL | 9218 SIInstrFlags::P_NORMAL | 9219 SIInstrFlags::N_SUBNORMAL | 9220 SIInstrFlags::P_SUBNORMAL; 9221 unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask; 9222 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0), 9223 DAG.getConstant(Mask, SL, MVT::i32)); 9224 } 9225 } 9226 9227 return SDValue(); 9228 } 9229 9230 SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N, 9231 DAGCombinerInfo &DCI) const { 9232 SelectionDAG &DAG = DCI.DAG; 9233 SDLoc SL(N); 9234 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; 9235 9236 SDValue Src = N->getOperand(0); 9237 SDValue Srl = N->getOperand(0); 9238 if (Srl.getOpcode() == ISD::ZERO_EXTEND) 9239 Srl = Srl.getOperand(0); 9240 9241 // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero. 9242 if (Srl.getOpcode() == ISD::SRL) { 9243 // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x 9244 // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x 9245 // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x 9246 9247 if (const ConstantSDNode *C = 9248 dyn_cast<ConstantSDNode>(Srl.getOperand(1))) { 9249 Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)), 9250 EVT(MVT::i32)); 9251 9252 unsigned SrcOffset = C->getZExtValue() + 8 * Offset; 9253 if (SrcOffset < 32 && SrcOffset % 8 == 0) { 9254 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL, 9255 MVT::f32, Srl); 9256 } 9257 } 9258 } 9259 9260 APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8); 9261 9262 KnownBits Known; 9263 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 9264 !DCI.isBeforeLegalizeOps()); 9265 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9266 if (TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) { 9267 DCI.CommitTargetLoweringOpt(TLO); 9268 } 9269 9270 return SDValue(); 9271 } 9272 9273 SDValue SITargetLowering::performClampCombine(SDNode *N, 9274 DAGCombinerInfo &DCI) const { 9275 ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); 9276 if (!CSrc) 9277 return SDValue(); 9278 9279 const MachineFunction &MF = DCI.DAG.getMachineFunction(); 9280 const APFloat &F = CSrc->getValueAPF(); 9281 APFloat Zero = APFloat::getZero(F.getSemantics()); 9282 APFloat::cmpResult Cmp0 = F.compare(Zero); 9283 if (Cmp0 == APFloat::cmpLessThan || 9284 (Cmp0 == APFloat::cmpUnordered && 9285 MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) { 9286 return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0)); 9287 } 9288 9289 APFloat One(F.getSemantics(), "1.0"); 9290 APFloat::cmpResult Cmp1 = F.compare(One); 9291 if (Cmp1 == APFloat::cmpGreaterThan) 9292 return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0)); 9293 9294 return SDValue(CSrc, 0); 9295 } 9296 9297 9298 SDValue SITargetLowering::PerformDAGCombine(SDNode *N, 9299 DAGCombinerInfo &DCI) const { 9300 if (getTargetMachine().getOptLevel() == CodeGenOpt::None) 9301 return SDValue(); 9302 switch (N->getOpcode()) { 9303 default: 9304 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 9305 case ISD::ADD: 9306 return performAddCombine(N, DCI); 9307 case ISD::SUB: 9308 return performSubCombine(N, DCI); 9309 case ISD::ADDCARRY: 9310 case ISD::SUBCARRY: 9311 return performAddCarrySubCarryCombine(N, DCI); 9312 case ISD::FADD: 
9313 return performFAddCombine(N, DCI); 9314 case ISD::FSUB: 9315 return performFSubCombine(N, DCI); 9316 case ISD::SETCC: 9317 return performSetCCCombine(N, DCI); 9318 case ISD::FMAXNUM: 9319 case ISD::FMINNUM: 9320 case ISD::FMAXNUM_IEEE: 9321 case ISD::FMINNUM_IEEE: 9322 case ISD::SMAX: 9323 case ISD::SMIN: 9324 case ISD::UMAX: 9325 case ISD::UMIN: 9326 case AMDGPUISD::FMIN_LEGACY: 9327 case AMDGPUISD::FMAX_LEGACY: 9328 return performMinMaxCombine(N, DCI); 9329 case ISD::FMA: 9330 return performFMACombine(N, DCI); 9331 case ISD::LOAD: { 9332 if (SDValue Widended = widenLoad(cast<LoadSDNode>(N), DCI)) 9333 return Widended; 9334 LLVM_FALLTHROUGH; 9335 } 9336 case ISD::STORE: 9337 case ISD::ATOMIC_LOAD: 9338 case ISD::ATOMIC_STORE: 9339 case ISD::ATOMIC_CMP_SWAP: 9340 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 9341 case ISD::ATOMIC_SWAP: 9342 case ISD::ATOMIC_LOAD_ADD: 9343 case ISD::ATOMIC_LOAD_SUB: 9344 case ISD::ATOMIC_LOAD_AND: 9345 case ISD::ATOMIC_LOAD_OR: 9346 case ISD::ATOMIC_LOAD_XOR: 9347 case ISD::ATOMIC_LOAD_NAND: 9348 case ISD::ATOMIC_LOAD_MIN: 9349 case ISD::ATOMIC_LOAD_MAX: 9350 case ISD::ATOMIC_LOAD_UMIN: 9351 case ISD::ATOMIC_LOAD_UMAX: 9352 case ISD::ATOMIC_LOAD_FADD: 9353 case AMDGPUISD::ATOMIC_INC: 9354 case AMDGPUISD::ATOMIC_DEC: 9355 case AMDGPUISD::ATOMIC_LOAD_FMIN: 9356 case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics. 9357 if (DCI.isBeforeLegalize()) 9358 break; 9359 return performMemSDNodeCombine(cast<MemSDNode>(N), DCI); 9360 case ISD::AND: 9361 return performAndCombine(N, DCI); 9362 case ISD::OR: 9363 return performOrCombine(N, DCI); 9364 case ISD::XOR: 9365 return performXorCombine(N, DCI); 9366 case ISD::ZERO_EXTEND: 9367 return performZeroExtendCombine(N, DCI); 9368 case ISD::SIGN_EXTEND_INREG: 9369 return performSignExtendInRegCombine(N , DCI); 9370 case AMDGPUISD::FP_CLASS: 9371 return performClassCombine(N, DCI); 9372 case ISD::FCANONICALIZE: 9373 return performFCanonicalizeCombine(N, DCI); 9374 case AMDGPUISD::RCP: 9375 return performRcpCombine(N, DCI); 9376 case AMDGPUISD::FRACT: 9377 case AMDGPUISD::RSQ: 9378 case AMDGPUISD::RCP_LEGACY: 9379 case AMDGPUISD::RSQ_LEGACY: 9380 case AMDGPUISD::RCP_IFLAG: 9381 case AMDGPUISD::RSQ_CLAMP: 9382 case AMDGPUISD::LDEXP: { 9383 SDValue Src = N->getOperand(0); 9384 if (Src.isUndef()) 9385 return Src; 9386 break; 9387 } 9388 case ISD::SINT_TO_FP: 9389 case ISD::UINT_TO_FP: 9390 return performUCharToFloatCombine(N, DCI); 9391 case AMDGPUISD::CVT_F32_UBYTE0: 9392 case AMDGPUISD::CVT_F32_UBYTE1: 9393 case AMDGPUISD::CVT_F32_UBYTE2: 9394 case AMDGPUISD::CVT_F32_UBYTE3: 9395 return performCvtF32UByteNCombine(N, DCI); 9396 case AMDGPUISD::FMED3: 9397 return performFMed3Combine(N, DCI); 9398 case AMDGPUISD::CVT_PKRTZ_F16_F32: 9399 return performCvtPkRTZCombine(N, DCI); 9400 case AMDGPUISD::CLAMP: 9401 return performClampCombine(N, DCI); 9402 case ISD::SCALAR_TO_VECTOR: { 9403 SelectionDAG &DAG = DCI.DAG; 9404 EVT VT = N->getValueType(0); 9405 9406 // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x)) 9407 if (VT == MVT::v2i16 || VT == MVT::v2f16) { 9408 SDLoc SL(N); 9409 SDValue Src = N->getOperand(0); 9410 EVT EltVT = Src.getValueType(); 9411 if (EltVT == MVT::f16) 9412 Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src); 9413 9414 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src); 9415 return DAG.getNode(ISD::BITCAST, SL, VT, Ext); 9416 } 9417 9418 break; 9419 } 9420 case ISD::EXTRACT_VECTOR_ELT: 9421 return performExtractVectorEltCombine(N, DCI); 9422 case ISD::INSERT_VECTOR_ELT: 
9423 return performInsertVectorEltCombine(N, DCI); 9424 } 9425 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 9426 } 9427 9428 /// Helper function for adjustWritemask 9429 static unsigned SubIdx2Lane(unsigned Idx) { 9430 switch (Idx) { 9431 default: return 0; 9432 case AMDGPU::sub0: return 0; 9433 case AMDGPU::sub1: return 1; 9434 case AMDGPU::sub2: return 2; 9435 case AMDGPU::sub3: return 3; 9436 case AMDGPU::sub4: return 4; // Possible with TFE/LWE 9437 } 9438 } 9439 9440 /// Adjust the writemask of MIMG instructions 9441 SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node, 9442 SelectionDAG &DAG) const { 9443 unsigned Opcode = Node->getMachineOpcode(); 9444 9445 // Subtract 1 because the vdata output is not a MachineSDNode operand. 9446 int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1; 9447 if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx)) 9448 return Node; // not implemented for D16 9449 9450 SDNode *Users[5] = { nullptr }; 9451 unsigned Lane = 0; 9452 unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1; 9453 unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx); 9454 unsigned NewDmask = 0; 9455 unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1; 9456 unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1; 9457 bool UsesTFC = (Node->getConstantOperandVal(TFEIdx) || 9458 Node->getConstantOperandVal(LWEIdx)) ? 1 : 0; 9459 unsigned TFCLane = 0; 9460 bool HasChain = Node->getNumValues() > 1; 9461 9462 if (OldDmask == 0) { 9463 // These are folded out, but on the chance it happens don't assert. 9464 return Node; 9465 } 9466 9467 unsigned OldBitsSet = countPopulation(OldDmask); 9468 // Work out which is the TFE/LWE lane if that is enabled. 9469 if (UsesTFC) { 9470 TFCLane = OldBitsSet; 9471 } 9472 9473 // Try to figure out the used register components 9474 for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end(); 9475 I != E; ++I) { 9476 9477 // Don't look at users of the chain. 9478 if (I.getUse().getResNo() != 0) 9479 continue; 9480 9481 // Abort if we can't understand the usage 9482 if (!I->isMachineOpcode() || 9483 I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) 9484 return Node; 9485 9486 // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used. 9487 // Note that subregs are packed, i.e. Lane==0 is the first bit set 9488 // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit 9489 // set, etc. 9490 Lane = SubIdx2Lane(I->getConstantOperandVal(1)); 9491 9492 // Check if the use is for the TFE/LWE generated result at VGPRn+1. 9493 if (UsesTFC && Lane == TFCLane) { 9494 Users[Lane] = *I; 9495 } else { 9496 // Set which texture component corresponds to the lane. 9497 unsigned Comp; 9498 for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) { 9499 Comp = countTrailingZeros(Dmask); 9500 Dmask &= ~(1 << Comp); 9501 } 9502 9503 // Abort if we have more than one user per component. 9504 if (Users[Lane]) 9505 return Node; 9506 9507 Users[Lane] = *I; 9508 NewDmask |= 1 << Comp; 9509 } 9510 } 9511 9512 // Don't allow 0 dmask, as hardware assumes one channel enabled. 9513 bool NoChannels = !NewDmask; 9514 if (NoChannels) { 9515 if (!UsesTFC) { 9516 // No uses of the result and not using TFC. Then do nothing. 
9517 return Node; 9518 } 9519 // If the original dmask has one channel - then nothing to do 9520 if (OldBitsSet == 1) 9521 return Node; 9522 // Use an arbitrary dmask - required for the instruction to work 9523 NewDmask = 1; 9524 } 9525 // Abort if there's no change 9526 if (NewDmask == OldDmask) 9527 return Node; 9528 9529 unsigned BitsSet = countPopulation(NewDmask); 9530 9531 // Check for TFE or LWE - increase the number of channels by one to account 9532 // for the extra return value 9533 // This will need adjustment for D16 if this is also included in 9534 // adjustWriteMask (this function) but at present D16 are excluded. 9535 unsigned NewChannels = BitsSet + UsesTFC; 9536 9537 int NewOpcode = 9538 AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels); 9539 assert(NewOpcode != -1 && 9540 NewOpcode != static_cast<int>(Node->getMachineOpcode()) && 9541 "failed to find equivalent MIMG op"); 9542 9543 // Adjust the writemask in the node 9544 SmallVector<SDValue, 12> Ops; 9545 Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx); 9546 Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32)); 9547 Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end()); 9548 9549 MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT(); 9550 9551 MVT ResultVT = NewChannels == 1 ? 9552 SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 : 9553 NewChannels == 5 ? 8 : NewChannels); 9554 SDVTList NewVTList = HasChain ? 9555 DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT); 9556 9557 9558 MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node), 9559 NewVTList, Ops); 9560 9561 if (HasChain) { 9562 // Update chain. 9563 DAG.setNodeMemRefs(NewNode, Node->memoperands()); 9564 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1)); 9565 } 9566 9567 if (NewChannels == 1) { 9568 assert(Node->hasNUsesOfValue(1, 0)); 9569 SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY, 9570 SDLoc(Node), Users[Lane]->getValueType(0), 9571 SDValue(NewNode, 0)); 9572 DAG.ReplaceAllUsesWith(Users[Lane], Copy); 9573 return nullptr; 9574 } 9575 9576 // Update the users of the node with the new indices 9577 for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) { 9578 SDNode *User = Users[i]; 9579 if (!User) { 9580 // Handle the special case of NoChannels. We set NewDmask to 1 above, but 9581 // Users[0] is still nullptr because channel 0 doesn't really have a use. 9582 if (i || !NoChannels) 9583 continue; 9584 } else { 9585 SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32); 9586 DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op); 9587 } 9588 9589 switch (Idx) { 9590 default: break; 9591 case AMDGPU::sub0: Idx = AMDGPU::sub1; break; 9592 case AMDGPU::sub1: Idx = AMDGPU::sub2; break; 9593 case AMDGPU::sub2: Idx = AMDGPU::sub3; break; 9594 case AMDGPU::sub3: Idx = AMDGPU::sub4; break; 9595 } 9596 } 9597 9598 DAG.RemoveDeadNode(Node); 9599 return nullptr; 9600 } 9601 9602 static bool isFrameIndexOp(SDValue Op) { 9603 if (Op.getOpcode() == ISD::AssertZext) 9604 Op = Op.getOperand(0); 9605 9606 return isa<FrameIndexSDNode>(Op); 9607 } 9608 9609 /// Legalize target independent instructions (e.g. INSERT_SUBREG) 9610 /// with frame index operands. 9611 /// LLVM assumes that inputs are to these instructions are registers. 
SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
                                                        SelectionDAG &DAG) const {
  if (Node->getOpcode() == ISD::CopyToReg) {
    RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
    SDValue SrcVal = Node->getOperand(2);

    // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
    // to try understanding copies to physical registers.
    if (SrcVal.getValueType() == MVT::i1 &&
        TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
      SDLoc SL(Node);
      MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
      SDValue VReg = DAG.getRegister(
        MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);

      SDNode *Glued = Node->getGluedNode();
      SDValue ToVReg
        = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
                           SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
      SDValue ToResultReg
        = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
                           VReg, ToVReg.getValue(1));
      DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
      DAG.RemoveDeadNode(Node);
      return ToResultReg.getNode();
    }
  }

  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
    if (!isFrameIndexOp(Node->getOperand(i))) {
      Ops.push_back(Node->getOperand(i));
      continue;
    }

    SDLoc DL(Node);
    Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
                                     Node->getOperand(i).getValueType(),
                                     Node->getOperand(i)), 0));
  }

  return DAG.UpdateNodeOperands(Node, Ops);
}

/// Fold the instructions after selecting them.
/// Returns null if users were already updated.
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
                                          SelectionDAG &DAG) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  unsigned Opcode = Node->getMachineOpcode();

  if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
      !TII->isGather4(Opcode)) {
    return adjustWritemask(Node, DAG);
  }

  if (Opcode == AMDGPU::INSERT_SUBREG ||
      Opcode == AMDGPU::REG_SEQUENCE) {
    legalizeTargetIndependentNode(Node, DAG);
    return Node;
  }

  switch (Opcode) {
  case AMDGPU::V_DIV_SCALE_F32:
  case AMDGPU::V_DIV_SCALE_F64: {
    // Satisfy the operand register constraint when one of the inputs is
    // undefined. Ordinarily each undef value will have its own implicit_def of
    // a vreg, so force these to use a single register.
    SDValue Src0 = Node->getOperand(0);
    SDValue Src1 = Node->getOperand(1);
    SDValue Src2 = Node->getOperand(2);

    if ((Src0.isMachineOpcode() &&
         Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
        (Src0 == Src1 || Src0 == Src2))
      break;

    MVT VT = Src0.getValueType().getSimpleVT();
    const TargetRegisterClass *RC =
      getRegClassFor(VT, Src0.getNode()->isDivergent());

    MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);

    SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
                                      UndefReg, Src0, SDValue());

    // src0 must be the same register as src1 or src2, even if the value is
    // undefined, so make sure we don't violate this constraint.
    if (Src0.isMachineOpcode() &&
        Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
      if (Src1.isMachineOpcode() &&
          Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
        Src0 = Src1;
      else if (Src2.isMachineOpcode() &&
               Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
        Src0 = Src2;
      else {
        assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
        Src0 = UndefReg;
        Src1 = UndefReg;
      }
    } else
      break;

    SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
    for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
      Ops.push_back(Node->getOperand(I));

    Ops.push_back(ImpDef.getValue(1));
    return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
  }
  case AMDGPU::V_PERMLANE16_B32:
  case AMDGPU::V_PERMLANEX16_B32: {
    ConstantSDNode *FI = cast<ConstantSDNode>(Node->getOperand(0));
    ConstantSDNode *BC = cast<ConstantSDNode>(Node->getOperand(2));
    if (!FI->getZExtValue() && !BC->getZExtValue())
      break;
    SDValue VDstIn = Node->getOperand(6);
    if (VDstIn.isMachineOpcode()
        && VDstIn.getMachineOpcode() == AMDGPU::IMPLICIT_DEF)
      break;
    MachineSDNode *ImpDef = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                               SDLoc(Node), MVT::i32);
    SmallVector<SDValue, 8> Ops = { SDValue(FI, 0), Node->getOperand(1),
                                    SDValue(BC, 0), Node->getOperand(3),
                                    Node->getOperand(4), Node->getOperand(5),
                                    SDValue(ImpDef, 0), Node->getOperand(7) };
    return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
  }
  default:
    break;
  }

  return Node;
}

/// Assign the register class depending on the number of
/// bits set in the writemask
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                     SDNode *Node) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  if (TII->isVOP3(MI.getOpcode())) {
    // Make sure constant bus requirements are respected.
    TII->legalizeOperandsVOP3(MRI, MI);
    return;
  }

  // Replace unused atomics with the no return version.
  int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
  if (NoRetAtomicOp != -1) {
    if (!Node->hasAnyUseOfValue(0)) {
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);
      return;
    }

    // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
    // instruction, because the return type of these instructions is a vec2 of
    // the memory type, so it can be tied to the input operand.
    // This means these instructions always have a use, so we need to add a
    // special case to check if the atomic has only one extract_subreg use,
    // which itself has no uses.
    if ((Node->hasNUsesOfValue(1, 0) &&
         Node->use_begin()->isMachineOpcode() &&
         Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
         !Node->use_begin()->hasAnyUseOfValue(0))) {
      unsigned Def = MI.getOperand(0).getReg();

      // Change this into a noret atomic.
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);

      // If we only remove the def operand from the atomic instruction, the
      // extract_subreg will be left with a use of a vreg without a def.
      // So we need to insert an implicit_def to avoid machine verifier
      // errors.
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
              TII->get(AMDGPU::IMPLICIT_DEF), Def);
    }
    return;
  }
}

static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
                              uint64_t Val) {
  SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
  return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
}

MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
                                                const SDLoc &DL,
                                                SDValue Ptr) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  // Build the half of the subregister with the constants before building the
  // full 128-bit register. If we are building multiple resource descriptors,
  // this will allow CSEing of the 2-component register.
  const SDValue Ops0[] = {
    DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
    buildSMovImm32(DAG, DL, 0),
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
  };

  SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                                MVT::v2i32, Ops0), 0);

  // Combine the constants and the pointer.
  const SDValue Ops1[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    Ptr,
    DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
    SubRegHi,
    DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
}

/// Return a resource descriptor with the 'Add TID' bit enabled
/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
/// of the resource descriptor) to create an offset, which is added to
/// the resource pointer.
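///
/// As a rough sketch of the layout built below: dwords 0-1 hold the 64-bit
/// pointer (with RsrcDword1 OR'd into the high half when non-zero), and
/// dwords 2-3 hold the low and high halves of RsrcDword2And3.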
MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
                                           SDValue Ptr, uint32_t RsrcDword1,
                                           uint64_t RsrcDword2And3) const {
  SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
  SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
  if (RsrcDword1) {
    PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
                                     DAG.getConstant(RsrcDword1, DL, MVT::i32)),
                    0);
  }

  SDValue DataLo = buildSMovImm32(DAG, DL,
                                  RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
  SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);

  const SDValue Ops[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    PtrLo,
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    PtrHi,
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
    DataLo,
    DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
    DataHi,
    DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
}

//===----------------------------------------------------------------------===//
// SI Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass *>
SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                               StringRef Constraint,
                                               MVT VT) const {
  const TargetRegisterClass *RC = nullptr;
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
    case 's':
    case 'r':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        RC = &AMDGPU::SReg_32_XM0RegClass;
        break;
      case 64:
        RC = &AMDGPU::SGPR_64RegClass;
        break;
      case 96:
        RC = &AMDGPU::SReg_96RegClass;
        break;
      case 128:
        RC = &AMDGPU::SReg_128RegClass;
        break;
      case 160:
        RC = &AMDGPU::SReg_160RegClass;
        break;
      case 256:
        RC = &AMDGPU::SReg_256RegClass;
        break;
      case 512:
        RC = &AMDGPU::SReg_512RegClass;
        break;
      }
      break;
    case 'v':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        RC = &AMDGPU::VGPR_32RegClass;
        break;
      case 64:
        RC = &AMDGPU::VReg_64RegClass;
        break;
      case 96:
        RC = &AMDGPU::VReg_96RegClass;
        break;
      case 128:
        RC = &AMDGPU::VReg_128RegClass;
        break;
      case 160:
        RC = &AMDGPU::VReg_160RegClass;
        break;
      case 256:
        RC = &AMDGPU::VReg_256RegClass;
        break;
      case 512:
        RC = &AMDGPU::VReg_512RegClass;
        break;
      }
      break;
    }
    // We actually support i128, i16 and f16 as inline parameters
    // even if they are not reported as legal
    if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
               VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
      return std::make_pair(0U, RC);
  }

  if (Constraint.size() > 1) {
    if (Constraint[1] == 'v') {
      RC = &AMDGPU::VGPR_32RegClass;
    } else if (Constraint[1] == 's') {
      RC = &AMDGPU::SGPR_32RegClass;
    }

    if (RC) {
      uint32_t Idx;
      bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
      if (!Failed && Idx < RC->getNumRegs())
        return std::make_pair(RC->getRegister(Idx), RC);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

SITargetLowering::ConstraintType
SITargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 's':
    case 'v':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

// Figure out which registers should be reserved for stack access. Only after
// the function is legalized do we know all of the non-spill stack objects or if
// calls are present.
void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();

  if (Info->isEntryFunction()) {
    // Callable functions have fixed registers used for stack access.
    reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
  }

  assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
                             Info->getStackPtrOffsetReg()));
  if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG)
    MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());

  // We need to worry about replacing the default register with itself in case
  // of MIR testcases missing the MFI.
  if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG)
    MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());

  if (Info->getFrameOffsetReg() != AMDGPU::FP_REG)
    MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());

  if (Info->getScratchWaveOffsetReg() != AMDGPU::SCRATCH_WAVE_OFFSET_REG) {
    MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
                       Info->getScratchWaveOffsetReg());
  }

  Info->limitOccupancy(MF);

  TargetLoweringBase::finalizeLowering(MF);
}

void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
                                                     KnownBits &Known,
                                                     const APInt &DemandedElts,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {
  TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
                                                DAG, Depth);

  // Set the high bits to zero based on the maximum allowed scratch size per
  // wave. We can't use vaddr in MUBUF instructions if we don't know the address
  // calculation won't overflow, so assume the sign bit is never set.
  Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
}

unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  const unsigned PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
  const unsigned CacheLineAlign = 6; // log2(64)

  // Pre-GFX10 targets did not benefit from loop alignment.
  if (!ML || DisableLoopAlignment ||
      (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
      getSubtarget()->hasInstFwdPrefetchBug())
    return PrefAlign;

  // On GFX10 the I$ consists of 4 x 64-byte cache lines.
  // By default the prefetcher keeps one cache line behind and reads two ahead.
  // We can modify it with S_INST_PREFETCH for larger loops to have two lines
  // behind and one ahead.
  // Therefore we can benefit from aligning loop headers if the loop fits in
  // 192 bytes.
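  // (In short, the code below leaves loops over 192 bytes or under 64 bytes at
  // the default alignment, aligns loops of up to 128 bytes to a cache line,
  // and additionally wraps loops of up to 192 bytes with S_INST_PREFETCH when
  // no enclosing loop has already done so.)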
  // If the loop fits in 64 bytes it always spans no more than two cache lines
  // and does not need alignment.
  // Otherwise, if the loop is at most 128 bytes, we do not need to modify the
  // prefetch settings.
  // Otherwise, if the loop is at most 192 bytes, we need two lines behind.

  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  const MachineBasicBlock *Header = ML->getHeader();
  if (Header->getAlignment() != PrefAlign)
    return Header->getAlignment(); // Already processed.

  unsigned LoopSize = 0;
  for (const MachineBasicBlock *MBB : ML->blocks()) {
    // If an inner loop block is aligned, assume on average half of the
    // alignment size is added as nops.
    if (MBB != Header)
      LoopSize += (1 << MBB->getAlignment()) / 2;

    for (const MachineInstr &MI : *MBB) {
      LoopSize += TII->getInstSizeInBytes(MI);
      if (LoopSize > 192)
        return PrefAlign;
    }
  }

  if (LoopSize <= 64)
    return PrefAlign;

  if (LoopSize <= 128)
    return CacheLineAlign;

  // If any of the parent loops is already surrounded by prefetch instructions,
  // do not insert new ones for the inner loop, as that would reset the
  // parent's settings.
  for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) {
    if (MachineBasicBlock *Exit = P->getExitBlock()) {
      auto I = Exit->getFirstNonDebugInstr();
      if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
        return CacheLineAlign;
    }
  }

  MachineBasicBlock *Pre = ML->getLoopPreheader();
  MachineBasicBlock *Exit = ML->getExitBlock();

  if (Pre && Exit) {
    BuildMI(*Pre, Pre->getFirstTerminator(), DebugLoc(),
            TII->get(AMDGPU::S_INST_PREFETCH))
      .addImm(1); // prefetch 2 lines behind PC

    BuildMI(*Exit, Exit->getFirstNonDebugInstr(), DebugLoc(),
            TII->get(AMDGPU::S_INST_PREFETCH))
      .addImm(2); // prefetch 1 line behind PC
  }

  return CacheLineAlign;
}

LLVM_ATTRIBUTE_UNUSED
static bool isCopyFromRegOfInlineAsm(const SDNode *N) {
  assert(N->getOpcode() == ISD::CopyFromReg);
  do {
    // Follow the chain until we find an INLINEASM node.
    N = N->getOperand(0).getNode();
    if (N->getOpcode() == ISD::INLINEASM ||
        N->getOpcode() == ISD::INLINEASM_BR)
      return true;
  } while (N->getOpcode() == ISD::CopyFromReg);
  return false;
}

bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode *N,
                                                  FunctionLoweringInfo *FLI,
                                                  LegacyDivergenceAnalysis *KDA) const {
  switch (N->getOpcode()) {
  case ISD::CopyFromReg: {
    const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1));
    const MachineFunction *MF = FLI->MF;
    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
    unsigned Reg = R->getReg();
    if (TRI.isPhysicalRegister(Reg))
      return !TRI.isSGPRReg(MRI, Reg);

    if (MRI.isLiveIn(Reg)) {
      // workitem.id.x workitem.id.y workitem.id.z
      // Any VGPR formal argument is also considered divergent
      if (!TRI.isSGPRReg(MRI, Reg))
        return true;
      // Formal arguments of non-entry functions
      // are conservatively considered divergent
      else if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
        return true;
      return false;
    }
    const Value *V = FLI->getValueFromVirtualReg(Reg);
    if (V)
      return KDA->isDivergent(V);
    assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
    return !TRI.isSGPRReg(MRI, Reg);
  }
  case ISD::LOAD: {
    const LoadSDNode *L = cast<LoadSDNode>(N);
    unsigned AS = L->getAddressSpace();
    // A flat load may access private memory.
    return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
  }
  case ISD::CALLSEQ_END:
    return true;
  case ISD::INTRINSIC_WO_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
  case ISD::INTRINSIC_W_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
  // In some cases intrinsics that are a source of divergence have been
  // lowered to AMDGPUISD, so we need to check those as well.
  case AMDGPUISD::INTERP_MOV:
  case AMDGPUISD::INTERP_P1:
  case AMDGPUISD::INTERP_P2:
    return true;
  }
  return false;
}

bool SITargetLowering::denormalsEnabledForType(EVT VT) const {
  switch (VT.getScalarType().getSimpleVT().SimpleTy) {
  case MVT::f32:
    return Subtarget->hasFP32Denormals();
  case MVT::f64:
    return Subtarget->hasFP64Denormals();
  case MVT::f16:
    return Subtarget->hasFP16Denormals();
  default:
    return false;
  }
}

bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
                                                    const SelectionDAG &DAG,
                                                    bool SNaN,
                                                    unsigned Depth) const {
  if (Op.getOpcode() == AMDGPUISD::CLAMP) {
    const MachineFunction &MF = DAG.getMachineFunction();
    const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

    if (Info->getMode().DX10Clamp)
      return true; // Clamped to 0.
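    // Otherwise we can only conclude the clamp is NaN-free when its input is
    // known to be NaN-free.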
    return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }

  return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG,
                                                            SNaN, Depth);
}

TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
  switch (RMW->getOperation()) {
  case AtomicRMWInst::FAdd: {
    Type *Ty = RMW->getType();

    // We don't have a way to support 16-bit atomics now, so just leave them
    // as-is.
    if (Ty->isHalfTy())
      return AtomicExpansionKind::None;

    if (!Ty->isFloatTy())
      return AtomicExpansionKind::CmpXChg;

    // TODO: We do have these for flat; older targets also had them for
    // buffers.
    unsigned AS = RMW->getPointerAddressSpace();
    return (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) ?
      AtomicExpansionKind::None : AtomicExpansionKind::CmpXChg;
  }
  default:
    break;
  }

  return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
}