//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#define AMDGPU_LOG2E_F 1.44269504088896340735992468100189214f
#define AMDGPU_LN2_F 0.693147180559945309417232121458176568f
#define AMDGPU_LN10_F 2.30258509299404568401799145468436421f

#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

static bool allocateCCRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State,
                           const TargetRegisterClass *RC,
                           unsigned NumRegs) {
  ArrayRef<MCPhysReg> RegList = makeArrayRef(RC->begin(), NumRegs);
  unsigned RegResult = State.AllocateReg(RegList);
  if (RegResult == AMDGPU::NoRegister)
    return false;

  State.addLoc(CCValAssign::getReg(ValNo, ValVT, RegResult, LocVT, LocInfo));
  return true;
}

static bool allocateSGPRTuple(unsigned ValNo, MVT ValVT, MVT LocVT,
                              CCValAssign::LocInfo LocInfo,
                              ISD::ArgFlagsTy ArgFlags, CCState &State) {
  switch (LocVT.SimpleTy) {
  case MVT::i64:
  case MVT::f64:
  case MVT::v2i32:
  case MVT::v2f32:
  case MVT::v4i16:
  case MVT::v4f16: {
    // Up to SGPR0-SGPR105
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::SGPR_64RegClass, 53);
  }
  default:
    return false;
  }
}

// Allocate up to VGPR31.
//
// TODO: Since there are no VGPR alignment requirements would it be better to
// split into individual scalar registers?
static bool allocateVGPRTuple(unsigned ValNo, MVT ValVT, MVT LocVT,
                              CCValAssign::LocInfo LocInfo,
                              ISD::ArgFlagsTy ArgFlags, CCState &State) {
  switch (LocVT.SimpleTy) {
  case MVT::i64:
  case MVT::f64:
  case MVT::v2i32:
  case MVT::v2f32:
  case MVT::v4i16:
  case MVT::v4f16: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_64RegClass, 31);
  }
  case MVT::v4i32:
  case MVT::v4f32:
  case MVT::v2i64:
  case MVT::v2f64: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_128RegClass, 29);
  }
  case MVT::v8i32:
  case MVT::v8f32: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_256RegClass, 25);
  }
  case MVT::v16i32:
  case MVT::v16f32: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_512RegClass, 17);
  }
  default:
    return false;
  }
}

#include "AMDGPUGenCallingConv.inc"

// Find a larger type to do a load / store of a vector with.
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}

unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  KnownBits Known = DAG.computeKnownBits(Op);
  return VT.getSizeInBits() - Known.countMinLeadingZeros();
}

unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return VT.getSizeInBits() - DAG.ComputeNumSignBits(Op);
}

AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
                                           const AMDGPUSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v3f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v3f32, MVT::v3i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v5f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v5f32, MVT::v5i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);

  // There are no 64-bit extloads. These should be done as a 32-bit extload and
  // an extension to 64-bit.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    if (VT == MVT::i64)
      continue;

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
  }

  for (MVT VT : MVT::integer_vector_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
  }

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);

  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v3f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v3f32, MVT::v3i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v5f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v5f32, MVT::v5i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);
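
  // The same bitcast trick covers the 64-bit scalar and vector stores below:
  // for example, a store of i64 is legalized as a store of the value bitcast
  // to v2i32, so only the integer store forms need tablegen patterns.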
  setOperationAction(ISD::STORE, MVT::i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);

  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);

  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
  setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
  setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
  setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);

  setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
  setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);

  setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
  setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);

  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::Constant, MVT::i64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // This is totally unsupported, just custom lower to produce an error.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);

  setOperationAction(ISD::FROUND, MVT::f32, Custom);
  setOperationAction(ISD::FROUND, MVT::f64, Custom);

  setOperationAction(ISD::FLOG, MVT::f32, Custom);
  setOperationAction(ISD::FLOG10, MVT::f32, Custom);
  setOperationAction(ISD::FEXP, MVT::f32, Custom);

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Custom);
  setOperationAction(ISD::FREM, MVT::f64, Custom);

  // Expand to fneg + fadd.
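  // (fsub x, y becomes fadd x, (fneg y); fneg is free as a source modifier,
  // so no extra instruction is expected.)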
  setOperationAction(ISD::FSUB, MVT::f64, Expand);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v3i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v3f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v5i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v5f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v5f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v5i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16i32, Custom);

  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    // These should use [SU]DIVREM, so set them to expand.
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // GPU does not have a divrem function for signed or unsigned.
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);

    // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);

    // AMDGPU uses ADDC/SUBC/ADDE/SUBE
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  // The hardware supports 32-bit ROTR, but not ROTL.
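  // Expanding ROTL lets the legalizer rewrite it in terms of the legal 32-bit
  // ROTR (rotl x, n == rotr x, 32 - n); 64-bit rotates have no native
  // instruction, so both directions are expanded there.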
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  setOperationAction(ISD::SMIN, MVT::i32, Legal);
  setOperationAction(ISD::UMIN, MVT::i32, Legal);
  setOperationAction(ISD::SMAX, MVT::i32, Legal);
  setOperationAction(ISD::UMAX, MVT::i32, Legal);

  setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);

  static const MVT::SimpleValueType VectorIntTypes[] = {
    MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32
  };

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
  }

  static const MVT::SimpleValueType FloatVectorTypes[] = {
    MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32
  };

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FCANONICALIZE, VT, Expand);
  }

  // This causes us to use an unrolled select operation rather than expansion
  // with bit operations. This is in general better, but the alternative using
  // BFI instructions may be better if the select sources are SGPRs.
  setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::SELECT, MVT::v3f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v3f32, MVT::v3i32);

  setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::SELECT, MVT::v5f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v5f32, MVT::v5i32);

  // There are no libcalls of any kind.
  for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
    setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);

  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);

  // FIXME: This is only partially true. If we have to do vector compares, any
  // SGPR pair can be a condition register. If we have a uniform condition, we
  // are better off doing SALU operations, where there is only one SCC. For
  // now, we don't have a way of knowing during instruction selection if a
  // condition will be uniform and we always use vector compares. Assume we
  // are using vector compares until that is fixed.
  setHasMultipleConditionRegisters(true);

  setMinCmpXchgSizeInBits(32);
  setSupportsUnalignedAtomics(false);

  PredictableSelectIsExpensive = false;

  // We want to find all load dependencies for long chains of stores to enable
  // merging into very wide vectors. The problem is with vectors with > 4
  // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
  // vectors are a legal type, even though we have to split the loads
  // usually. When we can more precisely specify load legality per address
  // space, we should be able to make FindBetterChain/MergeConsecutiveStores
  // smarter so that they can figure out what to do in 2 iterations without all
  // N > 4 stores on the same chain.
  GatherAllAliasesMaxDepth = 16;

  // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to worry
  // about these during lowering.
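  // Effectively-infinite limits mean any that do survive to this point are
  // always expanded inline rather than turned into libcalls, which do not
  // exist on this target.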
  MaxStoresPerMemcpy = 0xffffffff;
  MaxStoresPerMemmove = 0xffffffff;
  MaxStoresPerMemset = 0xffffffff;

  setTargetDAGCombine(ISD::BITCAST);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::MULHU);
  setTargetDAGCombine(ISD::MULHS);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FNEG);
  setTargetDAGCombine(ISD::FABS);
  setTargetDAGCombine(ISD::AssertZext);
  setTargetDAGCombine(ISD::AssertSext);
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

LLVM_READNONE
static bool fnegFoldsIntoOp(unsigned Opc) {
  switch (Opc) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FMA:
  case ISD::FMAD:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
  case ISD::FSIN:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FCANONICALIZE:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMUL_LEGACY:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY:
  case AMDGPUISD::FMED3:
    return true;
  default:
    return false;
  }
}

/// Returns true if the operation will definitely need to use a 64-bit
/// encoding, and thus will use a VOP3 encoding regardless of the source
/// modifiers.
LLVM_READONLY
static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
  return N->getNumOperands() > 2 || VT == MVT::f64;
}

// Most FP instructions support source modifiers, but this could be refined
// slightly.
LLVM_READONLY
static bool hasSourceMods(const SDNode *N) {
  if (isa<MemSDNode>(N))
    return false;

  switch (N->getOpcode()) {
  case ISD::CopyToReg:
  case ISD::SELECT:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::INLINEASM:
  case ISD::INLINEASM_BR:
  case AMDGPUISD::INTERP_P1:
  case AMDGPUISD::INTERP_P2:
  case AMDGPUISD::DIV_SCALE:

  // TODO: Should really be looking at the users of the bitcast. These are
  // problematic because bitcasts are used to legalize all stores to integer
  // types.
  case ISD::BITCAST:
    return false;
  default:
    return true;
  }
}

bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
                                                 unsigned CostThreshold) {
  // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
  // it is truly free to use a source modifier in all cases. If there are
  // multiple users, and each one would necessitate using VOP3, there will be
  // a code size increase. Try to avoid increasing code size unless we know it
  // will save on the instruction count.
  unsigned NumMayIncreaseSize = 0;
  MVT VT = N->getValueType(0).getScalarType().getSimpleVT();

  // XXX - Should this limit number of uses to check?
  for (const SDNode *U : N->uses()) {
    if (!hasSourceMods(U))
      return false;

    if (!opMustUseVOP3Encoding(U, VT)) {
      if (++NumMayIncreaseSize > CostThreshold)
        return false;
    }
  }

  return true;
}

MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
  return true;
}

// The backend supports 32 and 64 bit floating point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                        bool ForCodeSize) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
          (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
}

// We don't want to shrink f64 / f32 constants.
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}

bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
                                                 ISD::LoadExtType ExtTy,
                                                 EVT NewVT) const {
  // TODO: This may be worth removing. Check regression tests for diffs.
  if (!TargetLoweringBase::shouldReduceLoadWidth(N, ExtTy, NewVT))
    return false;

  unsigned NewSize = NewVT.getStoreSizeInBits();

  // If we are reducing to a 32-bit load, this is always better.
  if (NewSize == 32)
    return true;

  EVT OldVT = N->getValueType(0);
  unsigned OldSize = OldVT.getStoreSizeInBits();

  MemSDNode *MN = cast<MemSDNode>(N);
  unsigned AS = MN->getAddressSpace();
  // Do not shrink an aligned scalar load to sub-dword.
  // Scalar engine cannot do sub-dword loads.
  if (OldSize >= 32 && NewSize < 32 && MN->getAlignment() >= 4 &&
      (AS == AMDGPUAS::CONSTANT_ADDRESS ||
       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
       (isa<LoadSDNode>(N) && AS == AMDGPUAS::GLOBAL_ADDRESS &&
        MN->isInvariant())) &&
      AMDGPUInstrInfo::isUniformMMO(MN->getMemOperand()))
    return false;

  // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
  // extloads, so doing one requires using a buffer_load. In cases where we
  // still couldn't use a scalar load, using the wider load shouldn't really
  // hurt anything.

  // If the old size already had to be an extload, there's no harm in
  // continuing to reduce the width.
  return (OldSize < 32);
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, EVT CastTy,
                                                   const SelectionDAG &DAG,
                                                   const MachineMemOperand &MMO) const {
  assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());

  if (LoadTy.getScalarType() == MVT::i32)
    return false;

  unsigned LScalarSize = LoadTy.getScalarSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarSizeInBits();

  if ((LScalarSize >= CastScalarSize) && (CastScalarSize < 32))
    return false;

  bool Fast = false;
  return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), CastTy,
                            MMO, &Fast) && Fast;
}

// SI+ has instructions for cttz / ctlz for 32-bit values. This is probably
// also profitable with the expansion for 64-bit since it's generally good to
// speculate things.
// FIXME: These should really have the size as a parameter.
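// (Answering true here lets passes like CodeGenPrepare emit cttz/ctlz
// speculatively, without a guarding branch against a zero input.)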
bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
  return true;
}

bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
  return true;
}

bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const {
  switch (N->getOpcode()) {
  default:
    return false;
  case ISD::EntryToken:
  case ISD::TokenFactor:
    return true;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IntrID) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
      return true;
    }
  }
  case ISD::LOAD: {
    // The opcode check above guarantees this is a load, so cast<> is safe.
    const LoadSDNode *L = cast<LoadSDNode>(N);
    return L->getMemOperand()->getAddrSpace() ==
           AMDGPUAS::CONSTANT_ADDRESS_32BIT;
  }
  }
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());

  // Packed operations do not have a fabs modifier.
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16);
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16) ||
         (Subtarget->hasVOP3PInsts() && VT == MVT::v2f16);
}

bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,
                                                        unsigned NumElem,
                                                        unsigned AS) const {
  return true;
}

bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
  // There are few operations which truly have vector input operands. Any
  // vector operation is going to involve operations on each component, and a
  // build_vector will be a copy per element, so it always makes sense to use
  // a build_vector input in place of the extracted element to avoid a copy
  // into a super register.
  //
  // We should probably only do this if all users are extracts only, but this
  // should be the common case.
  return true;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  unsigned SrcSize = Source.getSizeInBits();
  unsigned DestSize = Dest.getSizeInBits();

  return DestSize < SrcSize && DestSize % 32 == 0;
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  unsigned SrcSize = Source->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (DestSize == 16 && Subtarget->has16BitInsts())
    return SrcSize >= 32;

  return DestSize < SrcSize && DestSize % 32 == 0;
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  unsigned SrcSize = Src->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (SrcSize == 16 && Subtarget->has16BitInsts())
    return DestSize >= 32;

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For
  // all practical purposes, the extra mov 0 to load a 64-bit value is free.
  // As used, this will enable reducing 64-bit operations to 32-bit, which is
  // always good.

  if (Src == MVT::i16)
    return Dest == MVT::i32 || Dest == MVT::i64;

  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits
  // is not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                  bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return CC_AMDGPU;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    return CC_AMDGPU_Func;
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  default:
    report_fatal_error("Unsupported calling convention for call");
  }
}

CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                    bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    llvm_unreachable("kernels should not be handled here");
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return RetCC_SI_Shader;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    return RetCC_AMDGPU_Func;
  default:
    report_fatal_error("Unsupported calling convention.");
  }
}

/// The SelectionDAGBuilder will automatically promote function arguments
/// with illegal types. However, this does not work for the AMDGPU targets
/// since the function arguments are stored in memory as these illegal types.
/// In order to handle this properly we need to get the original type sizes
/// from the LLVM IR Function and fixup the ISD::InputArg values before
/// passing them to AnalyzeFormalArguments().
///
/// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
/// input values across multiple registers. Each item in the Ins array
/// represents a single value that will be stored in registers. Ins[x].VT is
/// the value type of the value that will be stored in the register, so
/// whatever SDNode we lower the argument to needs to be this type.
///
/// In order to correctly lower the arguments we need to know the size of each
/// argument. Since Ins[x].VT gives us the size of the register that will
/// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
/// for the original function argument so that we can deduce the correct memory
/// type to use for Ins[x]. In most cases the correct memory type will be
/// Ins[x].ArgVT. However, this will not always be the case. If, for example,
/// we have a kernel argument of type v8i8, this argument will be split into
/// 8 parts and each part will be represented by its own item in the Ins array.
/// For each part the Ins[x].ArgVT will be v8i8, which is the full type of
/// the argument before it was split. From this, we deduce that the memory type
/// for each individual part is i8. We pass the memory type as LocVT to the
/// calling convention analysis function and the register type (Ins[x].VT) as
/// the ValVT.
void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
    CCState &State, const SmallVectorImpl<ISD::InputArg> &Ins) const {
  const MachineFunction &MF = State.getMachineFunction();
  const Function &Fn = MF.getFunction();
  LLVMContext &Ctx = Fn.getParent()->getContext();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
  const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset(Fn);
  CallingConv::ID CC = Fn.getCallingConv();

  unsigned MaxAlign = 1;
  uint64_t ExplicitArgOffset = 0;
  const DataLayout &DL = Fn.getParent()->getDataLayout();

  unsigned InIndex = 0;

  for (const Argument &Arg : Fn.args()) {
    Type *BaseArgTy = Arg.getType();
    unsigned Align = DL.getABITypeAlignment(BaseArgTy);
    MaxAlign = std::max(Align, MaxAlign);
    unsigned AllocSize = DL.getTypeAllocSize(BaseArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, Align) + ExplicitOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, Align) + AllocSize;

    // We're basically throwing away everything passed into us and starting
    // over to get accurate in-memory offsets. The "PartOffset" is completely
    // useless to us as computed in Ins.
    //
    // We also need to figure out what type legalization is trying to do to
    // get the correct memory offsets.

    SmallVector<EVT, 16> ValueVTs;
    SmallVector<uint64_t, 16> Offsets;
    ComputeValueVTs(*this, DL, BaseArgTy, ValueVTs, &Offsets, ArgOffset);

    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      uint64_t BasePartOffset = Offsets[Value];

      EVT ArgVT = ValueVTs[Value];
      EVT MemVT = ArgVT;
      MVT RegisterVT = getRegisterTypeForCallingConv(Ctx, CC, ArgVT);
      unsigned NumRegs = getNumRegistersForCallingConv(Ctx, CC, ArgVT);

      if (NumRegs == 1) {
        // This argument is not split, so the IR type is the memory type.
        if (ArgVT.isExtended()) {
          // We have an extended type, like i24, so we should just use the
          // register type.
          MemVT = RegisterVT;
        } else {
          MemVT = ArgVT;
        }
      } else if (ArgVT.isVector() && RegisterVT.isVector() &&
                 ArgVT.getScalarType() == RegisterVT.getScalarType()) {
        assert(ArgVT.getVectorNumElements() > RegisterVT.getVectorNumElements());
        // We have a vector value which has been split into a vector with
        // the same scalar type, but fewer elements. This should handle
        // all the floating-point vector types.
        MemVT = RegisterVT;
      } else if (ArgVT.isVector() &&
                 ArgVT.getVectorNumElements() == NumRegs) {
        // This arg has been split so that each element is stored in a
        // separate register.
        MemVT = ArgVT.getScalarType();
      } else if (ArgVT.isExtended()) {
        // We have an extended type, like i65.
        MemVT = RegisterVT;
      } else {
        unsigned MemoryBits = ArgVT.getStoreSizeInBits() / NumRegs;
        assert(ArgVT.getStoreSizeInBits() % NumRegs == 0);
        if (RegisterVT.isInteger()) {
          MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
        } else if (RegisterVT.isVector()) {
          assert(!RegisterVT.getScalarType().isFloatingPoint());
          unsigned NumElements = RegisterVT.getVectorNumElements();
          assert(MemoryBits % NumElements == 0);
          // This vector type has been split into another vector type with
          // a different element size.
          EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
                                           MemoryBits / NumElements);
          MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
        } else {
          llvm_unreachable("cannot deduce memory type.");
        }
      }

      // Convert one element vectors to scalar.
      if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
        MemVT = MemVT.getScalarType();

      // Round up vec3/vec5 arguments.
      if (MemVT.isVector() && !MemVT.isPow2VectorType()) {
        assert(MemVT.getVectorNumElements() == 3 ||
               MemVT.getVectorNumElements() == 5);
        MemVT = MemVT.getPow2VectorType(State.getContext());
      }

      unsigned PartOffset = 0;
      for (unsigned i = 0; i != NumRegs; ++i) {
        State.addLoc(CCValAssign::getCustomMem(InIndex++, RegisterVT,
                                               BasePartOffset + PartOffset,
                                               MemVT.getSimpleVT(),
                                               CCValAssign::Full));
        PartOffset += MemVT.getStoreSize();
      }
    }
  }
}

SDValue AMDGPUTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv,
    bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SDLoc &DL, SelectionDAG &DAG) const {
  // FIXME: Fails for r600 tests
  //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
  //       "wave terminate should not have return values");
  return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

/// Selects the correct CCAssignFn for a given CallingConvention value.
CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                    bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
}

CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                      bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
}

SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain,
                                                  SelectionDAG &DAG,
                                                  MachineFrameInfo &MFI,
                                                  int ClobberedFI) const {
  SmallVector<SDValue, 8> ArgChains;
  int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
  int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
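  // (Loads from incoming stack arguments are chained to the DAG entry node,
  // which is why the scan below walks the entry node's users.)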
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument load corresponding to the
  // clobbered frame index.
  for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
                            UE = DAG.getEntryNode().getNode()->use_end();
       U != UE; ++U) {
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) {
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
        if (FI->getIndex() < 0) {
          int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
          int64_t InLastByte = InFirstByte;
          InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;

          if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
              (FirstByte <= InFirstByte && InFirstByte <= LastByte))
            ArgChains.push_back(SDValue(L, 1));
        }
      }
    }
  }

  // Build a tokenfactor for all the chains.
  return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}

SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
                                                 SmallVectorImpl<SDValue> &InVals,
                                                 StringRef Reason) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(
      Fn, Reason + FuncName, CLI.DL.getDebugLoc());
  DAG.getContext()->diagnose(NoCalls);

  if (!CLI.IsTailCall) {
    for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
      InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
  }

  return DAG.getEntryNode();
}

SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  return lowerUnhandledCall(CLI, InVals, "unsupported call to function ");
}

SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                      SelectionDAG &DAG) const {
  const Function &Fn = DAG.getMachineFunction().getFunction();

  DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
                                            SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(NoDynamicAlloca);
  auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()),
              Op.getOperand(0)};
  return DAG.getMergeValues(Ops, SDLoc());
}

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op->print(errs(), &DAG);
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
  case ISD::FREM: return LowerFREM(Op, DAG);
  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
  case ISD::FRINT: return LowerFRINT(Op, DAG);
  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
  case ISD::FROUND: return LowerFROUND(Op, DAG);
  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
  case ISD::FLOG:
    return LowerFLOG(Op, DAG, 1 / AMDGPU_LOG2E_F);
  case ISD::FLOG10:
    return LowerFLOG(Op, DAG, AMDGPU_LN2_F / AMDGPU_LN10_F);
  case ISD::FEXP:
    return lowerFEXP(Op, DAG);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    return LowerCTLZ_CTTZ(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  default:
    return;
  }
}

static bool hasDefinedInitializer(const GlobalValue *GV) {
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer())
    return false;

  return !isa<UndefValue>(GVar->getInitializer());
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {
  const DataLayout &DL = DAG.getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  if (G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
      G->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) {
    if (!MFI->isEntryFunction()) {
      const Function &Fn = DAG.getMachineFunction().getFunction();
      DiagnosticInfoUnsupported BadLDSDecl(
          Fn, "local memory global used by non-kernel function",
          SDLoc(Op).getDebugLoc());
      DAG.getContext()->diagnose(BadLDSDecl);
    }

    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    // TODO: We could emit code to handle the initialization somewhere.
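    // (allocateLDSGlobal presumably assigns the variable a fixed byte offset
    // within the kernel's LDS allocation; the "address" returned below is
    // just that offset as a constant.)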
    if (!hasDefinedInitializer(GV)) {
      unsigned Offset = MFI->allocateLDSGlobal(DL, *GV);
      return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
    }
  }

  const Function &Fn = DAG.getMachineFunction().getFunction();
  DiagnosticInfoUnsupported BadInit(
      Fn, "unsupported initializer for address space",
      SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(BadInit);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;

  EVT VT = Op.getValueType();
  if (VT == MVT::v4i16 || VT == MVT::v4f16) {
    SDLoc SL(Op);
    SDValue Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(0));
    SDValue Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(1));

    SDValue BV = DAG.getBuildVector(MVT::v2i32, SL, { Lo, Hi });
    return DAG.getNode(ISD::BITCAST, SL, VT, BV);
  }

  for (const SDUse &U : Op->ops())
    DAG.ExtractVectorElements(U.get(), Args);

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

/// Generate Min/Max node
SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
                                                   SDValue LHS, SDValue RHS,
                                                   SDValue True, SDValue False,
                                                   SDValue CC,
                                                   DAGCombinerInfo &DCI) const {
  if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    break;
  case ISD::SETULE:
  case ISD::SETULT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    // Ordered. Assume ordered for undefined.

    // Only do this after legalization to avoid interfering with other
    // combines which might occur.
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    // We need to permute the operands to get the correct NaN behavior. The
    // selected operand is the second one based on the failing compare with
    // NaN, so permute it based on the compare type the hardware uses.
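    // For example, select (setolt x, y), x, y must yield y when an input is
    // NaN (the ordered compare fails); the legacy min likewise selects its
    // second operand on a failed compare, so FMIN_LEGACY(x, y) matches here.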
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETUGE:
  case ISD::SETUGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETOGE:
  case ISD::SETOGT: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}

std::pair<SDValue, SDValue>
AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);

  return std::make_pair(Lo, Hi);
}

SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
}

SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
}

// Split a vector type into two parts. The first part is a power of two
// vector. The second part is whatever is left over, and is a scalar if it
// would otherwise be a 1-vector.
std::pair<EVT, EVT>
AMDGPUTargetLowering::getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const {
  EVT LoVT, HiVT;
  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  unsigned LoNumElts = PowerOf2Ceil((NumElts + 1) / 2);
  LoVT = EVT::getVectorVT(*DAG.getContext(), EltVT, LoNumElts);
  HiVT = NumElts - LoNumElts == 1
             ? EltVT
             : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts);
  return std::make_pair(LoVT, HiVT);
}

// Split a vector value into two parts of types LoVT and HiVT. HiVT could be
// scalar.
std::pair<SDValue, SDValue>
AMDGPUTargetLowering::splitVector(const SDValue &N, const SDLoc &DL,
                                  const EVT &LoVT, const EVT &HiVT,
                                  SelectionDAG &DAG) const {
  assert(LoVT.getVectorNumElements() +
             (HiVT.isVector() ? HiVT.getVectorNumElements() : 1) <=
             N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  auto IdxTy = getVectorIdxTy(DAG.getDataLayout());
  SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
                           DAG.getConstant(0, DL, IdxTy));
  SDValue Hi = DAG.getNode(
      HiVT.isVector() ? ISD::EXTRACT_SUBVECTOR : ISD::EXTRACT_VECTOR_ELT, DL,
      HiVT, N, DAG.getConstant(LoVT.getVectorNumElements(), DL, IdxTy));
  return std::make_pair(Lo, Hi);
}
SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT VT = Op.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return scalarizeVectorLoad(Load, DAG);

  SDValue BasePtr = Load->getBasePtr();
  EVT MemVT = Load->getMemoryVT();
  SDLoc SL(Op);

  const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
  std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
  std::tie(Lo, Hi) = splitVector(Op, SL, LoVT, HiVT, DAG);

  unsigned Size = LoMemVT.getStoreSize();
  unsigned BaseAlign = Load->getAlignment();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
                                  Load->getChain(), BasePtr, SrcValue, LoMemVT,
                                  BaseAlign, Load->getMemOperand()->getFlags());
  SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, Size);
  SDValue HiLoad =
      DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
                     HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                     HiMemVT, HiAlign, Load->getMemOperand()->getFlags());

  auto IdxTy = getVectorIdxTy(DAG.getDataLayout());
  SDValue Join;
  if (LoVT == HiVT) {
    // This is the case where the vector is a power of two, so it was evenly
    // split.
    Join = DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad);
  } else {
    Join = DAG.getNode(ISD::INSERT_SUBVECTOR, SL, VT, DAG.getUNDEF(VT), LoLoad,
                       DAG.getConstant(0, SL, IdxTy));
    Join = DAG.getNode(HiVT.isVector() ? ISD::INSERT_SUBVECTOR
                                       : ISD::INSERT_VECTOR_ELT,
                       SL, VT, Join, HiLoad,
                       DAG.getConstant(LoVT.getVectorNumElements(), SL, IdxTy));
  }

  SDValue Ops[] = {Join, DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                                     LoLoad.getValue(1), HiLoad.getValue(1))};

  return DAG.getMergeValues(Ops, SL);
}

// Widen a vector load from vec3 to vec4.
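// The widened load reads a fourth element past the original vec3; callers are
// presumably responsible for only using this where that over-read is safe,
// and the extra lane is discarded by the EXTRACT_SUBVECTOR of the result.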
1504 SDValue AMDGPUTargetLowering::WidenVectorLoad(SDValue Op, 1505 SelectionDAG &DAG) const { 1506 LoadSDNode *Load = cast<LoadSDNode>(Op); 1507 EVT VT = Op.getValueType(); 1508 assert(VT.getVectorNumElements() == 3); 1509 SDValue BasePtr = Load->getBasePtr(); 1510 EVT MemVT = Load->getMemoryVT(); 1511 SDLoc SL(Op); 1512 const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo(); 1513 unsigned BaseAlign = Load->getAlignment(); 1514 1515 EVT WideVT = 1516 EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4); 1517 EVT WideMemVT = 1518 EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), 4); 1519 SDValue WideLoad = DAG.getExtLoad( 1520 Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue, 1521 WideMemVT, BaseAlign, Load->getMemOperand()->getFlags()); 1522 return DAG.getMergeValues( 1523 {DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, VT, WideLoad, 1524 DAG.getConstant(0, SL, getVectorIdxTy(DAG.getDataLayout()))), 1525 WideLoad.getValue(1)}, 1526 SL); 1527 } 1528 1529 SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op, 1530 SelectionDAG &DAG) const { 1531 StoreSDNode *Store = cast<StoreSDNode>(Op); 1532 SDValue Val = Store->getValue(); 1533 EVT VT = Val.getValueType(); 1534 1535 // If this is a 2 element vector, we really want to scalarize and not create 1536 // weird 1 element vectors. 1537 if (VT.getVectorNumElements() == 2) 1538 return scalarizeVectorStore(Store, DAG); 1539 1540 EVT MemVT = Store->getMemoryVT(); 1541 SDValue Chain = Store->getChain(); 1542 SDValue BasePtr = Store->getBasePtr(); 1543 SDLoc SL(Op); 1544 1545 EVT LoVT, HiVT; 1546 EVT LoMemVT, HiMemVT; 1547 SDValue Lo, Hi; 1548 1549 std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG); 1550 std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG); 1551 std::tie(Lo, Hi) = splitVector(Val, SL, LoVT, HiVT, DAG); 1552 1553 SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize()); 1554 1555 const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo(); 1556 unsigned BaseAlign = Store->getAlignment(); 1557 unsigned Size = LoMemVT.getStoreSize(); 1558 unsigned HiAlign = MinAlign(BaseAlign, Size); 1559 1560 SDValue LoStore = 1561 DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign, 1562 Store->getMemOperand()->getFlags()); 1563 SDValue HiStore = 1564 DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size), 1565 HiMemVT, HiAlign, Store->getMemOperand()->getFlags()); 1566 1567 return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore); 1568 } 1569 1570 // This is a shortcut for integer division because we have fast i32<->f32 1571 // conversions, and fast f32 reciprocal instructions. The fractional part of a 1572 // float is enough to accurately represent up to a 24-bit signed integer. 1573 SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, 1574 bool Sign) const { 1575 SDLoc DL(Op); 1576 EVT VT = Op.getValueType(); 1577 SDValue LHS = Op.getOperand(0); 1578 SDValue RHS = Op.getOperand(1); 1579 MVT IntVT = MVT::i32; 1580 MVT FltVT = MVT::f32; 1581 1582 unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS); 1583 if (LHSSignBits < 9) 1584 return SDValue(); 1585 1586 unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS); 1587 if (RHSSignBits < 9) 1588 return SDValue(); 1589 1590 unsigned BitSize = VT.getSizeInBits(); 1591 unsigned SignBits = std::min(LHSSignBits, RHSSignBits); 1592 unsigned DivBits = BitSize - SignBits; 1593 if (Sign) 1594 ++DivBits; 1595 1596 ISD::NodeType ToFp = Sign ? 
ISD::SINT_TO_FP : ISD::UINT_TO_FP;
1597 ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
1598
1599 SDValue jq = DAG.getConstant(1, DL, IntVT);
1600
1601 if (Sign) {
1602 // char|short jq = ia ^ ib;
1603 jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
1604
1605 // jq = jq >> (bitsize - 2)
1606 jq = DAG.getNode(ISD::SRA, DL, VT, jq,
1607 DAG.getConstant(BitSize - 2, DL, VT));
1608
1609 // jq = jq | 0x1
1610 jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
1611 }
1612
1613 // int ia = (int)LHS;
1614 SDValue ia = LHS;
1615
1616 // int ib = (int)RHS;
1617 SDValue ib = RHS;
1618
1619 // float fa = (float)ia;
1620 SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);
1621
1622 // float fb = (float)ib;
1623 SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);
1624
1625 SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
1626 fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));
1627
1628 // fq = trunc(fq);
1629 fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);
1630
1631 // float fqneg = -fq;
1632 SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);
1633
1634 // float fr = mad(fqneg, fb, fa);
1635 unsigned OpCode = Subtarget->hasFP32Denormals() ?
1636 (unsigned)AMDGPUISD::FMAD_FTZ :
1637 (unsigned)ISD::FMAD;
1638 SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa);
1639
1640 // int iq = (int)fq;
1641 SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);
1642
1643 // fr = fabs(fr);
1644 fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);
1645
1646 // fb = fabs(fb);
1647 fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);
1648
1649 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
1650
1651 // int cv = fr >= fb;
1652 SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);
1653
1654 // jq = (cv ? jq : 0);
1655 jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
1656
1657 // dst = iq + jq;
1658 SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);
1659
1660 // Rem needs compensation; it's easier to recompute it.
1661 SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
1662 Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
1663
1664 // Truncate to the number of bits this divide really is.
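// For example, a udiv whose operands are both known to fit in 8 unsigned bits
// has SignBits == 24, so DivBits == 8 and the results are masked with 0xff
// below; the signed path instead sign-extends in-register from the
// DivBits-wide type.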
1665 if (Sign) { 1666 SDValue InRegSize 1667 = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits)); 1668 Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize); 1669 Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize); 1670 } else { 1671 SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT); 1672 Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask); 1673 Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask); 1674 } 1675 1676 return DAG.getMergeValues({ Div, Rem }, DL); 1677 } 1678 1679 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op, 1680 SelectionDAG &DAG, 1681 SmallVectorImpl<SDValue> &Results) const { 1682 SDLoc DL(Op); 1683 EVT VT = Op.getValueType(); 1684 1685 assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64"); 1686 1687 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext()); 1688 1689 SDValue One = DAG.getConstant(1, DL, HalfVT); 1690 SDValue Zero = DAG.getConstant(0, DL, HalfVT); 1691 1692 //HiLo split 1693 SDValue LHS = Op.getOperand(0); 1694 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero); 1695 SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, One); 1696 1697 SDValue RHS = Op.getOperand(1); 1698 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero); 1699 SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, One); 1700 1701 if (DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) && 1702 DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) { 1703 1704 SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT), 1705 LHS_Lo, RHS_Lo); 1706 1707 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), Zero}); 1708 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), Zero}); 1709 1710 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV)); 1711 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM)); 1712 return; 1713 } 1714 1715 if (isTypeLegal(MVT::i64)) { 1716 // Compute denominator reciprocal. 1717 unsigned FMAD = Subtarget->hasFP32Denormals() ? 
1718 (unsigned)AMDGPUISD::FMAD_FTZ : 1719 (unsigned)ISD::FMAD; 1720 1721 SDValue Cvt_Lo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Lo); 1722 SDValue Cvt_Hi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Hi); 1723 SDValue Mad1 = DAG.getNode(FMAD, DL, MVT::f32, Cvt_Hi, 1724 DAG.getConstantFP(APInt(32, 0x4f800000).bitsToFloat(), DL, MVT::f32), 1725 Cvt_Lo); 1726 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, DL, MVT::f32, Mad1); 1727 SDValue Mul1 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Rcp, 1728 DAG.getConstantFP(APInt(32, 0x5f7ffffc).bitsToFloat(), DL, MVT::f32)); 1729 SDValue Mul2 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Mul1, 1730 DAG.getConstantFP(APInt(32, 0x2f800000).bitsToFloat(), DL, MVT::f32)); 1731 SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, MVT::f32, Mul2); 1732 SDValue Mad2 = DAG.getNode(FMAD, DL, MVT::f32, Trunc, 1733 DAG.getConstantFP(APInt(32, 0xcf800000).bitsToFloat(), DL, MVT::f32), 1734 Mul1); 1735 SDValue Rcp_Lo = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Mad2); 1736 SDValue Rcp_Hi = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Trunc); 1737 SDValue Rcp64 = DAG.getBitcast(VT, 1738 DAG.getBuildVector(MVT::v2i32, DL, {Rcp_Lo, Rcp_Hi})); 1739 1740 SDValue Zero64 = DAG.getConstant(0, DL, VT); 1741 SDValue One64 = DAG.getConstant(1, DL, VT); 1742 SDValue Zero1 = DAG.getConstant(0, DL, MVT::i1); 1743 SDVTList HalfCarryVT = DAG.getVTList(HalfVT, MVT::i1); 1744 1745 SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS); 1746 SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64); 1747 SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1); 1748 SDValue Mulhi1_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1, 1749 Zero); 1750 SDValue Mulhi1_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1, 1751 One); 1752 1753 SDValue Add1_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Lo, 1754 Mulhi1_Lo, Zero1); 1755 SDValue Add1_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Hi, 1756 Mulhi1_Hi, Add1_Lo.getValue(1)); 1757 SDValue Add1_HiNc = DAG.getNode(ISD::ADD, DL, HalfVT, Rcp_Hi, Mulhi1_Hi); 1758 SDValue Add1 = DAG.getBitcast(VT, 1759 DAG.getBuildVector(MVT::v2i32, DL, {Add1_Lo, Add1_Hi})); 1760 1761 SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1); 1762 SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2); 1763 SDValue Mulhi2_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2, 1764 Zero); 1765 SDValue Mulhi2_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2, 1766 One); 1767 1768 SDValue Add2_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_Lo, 1769 Mulhi2_Lo, Zero1); 1770 SDValue Add2_HiC = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_HiNc, 1771 Mulhi2_Hi, Add1_Lo.getValue(1)); 1772 SDValue Add2_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add2_HiC, 1773 Zero, Add2_Lo.getValue(1)); 1774 SDValue Add2 = DAG.getBitcast(VT, 1775 DAG.getBuildVector(MVT::v2i32, DL, {Add2_Lo, Add2_Hi})); 1776 SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2); 1777 1778 SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3); 1779 1780 SDValue Mul3_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, Zero); 1781 SDValue Mul3_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, One); 1782 SDValue Sub1_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Lo, 1783 Mul3_Lo, Zero1); 1784 SDValue Sub1_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Hi, 1785 Mul3_Hi, Sub1_Lo.getValue(1)); 1786 SDValue Sub1_Mi = DAG.getNode(ISD::SUB, DL, HalfVT, LHS_Hi, Mul3_Hi); 1787 SDValue Sub1 = DAG.getBitcast(VT, 
1788 DAG.getBuildVector(MVT::v2i32, DL, {Sub1_Lo, Sub1_Hi}));
1789
1790 SDValue MinusOne = DAG.getConstant(0xffffffffu, DL, HalfVT);
1791 SDValue C1 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, MinusOne, Zero,
1792 ISD::SETUGE);
1793 SDValue C2 = DAG.getSelectCC(DL, Sub1_Lo, RHS_Lo, MinusOne, Zero,
1794 ISD::SETUGE);
1795 SDValue C3 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, C2, C1, ISD::SETEQ);
1796
1797 // TODO: Here and below, portions of the code could be enclosed in if/endif
1798 // blocks. Currently the control flow is unconditional, and we have 4 selects
1799 // after the potential endif to substitute PHIs.
1800
1801 // if C3 != 0 ...
1802 SDValue Sub2_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Lo,
1803 RHS_Lo, Zero1);
1804 SDValue Sub2_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Mi,
1805 RHS_Hi, Sub1_Lo.getValue(1));
1806 SDValue Sub2_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1807 Zero, Sub2_Lo.getValue(1));
1808 SDValue Sub2 = DAG.getBitcast(VT,
1809 DAG.getBuildVector(MVT::v2i32, DL, {Sub2_Lo, Sub2_Hi}));
1810
1811 SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64);
1812
1813 SDValue C4 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, MinusOne, Zero,
1814 ISD::SETUGE);
1815 SDValue C5 = DAG.getSelectCC(DL, Sub2_Lo, RHS_Lo, MinusOne, Zero,
1816 ISD::SETUGE);
1817 SDValue C6 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, C5, C4, ISD::SETEQ);
1818
1819 // if (C6 != 0)
1820 SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64);
1821
1822 SDValue Sub3_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Lo,
1823 RHS_Lo, Zero1);
1824 SDValue Sub3_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1825 RHS_Hi, Sub2_Lo.getValue(1));
1826 SDValue Sub3_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub3_Mi,
1827 Zero, Sub3_Lo.getValue(1));
1828 SDValue Sub3 = DAG.getBitcast(VT,
1829 DAG.getBuildVector(MVT::v2i32, DL, {Sub3_Lo, Sub3_Hi}));
1830
1831 // endif C6
1832 // endif C3
1833
1834 SDValue Sel1 = DAG.getSelectCC(DL, C6, Zero, Add4, Add3, ISD::SETNE);
1835 SDValue Div = DAG.getSelectCC(DL, C3, Zero, Sel1, Mulhi3, ISD::SETNE);
1836
1837 SDValue Sel2 = DAG.getSelectCC(DL, C6, Zero, Sub3, Sub2, ISD::SETNE);
1838 SDValue Rem = DAG.getSelectCC(DL, C3, Zero, Sel2, Sub1, ISD::SETNE);
1839
1840 Results.push_back(Div);
1841 Results.push_back(Rem);
1842
1843 return;
1844 }
1845
1846 // r600 expansion.
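// As a rough scalar sketch (for illustration only), the loop below performs
// restoring long division over the low 32 bits, seeded with a speculative
// div/rem of the high half:
//   for (int i = 31; i >= 0; --i) {
//     REM = (REM << 1) | ((LHS_Lo >> i) & 1);
//     if (REM >= RHS) { REM -= RHS; DIV_Lo |= 1u << i; }
//   }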
1847 // Get speculative values
1848 SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
1849 SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
1850
1851 SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, Zero, REM_Part, LHS_Hi, ISD::SETEQ);
1852 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, Zero});
1853 REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
1854
1855 SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, Zero, DIV_Part, Zero, ISD::SETEQ);
1856 SDValue DIV_Lo = Zero;
1857
1858 const unsigned halfBitWidth = HalfVT.getSizeInBits();
1859
1860 for (unsigned i = 0; i < halfBitWidth; ++i) {
1861 const unsigned bitPos = halfBitWidth - i - 1;
1862 SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
1863 // Get value of high bit
1864 SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
1865 HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, One);
1866 HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
1867
1868 // Shift
1869 REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
1870 // Add LHS high bit
1871 REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
1872
1873 SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
1874 SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, Zero, ISD::SETUGE);
1875
1876 DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
1877
1878 // Update REM
1879 SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
1880 REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
1881 }
1882
1883 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
1884 DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
1885 Results.push_back(DIV);
1886 Results.push_back(REM);
1887 }
1888
1889 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
1890 SelectionDAG &DAG) const {
1891 SDLoc DL(Op);
1892 EVT VT = Op.getValueType();
1893
1894 if (VT == MVT::i64) {
1895 SmallVector<SDValue, 2> Results;
1896 LowerUDIVREM64(Op, DAG, Results);
1897 return DAG.getMergeValues(Results, DL);
1898 }
1899
1900 if (VT == MVT::i32) {
1901 if (SDValue Res = LowerDIVREM24(Op, DAG, false))
1902 return Res;
1903 }
1904
1905 SDValue Num = Op.getOperand(0);
1906 SDValue Den = Op.getOperand(1);
1907
1908 // RCP = URECIP(Den) = 2^32 / Den + e
1909 // e is the rounding error.
1910 SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);
1911
1912 // RCP_LO = mul(RCP, Den)
1913 SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den);
1914
1915 // RCP_HI = mulhu(RCP, Den)
1916 SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);
1917
1918 // NEG_RCP_LO = -RCP_LO
1919 SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
1920 RCP_LO);
1921
1922 // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
1923 SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
1924 NEG_RCP_LO, RCP_LO,
1925 ISD::SETEQ);
1926 // Calculate the rounding error from the URECIP instruction
1927 // E = mulhu(ABS_RCP_LO, RCP)
1928 SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);
1929
1930 // RCP_A_E = RCP + E
1931 SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);
1932
1933 // RCP_S_E = RCP - E
1934 SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);
1935
1936 // Tmp0 = (RCP_HI == 0 ?
RCP_A_E : RCP_S_E)
1937 SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
1938 RCP_A_E, RCP_S_E,
1939 ISD::SETEQ);
1940 // Quotient = mulhu(Tmp0, Num)
1941 SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);
1942
1943 // Num_S_Remainder = Quotient * Den
1944 SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den);
1945
1946 // Remainder = Num - Num_S_Remainder
1947 SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);
1948
1949 // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
1950 SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
1951 DAG.getConstant(-1, DL, VT),
1952 DAG.getConstant(0, DL, VT),
1953 ISD::SETUGE);
1954 // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
1955 SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
1956 Num_S_Remainder,
1957 DAG.getConstant(-1, DL, VT),
1958 DAG.getConstant(0, DL, VT),
1959 ISD::SETUGE);
1960 // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
1961 SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
1962 Remainder_GE_Zero);
1963
1964 // Calculate Division result:
1965
1966 // Quotient_A_One = Quotient + 1
1967 SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
1968 DAG.getConstant(1, DL, VT));
1969
1970 // Quotient_S_One = Quotient - 1
1971 SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
1972 DAG.getConstant(1, DL, VT));
1973
1974 // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
1975 SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
1976 Quotient, Quotient_A_One, ISD::SETEQ);
1977
1978 // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
1979 Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
1980 Quotient_S_One, Div, ISD::SETEQ);
1981
1982 // Calculate Rem result:
1983
1984 // Remainder_S_Den = Remainder - Den
1985 SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);
1986
1987 // Remainder_A_Den = Remainder + Den
1988 SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);
1989
1990 // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
1991 SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
1992 Remainder, Remainder_S_Den, ISD::SETEQ);
1993
1994 // Rem = (Remainder_GE_Zero == 0 ?
Remainder_A_Den : Rem) 1995 Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT), 1996 Remainder_A_Den, Rem, ISD::SETEQ); 1997 SDValue Ops[2] = { 1998 Div, 1999 Rem 2000 }; 2001 return DAG.getMergeValues(Ops, DL); 2002 } 2003 2004 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op, 2005 SelectionDAG &DAG) const { 2006 SDLoc DL(Op); 2007 EVT VT = Op.getValueType(); 2008 2009 SDValue LHS = Op.getOperand(0); 2010 SDValue RHS = Op.getOperand(1); 2011 2012 SDValue Zero = DAG.getConstant(0, DL, VT); 2013 SDValue NegOne = DAG.getConstant(-1, DL, VT); 2014 2015 if (VT == MVT::i32) { 2016 if (SDValue Res = LowerDIVREM24(Op, DAG, true)) 2017 return Res; 2018 } 2019 2020 if (VT == MVT::i64 && 2021 DAG.ComputeNumSignBits(LHS) > 32 && 2022 DAG.ComputeNumSignBits(RHS) > 32) { 2023 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext()); 2024 2025 //HiLo split 2026 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero); 2027 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero); 2028 SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT), 2029 LHS_Lo, RHS_Lo); 2030 SDValue Res[2] = { 2031 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)), 2032 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1)) 2033 }; 2034 return DAG.getMergeValues(Res, DL); 2035 } 2036 2037 SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT); 2038 SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT); 2039 SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign); 2040 SDValue RSign = LHSign; // Remainder sign is the same as LHS 2041 2042 LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign); 2043 RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign); 2044 2045 LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign); 2046 RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign); 2047 2048 SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS); 2049 SDValue Rem = Div.getValue(1); 2050 2051 Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign); 2052 Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign); 2053 2054 Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign); 2055 Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign); 2056 2057 SDValue Res[2] = { 2058 Div, 2059 Rem 2060 }; 2061 return DAG.getMergeValues(Res, DL); 2062 } 2063 2064 // (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y)) 2065 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const { 2066 SDLoc SL(Op); 2067 EVT VT = Op.getValueType(); 2068 SDValue X = Op.getOperand(0); 2069 SDValue Y = Op.getOperand(1); 2070 2071 // TODO: Should this propagate fast-math-flags? 
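// Worked example: frem(5.5, 2.0) -> fdiv gives 2.75, ftrunc gives 2.0,
// fmul gives 4.0, and fsub yields 1.5.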
2072
2073 SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y);
2074 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, VT, Div);
2075 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Trunc, Y);
2076
2077 return DAG.getNode(ISD::FSUB, SL, VT, X, Mul);
2078 }
2079
2080 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
2081 SDLoc SL(Op);
2082 SDValue Src = Op.getOperand(0);
2083
2084 // result = trunc(src)
2085 // if (src > 0.0 && src != result)
2086 // result += 1.0
2087
2088 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2089
2090 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2091 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
2092
2093 EVT SetCCVT =
2094 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2095
2096 SDValue Gt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
2097 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2098 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Gt0, NeTrunc);
2099
2100 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
2101 // TODO: Should this propagate fast-math-flags?
2102 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2103 }
2104
2105 static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
2106 SelectionDAG &DAG) {
2107 const unsigned FractBits = 52;
2108 const unsigned ExpBits = 11;
2109
2110 SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
2111 Hi,
2112 DAG.getConstant(FractBits - 32, SL, MVT::i32),
2113 DAG.getConstant(ExpBits, SL, MVT::i32));
2114 SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
2115 DAG.getConstant(1023, SL, MVT::i32));
2116
2117 return Exp;
2118 }
2119
2120 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
2121 SDLoc SL(Op);
2122 SDValue Src = Op.getOperand(0);
2123
2124 assert(Op.getValueType() == MVT::f64);
2125
2126 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2127 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2128
2129 SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2130
2131 // Extract the upper half, since this is where we will find the sign and
2132 // exponent.
2133 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);
2134
2135 SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2136
2137 const unsigned FractBits = 52;
2138
2139 // Extract the sign bit.
2140 const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
2141 SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
2142
2143 // Extend back to 64 bits.
2144 SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit}); 2145 SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64); 2146 2147 SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src); 2148 const SDValue FractMask 2149 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64); 2150 2151 SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp); 2152 SDValue Not = DAG.getNOT(SL, Shr, MVT::i64); 2153 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not); 2154 2155 EVT SetCCVT = 2156 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32); 2157 2158 const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32); 2159 2160 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT); 2161 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT); 2162 2163 SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0); 2164 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1); 2165 2166 return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2); 2167 } 2168 2169 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const { 2170 SDLoc SL(Op); 2171 SDValue Src = Op.getOperand(0); 2172 2173 assert(Op.getValueType() == MVT::f64); 2174 2175 APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52"); 2176 SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64); 2177 SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src); 2178 2179 // TODO: Should this propagate fast-math-flags? 2180 2181 SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign); 2182 SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign); 2183 2184 SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src); 2185 2186 APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51"); 2187 SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64); 2188 2189 EVT SetCCVT = 2190 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64); 2191 SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT); 2192 2193 return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2); 2194 } 2195 2196 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const { 2197 // FNEARBYINT and FRINT are the same, except in their handling of FP 2198 // exceptions. Those aren't really meaningful for us, and OpenCL only has 2199 // rint, so just treat them as equivalent. 2200 return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0)); 2201 } 2202 2203 // XXX - May require not supporting f32 denormals? 2204 2205 // Don't handle v2f16. The extra instructions to scalarize and repack around the 2206 // compare and vselect end up producing worse code than scalarizing the whole 2207 // operation. 2208 SDValue AMDGPUTargetLowering::LowerFROUND32_16(SDValue Op, SelectionDAG &DAG) const { 2209 SDLoc SL(Op); 2210 SDValue X = Op.getOperand(0); 2211 EVT VT = Op.getValueType(); 2212 2213 SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X); 2214 2215 // TODO: Should this propagate fast-math-flags? 
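// For example, x = 2.5: T = 2.0 and |x - T| = 0.5, so the select below adds
// copysign(1.0, x) and the result is 3.0; halfway cases round away from zero.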
2216 2217 SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T); 2218 2219 SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff); 2220 2221 const SDValue Zero = DAG.getConstantFP(0.0, SL, VT); 2222 const SDValue One = DAG.getConstantFP(1.0, SL, VT); 2223 const SDValue Half = DAG.getConstantFP(0.5, SL, VT); 2224 2225 SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, VT, One, X); 2226 2227 EVT SetCCVT = 2228 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 2229 2230 SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE); 2231 2232 SDValue Sel = DAG.getNode(ISD::SELECT, SL, VT, Cmp, SignOne, Zero); 2233 2234 return DAG.getNode(ISD::FADD, SL, VT, T, Sel); 2235 } 2236 2237 SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const { 2238 SDLoc SL(Op); 2239 SDValue X = Op.getOperand(0); 2240 2241 SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X); 2242 2243 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 2244 const SDValue One = DAG.getConstant(1, SL, MVT::i32); 2245 const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32); 2246 const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32); 2247 EVT SetCCVT = 2248 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32); 2249 2250 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); 2251 2252 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One); 2253 2254 SDValue Exp = extractF64Exponent(Hi, SL, DAG); 2255 2256 const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL, 2257 MVT::i64); 2258 2259 SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp); 2260 SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64, 2261 DAG.getConstant(INT64_C(0x0008000000000000), SL, 2262 MVT::i64), 2263 Exp); 2264 2265 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M); 2266 SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT, 2267 DAG.getConstant(0, SL, MVT::i64), Tmp0, 2268 ISD::SETNE); 2269 2270 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1, 2271 D, DAG.getConstant(0, SL, MVT::i64)); 2272 SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2); 2273 2274 K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64)); 2275 K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K); 2276 2277 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT); 2278 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT); 2279 SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ); 2280 2281 SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64, 2282 ExpEqNegOne, 2283 DAG.getConstantFP(1.0, SL, MVT::f64), 2284 DAG.getConstantFP(0.0, SL, MVT::f64)); 2285 2286 SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X); 2287 2288 K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K); 2289 K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K); 2290 2291 return K; 2292 } 2293 2294 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const { 2295 EVT VT = Op.getValueType(); 2296 2297 if (VT == MVT::f32 || VT == MVT::f16) 2298 return LowerFROUND32_16(Op, DAG); 2299 2300 if (VT == MVT::f64) 2301 return LowerFROUND64(Op, DAG); 2302 2303 llvm_unreachable("unhandled type"); 2304 } 2305 2306 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const { 2307 SDLoc SL(Op); 2308 SDValue Src = Op.getOperand(0); 2309 2310 // result = trunc(src); 2311 // if (src < 0.0 && src != result) 2312 // result += -1.0. 
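// For example, src = -2.3: trunc gives -2.0, both conditions hold, and the
// result is -2.0 + -1.0 = -3.0.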
2313 2314 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src); 2315 2316 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64); 2317 const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64); 2318 2319 EVT SetCCVT = 2320 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64); 2321 2322 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT); 2323 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE); 2324 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc); 2325 2326 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero); 2327 // TODO: Should this propagate fast-math-flags? 2328 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add); 2329 } 2330 2331 SDValue AMDGPUTargetLowering::LowerFLOG(SDValue Op, SelectionDAG &DAG, 2332 double Log2BaseInverted) const { 2333 EVT VT = Op.getValueType(); 2334 2335 SDLoc SL(Op); 2336 SDValue Operand = Op.getOperand(0); 2337 SDValue Log2Operand = DAG.getNode(ISD::FLOG2, SL, VT, Operand); 2338 SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT); 2339 2340 return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand); 2341 } 2342 2343 // Return M_LOG2E of appropriate type 2344 static SDValue getLog2EVal(SelectionDAG &DAG, const SDLoc &SL, EVT VT) { 2345 switch (VT.getScalarType().getSimpleVT().SimpleTy) { 2346 case MVT::f32: 2347 return DAG.getConstantFP(1.44269504088896340735992468100189214f, SL, VT); 2348 case MVT::f16: 2349 return DAG.getConstantFP( 2350 APFloat(APFloat::IEEEhalf(), "1.44269504088896340735992468100189214"), 2351 SL, VT); 2352 case MVT::f64: 2353 return DAG.getConstantFP( 2354 APFloat(APFloat::IEEEdouble(), "0x1.71547652b82fep+0"), SL, VT); 2355 default: 2356 llvm_unreachable("unsupported fp type"); 2357 } 2358 } 2359 2360 // exp2(M_LOG2E_F * f); 2361 SDValue AMDGPUTargetLowering::lowerFEXP(SDValue Op, SelectionDAG &DAG) const { 2362 EVT VT = Op.getValueType(); 2363 SDLoc SL(Op); 2364 SDValue Src = Op.getOperand(0); 2365 2366 const SDValue K = getLog2EVal(DAG, SL, VT); 2367 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Src, K, Op->getFlags()); 2368 return DAG.getNode(ISD::FEXP2, SL, VT, Mul, Op->getFlags()); 2369 } 2370 2371 static bool isCtlzOpc(unsigned Opc) { 2372 return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF; 2373 } 2374 2375 static bool isCttzOpc(unsigned Opc) { 2376 return Opc == ISD::CTTZ || Opc == ISD::CTTZ_ZERO_UNDEF; 2377 } 2378 2379 SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const { 2380 SDLoc SL(Op); 2381 SDValue Src = Op.getOperand(0); 2382 bool ZeroUndef = Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF || 2383 Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF; 2384 2385 unsigned ISDOpc, NewOpc; 2386 if (isCtlzOpc(Op.getOpcode())) { 2387 ISDOpc = ISD::CTLZ_ZERO_UNDEF; 2388 NewOpc = AMDGPUISD::FFBH_U32; 2389 } else if (isCttzOpc(Op.getOpcode())) { 2390 ISDOpc = ISD::CTTZ_ZERO_UNDEF; 2391 NewOpc = AMDGPUISD::FFBL_B32; 2392 } else 2393 llvm_unreachable("Unexpected OPCode!!!"); 2394 2395 2396 if (ZeroUndef && Src.getValueType() == MVT::i32) 2397 return DAG.getNode(NewOpc, SL, MVT::i32, Src); 2398 2399 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src); 2400 2401 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 2402 const SDValue One = DAG.getConstant(1, SL, MVT::i32); 2403 2404 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero); 2405 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One); 2406 2407 EVT SetCCVT = 
getSetCCResultType(DAG.getDataLayout(),
2408 *DAG.getContext(), MVT::i32);
2409
2410 SDValue HiOrLo = isCtlzOpc(Op.getOpcode()) ? Hi : Lo;
2411 SDValue Hi0orLo0 = DAG.getSetCC(SL, SetCCVT, HiOrLo, Zero, ISD::SETEQ);
2412
2413 SDValue OprLo = DAG.getNode(ISDOpc, SL, MVT::i32, Lo);
2414 SDValue OprHi = DAG.getNode(ISDOpc, SL, MVT::i32, Hi);
2415
2416 const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32);
2417 SDValue Add, NewOpr;
2418 if (isCtlzOpc(Op.getOpcode())) {
2419 Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprLo, Bits32);
2420 // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x))
2421 NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprHi);
2422 } else {
2423 Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprHi, Bits32);
2424 // cttz(x) = lo_32(x) == 0 ? cttz(hi_32(x)) + 32 : cttz(lo_32(x))
2425 NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprLo);
2426 }
2427
2428 if (!ZeroUndef) {
2429 // Test if the full 64-bit input is zero.
2430
2431 // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32,
2432 // which we probably don't want.
2433 SDValue LoOrHi = isCtlzOpc(Op.getOpcode()) ? Lo : Hi;
2434 SDValue Lo0OrHi0 = DAG.getSetCC(SL, SetCCVT, LoOrHi, Zero, ISD::SETEQ);
2435 SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0OrHi0, Hi0orLo0);
2436
2437 // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction
2438 // with the same cycles, otherwise it is slower.
2439 // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src,
2440 // DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ);
2441
2442 const SDValue Bits64 = DAG.getConstant(64, SL, MVT::i32);
2443
2444 // The instruction returns -1 for 0 input, but the defined intrinsic
2445 // behavior is to return the number of bits.
2446 NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32,
2447 SrcIsZero, Bits64, NewOpr);
2448 }
2449
2450 return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr);
2451 }
2452
2453 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
2454 bool Signed) const {
2455 // Unsigned
2456 // cul2f(ulong u)
2457 //{
2458 // uint lz = clz(u);
2459 // uint e = (u != 0) ? 127U + 63U - lz : 0;
2460 // u = (u << lz) & 0x7fffffffffffffffUL;
2461 // ulong t = u & 0xffffffffffUL;
2462 // uint v = (e << 23) | (uint)(u >> 40);
2463 // uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
2464 // return as_float(v + r);
2465 //}
2466 // Signed
2467 // cl2f(long l)
2468 //{
2469 // long s = l >> 63;
2470 // float r = cul2f((l + s) ^ s);
2471 // return s ?
-r : r; 2472 //} 2473 2474 SDLoc SL(Op); 2475 SDValue Src = Op.getOperand(0); 2476 SDValue L = Src; 2477 2478 SDValue S; 2479 if (Signed) { 2480 const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64); 2481 S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit); 2482 2483 SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S); 2484 L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S); 2485 } 2486 2487 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), 2488 *DAG.getContext(), MVT::f32); 2489 2490 2491 SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32); 2492 SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64); 2493 SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L); 2494 LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ); 2495 2496 SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32); 2497 SDValue E = DAG.getSelect(SL, MVT::i32, 2498 DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE), 2499 DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ), 2500 ZeroI32); 2501 2502 SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64, 2503 DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ), 2504 DAG.getConstant((-1ULL) >> 1, SL, MVT::i64)); 2505 2506 SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U, 2507 DAG.getConstant(0xffffffffffULL, SL, MVT::i64)); 2508 2509 SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64, 2510 U, DAG.getConstant(40, SL, MVT::i64)); 2511 2512 SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32, 2513 DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)), 2514 DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, UShl)); 2515 2516 SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64); 2517 SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT); 2518 SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ); 2519 2520 SDValue One = DAG.getConstant(1, SL, MVT::i32); 2521 2522 SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One); 2523 2524 SDValue R = DAG.getSelect(SL, MVT::i32, 2525 RCmp, 2526 One, 2527 DAG.getSelect(SL, MVT::i32, TCmp, VTrunc1, ZeroI32)); 2528 R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R); 2529 R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R); 2530 2531 if (!Signed) 2532 return R; 2533 2534 SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R); 2535 return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R); 2536 } 2537 2538 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, 2539 bool Signed) const { 2540 SDLoc SL(Op); 2541 SDValue Src = Op.getOperand(0); 2542 2543 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src); 2544 2545 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, 2546 DAG.getConstant(0, SL, MVT::i32)); 2547 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, 2548 DAG.getConstant(1, SL, MVT::i32)); 2549 2550 SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP, 2551 SL, MVT::f64, Hi); 2552 2553 SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo); 2554 2555 SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi, 2556 DAG.getConstant(32, SL, MVT::i32)); 2557 // TODO: Should this propagate fast-math-flags? 2558 return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo); 2559 } 2560 2561 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op, 2562 SelectionDAG &DAG) const { 2563 assert(Op.getOperand(0).getValueType() == MVT::i64 && 2564 "operation should be legal"); 2565 2566 // TODO: Factor out code common with LowerSINT_TO_FP. 
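// The f16 case below converts via f32 first; the constant 0 operand on the
// FP_ROUND only records that the narrowing is not known to be exact.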
2567 2568 EVT DestVT = Op.getValueType(); 2569 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) { 2570 SDLoc DL(Op); 2571 SDValue Src = Op.getOperand(0); 2572 2573 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src); 2574 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op)); 2575 SDValue FPRound = 2576 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag); 2577 2578 return FPRound; 2579 } 2580 2581 if (DestVT == MVT::f32) 2582 return LowerINT_TO_FP32(Op, DAG, false); 2583 2584 assert(DestVT == MVT::f64); 2585 return LowerINT_TO_FP64(Op, DAG, false); 2586 } 2587 2588 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op, 2589 SelectionDAG &DAG) const { 2590 assert(Op.getOperand(0).getValueType() == MVT::i64 && 2591 "operation should be legal"); 2592 2593 // TODO: Factor out code common with LowerUINT_TO_FP. 2594 2595 EVT DestVT = Op.getValueType(); 2596 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) { 2597 SDLoc DL(Op); 2598 SDValue Src = Op.getOperand(0); 2599 2600 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src); 2601 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op)); 2602 SDValue FPRound = 2603 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag); 2604 2605 return FPRound; 2606 } 2607 2608 if (DestVT == MVT::f32) 2609 return LowerINT_TO_FP32(Op, DAG, true); 2610 2611 assert(DestVT == MVT::f64); 2612 return LowerINT_TO_FP64(Op, DAG, true); 2613 } 2614 2615 SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG, 2616 bool Signed) const { 2617 SDLoc SL(Op); 2618 2619 SDValue Src = Op.getOperand(0); 2620 2621 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src); 2622 2623 SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL, 2624 MVT::f64); 2625 SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL, 2626 MVT::f64); 2627 // TODO: Should this propagate fast-math-flags? 2628 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0); 2629 2630 SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul); 2631 2632 2633 SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc); 2634 2635 SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL, 2636 MVT::i32, FloorMul); 2637 SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma); 2638 2639 SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi}); 2640 2641 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result); 2642 } 2643 2644 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const { 2645 SDLoc DL(Op); 2646 SDValue N0 = Op.getOperand(0); 2647 2648 // Convert to target node to get known bits 2649 if (N0.getValueType() == MVT::f32) 2650 return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0); 2651 2652 if (getTargetMachine().Options.UnsafeFPMath) { 2653 // There is a generic expand for FP_TO_FP16 with unsafe fast math. 2654 return SDValue(); 2655 } 2656 2657 assert(N0.getSimpleValueType() == MVT::f64); 2658 2659 // f64 -> f16 conversion using round-to-nearest-even rounding mode. 
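// In outline: rebias the exponent (1023 -> 15), keep the top mantissa bits
// plus a sticky bit for the shifted-out ones, shift further to form an f16
// denormal when the exponent underflows, round to nearest even on the low
// bits, and clamp overflow to infinity (0x7c00).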
2660 const unsigned ExpMask = 0x7ff; 2661 const unsigned ExpBiasf64 = 1023; 2662 const unsigned ExpBiasf16 = 15; 2663 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 2664 SDValue One = DAG.getConstant(1, DL, MVT::i32); 2665 SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0); 2666 SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U, 2667 DAG.getConstant(32, DL, MVT::i64)); 2668 UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32); 2669 U = DAG.getZExtOrTrunc(U, DL, MVT::i32); 2670 SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH, 2671 DAG.getConstant(20, DL, MVT::i64)); 2672 E = DAG.getNode(ISD::AND, DL, MVT::i32, E, 2673 DAG.getConstant(ExpMask, DL, MVT::i32)); 2674 // Subtract the fp64 exponent bias (1023) to get the real exponent and 2675 // add the f16 bias (15) to get the biased exponent for the f16 format. 2676 E = DAG.getNode(ISD::ADD, DL, MVT::i32, E, 2677 DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32)); 2678 2679 SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH, 2680 DAG.getConstant(8, DL, MVT::i32)); 2681 M = DAG.getNode(ISD::AND, DL, MVT::i32, M, 2682 DAG.getConstant(0xffe, DL, MVT::i32)); 2683 2684 SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH, 2685 DAG.getConstant(0x1ff, DL, MVT::i32)); 2686 MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U); 2687 2688 SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ); 2689 M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set); 2690 2691 // (M != 0 ? 0x0200 : 0) | 0x7c00; 2692 SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32, 2693 DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32), 2694 Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32)); 2695 2696 // N = M | (E << 12); 2697 SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M, 2698 DAG.getNode(ISD::SHL, DL, MVT::i32, E, 2699 DAG.getConstant(12, DL, MVT::i32))); 2700 2701 // B = clamp(1-E, 0, 13); 2702 SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32, 2703 One, E); 2704 SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero); 2705 B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B, 2706 DAG.getConstant(13, DL, MVT::i32)); 2707 2708 SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M, 2709 DAG.getConstant(0x1000, DL, MVT::i32)); 2710 2711 SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B); 2712 SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B); 2713 SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE); 2714 D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1); 2715 2716 SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT); 2717 SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V, 2718 DAG.getConstant(0x7, DL, MVT::i32)); 2719 V = DAG.getNode(ISD::SRL, DL, MVT::i32, V, 2720 DAG.getConstant(2, DL, MVT::i32)); 2721 SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32), 2722 One, Zero, ISD::SETEQ); 2723 SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32), 2724 One, Zero, ISD::SETGT); 2725 V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1); 2726 V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1); 2727 2728 V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32), 2729 DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT); 2730 V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32), 2731 I, V, ISD::SETEQ); 2732 2733 // Extract the sign bit. 
2734 SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH, 2735 DAG.getConstant(16, DL, MVT::i32)); 2736 Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign, 2737 DAG.getConstant(0x8000, DL, MVT::i32)); 2738 2739 V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V); 2740 return DAG.getZExtOrTrunc(V, DL, Op.getValueType()); 2741 } 2742 2743 SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op, 2744 SelectionDAG &DAG) const { 2745 SDValue Src = Op.getOperand(0); 2746 2747 // TODO: Factor out code common with LowerFP_TO_UINT. 2748 2749 EVT SrcVT = Src.getValueType(); 2750 if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) { 2751 SDLoc DL(Op); 2752 2753 SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src); 2754 SDValue FpToInt32 = 2755 DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend); 2756 2757 return FpToInt32; 2758 } 2759 2760 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64) 2761 return LowerFP64_TO_INT(Op, DAG, true); 2762 2763 return SDValue(); 2764 } 2765 2766 SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op, 2767 SelectionDAG &DAG) const { 2768 SDValue Src = Op.getOperand(0); 2769 2770 // TODO: Factor out code common with LowerFP_TO_SINT. 2771 2772 EVT SrcVT = Src.getValueType(); 2773 if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) { 2774 SDLoc DL(Op); 2775 2776 SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src); 2777 SDValue FpToInt32 = 2778 DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend); 2779 2780 return FpToInt32; 2781 } 2782 2783 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64) 2784 return LowerFP64_TO_INT(Op, DAG, false); 2785 2786 return SDValue(); 2787 } 2788 2789 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 2790 SelectionDAG &DAG) const { 2791 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2792 MVT VT = Op.getSimpleValueType(); 2793 MVT ScalarVT = VT.getScalarType(); 2794 2795 assert(VT.isVector()); 2796 2797 SDValue Src = Op.getOperand(0); 2798 SDLoc DL(Op); 2799 2800 // TODO: Don't scalarize on Evergreen? 2801 unsigned NElts = VT.getVectorNumElements(); 2802 SmallVector<SDValue, 8> Args; 2803 DAG.ExtractVectorElements(Src, Args, 0, NElts); 2804 2805 SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType()); 2806 for (unsigned I = 0; I < NElts; ++I) 2807 Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp); 2808 2809 return DAG.getBuildVector(VT, DL, Args); 2810 } 2811 2812 //===----------------------------------------------------------------------===// 2813 // Custom DAG optimizations 2814 //===----------------------------------------------------------------------===// 2815 2816 static bool isU24(SDValue Op, SelectionDAG &DAG) { 2817 return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24; 2818 } 2819 2820 static bool isI24(SDValue Op, SelectionDAG &DAG) { 2821 EVT VT = Op.getValueType(); 2822 return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated 2823 // as unsigned 24-bit values. 
2824 AMDGPUTargetLowering::numBitsSigned(Op, DAG) < 24; 2825 } 2826 2827 static SDValue simplifyI24(SDNode *Node24, 2828 TargetLowering::DAGCombinerInfo &DCI) { 2829 SelectionDAG &DAG = DCI.DAG; 2830 SDValue LHS = Node24->getOperand(0); 2831 SDValue RHS = Node24->getOperand(1); 2832 2833 APInt Demanded = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 24); 2834 2835 // First try to simplify using GetDemandedBits which allows the operands to 2836 // have other uses, but will only perform simplifications that involve 2837 // bypassing some nodes for this user. 2838 SDValue DemandedLHS = DAG.GetDemandedBits(LHS, Demanded); 2839 SDValue DemandedRHS = DAG.GetDemandedBits(RHS, Demanded); 2840 if (DemandedLHS || DemandedRHS) 2841 return DAG.getNode(Node24->getOpcode(), SDLoc(Node24), Node24->getVTList(), 2842 DemandedLHS ? DemandedLHS : LHS, 2843 DemandedRHS ? DemandedRHS : RHS); 2844 2845 // Now try SimplifyDemandedBits which can simplify the nodes used by our 2846 // operands if this node is the only user. 2847 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2848 if (TLI.SimplifyDemandedBits(LHS, Demanded, DCI)) 2849 return SDValue(Node24, 0); 2850 if (TLI.SimplifyDemandedBits(RHS, Demanded, DCI)) 2851 return SDValue(Node24, 0); 2852 2853 return SDValue(); 2854 } 2855 2856 template <typename IntTy> 2857 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset, 2858 uint32_t Width, const SDLoc &DL) { 2859 if (Width + Offset < 32) { 2860 uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width); 2861 IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width); 2862 return DAG.getConstant(Result, DL, MVT::i32); 2863 } 2864 2865 return DAG.getConstant(Src0 >> Offset, DL, MVT::i32); 2866 } 2867 2868 static bool hasVolatileUser(SDNode *Val) { 2869 for (SDNode *U : Val->uses()) { 2870 if (MemSDNode *M = dyn_cast<MemSDNode>(U)) { 2871 if (M->isVolatile()) 2872 return true; 2873 } 2874 } 2875 2876 return false; 2877 } 2878 2879 bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const { 2880 // i32 vectors are the canonical memory type. 2881 if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT)) 2882 return false; 2883 2884 if (!VT.isByteSized()) 2885 return false; 2886 2887 unsigned Size = VT.getStoreSize(); 2888 2889 if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector()) 2890 return false; 2891 2892 if (Size == 3 || (Size > 4 && (Size % 4 != 0))) 2893 return false; 2894 2895 return true; 2896 } 2897 2898 // Find a load or store from corresponding pattern root. 2899 // Roots may be build_vector, bitconvert or their combinations. 
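// e.g. the root may be (i32 (bitcast (load ...))) or a build_vector whose
// operands are (possibly bitcast) loads.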
2900 static MemSDNode* findMemSDNode(SDNode *N) {
2901 N = AMDGPUTargetLowering::stripBitcast(SDValue(N,0)).getNode();
2902 if (MemSDNode *MN = dyn_cast<MemSDNode>(N))
2903 return MN;
2904 assert(isa<BuildVectorSDNode>(N));
2905 for (SDValue V : N->op_values())
2906 if (MemSDNode *MN =
2907 dyn_cast<MemSDNode>(AMDGPUTargetLowering::stripBitcast(V)))
2908 return MN;
2909 llvm_unreachable("cannot find MemSDNode in the pattern!");
2910 }
2911
2912 bool AMDGPUTargetLowering::SelectFlatOffset(bool IsSigned,
2913 SelectionDAG &DAG,
2914 SDNode *N,
2915 SDValue Addr,
2916 SDValue &VAddr,
2917 SDValue &Offset,
2918 SDValue &SLC) const {
2919 const GCNSubtarget &ST =
2920 DAG.getMachineFunction().getSubtarget<GCNSubtarget>();
2921 int64_t OffsetVal = 0;
2922
2923 if (ST.hasFlatInstOffsets() &&
2924 (!ST.hasFlatSegmentOffsetBug() ||
2925 findMemSDNode(N)->getAddressSpace() != AMDGPUAS::FLAT_ADDRESS) &&
2926 DAG.isBaseWithConstantOffset(Addr)) {
2927 SDValue N0 = Addr.getOperand(0);
2928 SDValue N1 = Addr.getOperand(1);
2929 int64_t COffsetVal = cast<ConstantSDNode>(N1)->getSExtValue();
2930
2931 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
2932 if ((IsSigned && isInt<12>(COffsetVal)) ||
2933 (!IsSigned && isUInt<11>(COffsetVal))) {
2934 Addr = N0;
2935 OffsetVal = COffsetVal;
2936 }
2937 } else {
2938 if ((IsSigned && isInt<13>(COffsetVal)) ||
2939 (!IsSigned && isUInt<12>(COffsetVal))) {
2940 Addr = N0;
2941 OffsetVal = COffsetVal;
2942 }
2943 }
2944 }
2945
2946 VAddr = Addr;
2947 Offset = DAG.getTargetConstant(OffsetVal, SDLoc(), MVT::i16);
2948 SLC = DAG.getTargetConstant(0, SDLoc(), MVT::i1);
2949
2950 return true;
2951 }
2952
2953 // Replace a load of an illegal type with a load of a bitcast to a friendlier
2954 // type.
2955 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
2956 DAGCombinerInfo &DCI) const {
2957 if (!DCI.isBeforeLegalize())
2958 return SDValue();
2959
2960 LoadSDNode *LN = cast<LoadSDNode>(N);
2961 if (LN->isVolatile() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN))
2962 return SDValue();
2963
2964 SDLoc SL(N);
2965 SelectionDAG &DAG = DCI.DAG;
2966 EVT VT = LN->getMemoryVT();
2967
2968 unsigned Size = VT.getStoreSize();
2969 unsigned Align = LN->getAlignment();
2970 if (Align < Size && isTypeLegal(VT)) {
2971 bool IsFast;
2972 unsigned AS = LN->getAddressSpace();
2973
2974 // Expand unaligned loads earlier than legalization. Due to visitation order
2975 // problems during legalization, the emitted instructions to pack and unpack
2976 // the bytes again are not eliminated in the case of an unaligned copy.
2977 if (!allowsMisalignedMemoryAccesses(
2978 VT, AS, Align, LN->getMemOperand()->getFlags(), &IsFast)) {
2979 if (VT.isVector())
2980 return scalarizeVectorLoad(LN, DAG);
2981
2982 SDValue Ops[2];
2983 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
2984 return DAG.getMergeValues(Ops, SDLoc(N));
2985 }
2986
2987 if (!IsFast)
2988 return SDValue();
2989 }
2990
2991 if (!shouldCombineMemoryType(VT))
2992 return SDValue();
2993
2994 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2995
2996 SDValue NewLoad
2997 = DAG.getLoad(NewVT, SL, LN->getChain(),
2998 LN->getBasePtr(), LN->getMemOperand());
2999
3000 SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad);
3001 DCI.CombineTo(N, BC, NewLoad.getValue(1));
3002 return SDValue(N, 0);
3003 }
3004
3005 // Replace a store of an illegal type with a store of a bitcast to a friendlier
3006 // type.
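// For instance, a v4i8 store becomes an i32 store and a v8i8 store becomes a
// v2i32 store (see shouldCombineMemoryType and getEquivalentMemType).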
3007 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N, 3008 DAGCombinerInfo &DCI) const { 3009 if (!DCI.isBeforeLegalize()) 3010 return SDValue(); 3011 3012 StoreSDNode *SN = cast<StoreSDNode>(N); 3013 if (SN->isVolatile() || !ISD::isNormalStore(SN)) 3014 return SDValue(); 3015 3016 EVT VT = SN->getMemoryVT(); 3017 unsigned Size = VT.getStoreSize(); 3018 3019 SDLoc SL(N); 3020 SelectionDAG &DAG = DCI.DAG; 3021 unsigned Align = SN->getAlignment(); 3022 if (Align < Size && isTypeLegal(VT)) { 3023 bool IsFast; 3024 unsigned AS = SN->getAddressSpace(); 3025 3026 // Expand unaligned stores earlier than legalization. Due to visitation 3027 // order problems during legalization, the emitted instructions to pack and 3028 // unpack the bytes again are not eliminated in the case of an unaligned 3029 // copy. 3030 if (!allowsMisalignedMemoryAccesses( 3031 VT, AS, Align, SN->getMemOperand()->getFlags(), &IsFast)) { 3032 if (VT.isVector()) 3033 return scalarizeVectorStore(SN, DAG); 3034 3035 return expandUnalignedStore(SN, DAG); 3036 } 3037 3038 if (!IsFast) 3039 return SDValue(); 3040 } 3041 3042 if (!shouldCombineMemoryType(VT)) 3043 return SDValue(); 3044 3045 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); 3046 SDValue Val = SN->getValue(); 3047 3048 //DCI.AddToWorklist(Val.getNode()); 3049 3050 bool OtherUses = !Val.hasOneUse(); 3051 SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val); 3052 if (OtherUses) { 3053 SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal); 3054 DAG.ReplaceAllUsesOfValueWith(Val, CastBack); 3055 } 3056 3057 return DAG.getStore(SN->getChain(), SL, CastVal, 3058 SN->getBasePtr(), SN->getMemOperand()); 3059 } 3060 3061 // FIXME: This should go in generic DAG combiner with an isTruncateFree check, 3062 // but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU 3063 // issues. 3064 SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N, 3065 DAGCombinerInfo &DCI) const { 3066 SelectionDAG &DAG = DCI.DAG; 3067 SDValue N0 = N->getOperand(0); 3068 3069 // (vt2 (assertzext (truncate vt0:x), vt1)) -> 3070 // (vt2 (truncate (assertzext vt0:x, vt1))) 3071 if (N0.getOpcode() == ISD::TRUNCATE) { 3072 SDValue N1 = N->getOperand(1); 3073 EVT ExtVT = cast<VTSDNode>(N1)->getVT(); 3074 SDLoc SL(N); 3075 3076 SDValue Src = N0.getOperand(0); 3077 EVT SrcVT = Src.getValueType(); 3078 if (SrcVT.bitsGE(ExtVT)) { 3079 SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1); 3080 return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg); 3081 } 3082 } 3083 3084 return SDValue(); 3085 } 3086 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the 3087 /// binary operation \p Opc to it with the corresponding constant operands. 3088 SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl( 3089 DAGCombinerInfo &DCI, const SDLoc &SL, 3090 unsigned Opc, SDValue LHS, 3091 uint32_t ValLo, uint32_t ValHi) const { 3092 SelectionDAG &DAG = DCI.DAG; 3093 SDValue Lo, Hi; 3094 std::tie(Lo, Hi) = split64BitValue(LHS, DAG); 3095 3096 SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32); 3097 SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32); 3098 3099 SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS); 3100 SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS); 3101 3102 // Re-visit the ands. It's possible we eliminated one of them and it could 3103 // simplify the vector. 
3104   DCI.AddToWorklist(Lo.getNode());
3105   DCI.AddToWorklist(Hi.getNode());
3106
3107   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
3108   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3109 }
3110
3111 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
3112                                                 DAGCombinerInfo &DCI) const {
3113   EVT VT = N->getValueType(0);
3114
3115   ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3116   if (!RHS)
3117     return SDValue();
3118
3119   SDValue LHS = N->getOperand(0);
3120   unsigned RHSVal = RHS->getZExtValue();
3121   if (!RHSVal)
3122     return LHS;
3123
3124   SDLoc SL(N);
3125   SelectionDAG &DAG = DCI.DAG;
3126
3127   switch (LHS->getOpcode()) {
3128   default:
3129     break;
3130   case ISD::ZERO_EXTEND:
3131   case ISD::SIGN_EXTEND:
3132   case ISD::ANY_EXTEND: {
3133     SDValue X = LHS->getOperand(0);
3134
3135     if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
3136         isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) {
3137       // Prefer build_vector as the canonical form if packed types are legal.
3138       // (shl ([asz]ext i16:x), 16) -> (build_vector 0, x)
3139       SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL,
3140           { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
3141       return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
3142     }
3143
3144     // shl (ext x) => zext (shl x), if the shift does not overflow the narrow type
3145     if (VT != MVT::i64)
3146       break;
3147     KnownBits Known = DAG.computeKnownBits(X);
3148     unsigned LZ = Known.countMinLeadingZeros();
3149     if (LZ < RHSVal)
3150       break;
3151     EVT XVT = X.getValueType();
3152     SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
3153     return DAG.getZExtOrTrunc(Shl, SL, VT);
3154   }
3155   }
3156
3157   if (VT != MVT::i64)
3158     return SDValue();
3159
3160   // i64 (shl x, C) -> (build_pair 0, (shl x, C - 32))
3161
3162   // On some subtargets, 64-bit shift is a quarter rate instruction. In the
3163   // common case, splitting this into a move and a 32-bit shift is faster and
3164   // the same code size.
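  // e.g. (shl i64:x, 33) becomes (bitcast (build_vector 0, (shl (trunc x), 1))),
  // using only a 32-bit shift and a zero constant.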
3165   if (RHSVal < 32)
3166     return SDValue();
3167
3168   SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
3169
3170   SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
3171   SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
3172
3173   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3174
3175   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
3176   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3177 }
3178
3179 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
3180                                                 DAGCombinerInfo &DCI) const {
3181   if (N->getValueType(0) != MVT::i64)
3182     return SDValue();
3183
3184   const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3185   if (!RHS)
3186     return SDValue();
3187
3188   SelectionDAG &DAG = DCI.DAG;
3189   SDLoc SL(N);
3190   unsigned RHSVal = RHS->getZExtValue();
3191
3192   // (sra i64:x, 32) -> build_pair hi_32(x), (sra hi_32(x), 31)
3193   if (RHSVal == 32) {
3194     SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3195     SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3196                                    DAG.getConstant(31, SL, MVT::i32));
3197
3198     SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
3199     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3200   }
3201
3202   // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
3203   if (RHSVal == 63) {
3204     SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3205     SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3206                                    DAG.getConstant(31, SL, MVT::i32));
3207     SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
3208     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3209   }
3210
3211   return SDValue();
3212 }
3213
3214 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
3215                                                 DAGCombinerInfo &DCI) const {
3216   auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3217   if (!RHS)
3218     return SDValue();
3219
3220   EVT VT = N->getValueType(0);
3221   SDValue LHS = N->getOperand(0);
3222   unsigned ShiftAmt = RHS->getZExtValue();
3223   SelectionDAG &DAG = DCI.DAG;
3224   SDLoc SL(N);
3225
3226   // fold (srl (and x, (c1 << c2)), c2) -> (and (srl x, c2), c1)
3227   // This improves the ability to match BFE patterns in isel.
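  // e.g. (srl (and x, 0xff00), 8) -> (and (srl x, 8), 0xff), which isel can
  // match as a BFE of width 8 at offset 8.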
3228   if (LHS.getOpcode() == ISD::AND) {
3229     if (auto *Mask = dyn_cast<ConstantSDNode>(LHS.getOperand(1))) {
3230       if (Mask->getAPIntValue().isShiftedMask() &&
3231           Mask->getAPIntValue().countTrailingZeros() == ShiftAmt) {
3232         return DAG.getNode(
3233             ISD::AND, SL, VT,
3234             DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0), N->getOperand(1)),
3235             DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1), N->getOperand(1)));
3236       }
3237     }
3238   }
3239
3240   if (VT != MVT::i64)
3241     return SDValue();
3242
3243   if (ShiftAmt < 32)
3244     return SDValue();
3245
3246   // srl i64:x, C for C >= 32
3247   // =>
3248   //   build_pair (srl hi_32(x), C - 32), 0
3249   SDValue One = DAG.getConstant(1, SL, MVT::i32);
3250   SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3251
3252   SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, LHS);
3253   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecOp, One);
3254
3255   SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
3256   SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
3257
3258   SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
3259
3260   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
3261 }
3262
3263 SDValue AMDGPUTargetLowering::performTruncateCombine(
3264     SDNode *N, DAGCombinerInfo &DCI) const {
3265   SDLoc SL(N);
3266   SelectionDAG &DAG = DCI.DAG;
3267   EVT VT = N->getValueType(0);
3268   SDValue Src = N->getOperand(0);
3269
3270   // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x)
3271   if (Src.getOpcode() == ISD::BITCAST && !VT.isVector()) {
3272     SDValue Vec = Src.getOperand(0);
3273     if (Vec.getOpcode() == ISD::BUILD_VECTOR) {
3274       SDValue Elt0 = Vec.getOperand(0);
3275       EVT EltVT = Elt0.getValueType();
3276       if (VT.getSizeInBits() <= EltVT.getSizeInBits()) {
3277         if (EltVT.isFloatingPoint()) {
3278           Elt0 = DAG.getNode(ISD::BITCAST, SL,
3279                              EltVT.changeTypeToInteger(), Elt0);
3280         }
3281
3282         return DAG.getNode(ISD::TRUNCATE, SL, VT, Elt0);
3283       }
3284     }
3285   }
3286
3287   // Equivalent of above for accessing the high element of a vector as an
3288   // integer operation.
3289   // trunc (srl (bitcast (build_vector x, y)), 16) -> trunc (bitcast y)
3290   if (Src.getOpcode() == ISD::SRL && !VT.isVector()) {
3291     if (auto K = isConstOrConstSplat(Src.getOperand(1))) {
3292       if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) {
3293         SDValue BV = stripBitcast(Src.getOperand(0));
3294         if (BV.getOpcode() == ISD::BUILD_VECTOR &&
3295             BV.getValueType().getVectorNumElements() == 2) {
3296           SDValue SrcElt = BV.getOperand(1);
3297           EVT SrcEltVT = SrcElt.getValueType();
3298           if (SrcEltVT.isFloatingPoint()) {
3299             SrcElt = DAG.getNode(ISD::BITCAST, SL,
3300                                  SrcEltVT.changeTypeToInteger(), SrcElt);
3301           }
3302
3303           return DAG.getNode(ISD::TRUNCATE, SL, VT, SrcElt);
3304         }
3305       }
3306     }
3307   }
3308
3309   // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit.
3310   //
3311   // i16 (trunc (srl i64:x, K)), K <= 16 ->
3312   //     i16 (trunc (srl (i32 (trunc x), K)))
3313   if (VT.getScalarSizeInBits() < 32) {
3314     EVT SrcVT = Src.getValueType();
3315     if (SrcVT.getScalarSizeInBits() > 32 &&
3316         (Src.getOpcode() == ISD::SRL ||
3317          Src.getOpcode() == ISD::SRA ||
3318          Src.getOpcode() == ISD::SHL)) {
3319       SDValue Amt = Src.getOperand(1);
3320       KnownBits Known = DAG.computeKnownBits(Amt);
3321       unsigned Size = VT.getScalarSizeInBits();
3322       if ((Known.isConstant() && Known.getConstant().ule(Size)) ||
3323           (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size))) {
3324         EVT MidVT = VT.isVector() ?
3325           EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3326                            VT.getVectorNumElements()) : MVT::i32;
3327
3328         EVT NewShiftVT = getShiftAmountTy(MidVT, DAG.getDataLayout());
3329         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MidVT,
3330                                     Src.getOperand(0));
3331         DCI.AddToWorklist(Trunc.getNode());
3332
3333         if (Amt.getValueType() != NewShiftVT) {
3334           Amt = DAG.getZExtOrTrunc(Amt, SL, NewShiftVT);
3335           DCI.AddToWorklist(Amt.getNode());
3336         }
3337
3338         SDValue ShrunkShift = DAG.getNode(Src.getOpcode(), SL, MidVT,
3339                                           Trunc, Amt);
3340         return DAG.getNode(ISD::TRUNCATE, SL, VT, ShrunkShift);
3341       }
3342     }
3343   }
3344
3345   return SDValue();
3346 }
3347
3348 // We need to specifically handle i64 mul here to avoid unnecessary conversion
3349 // instructions. If we only match on the legalized i64 mul expansion,
3350 // SimplifyDemandedBits will be unable to remove them because there will be
3351 // multiple uses due to the separate mul + mulh[su].
3352 static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
3353                         SDValue N0, SDValue N1, unsigned Size, bool Signed) {
3354   if (Size <= 32) {
3355     unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3356     return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
3357   }
3358
3359   // Because we want to eliminate extension instructions before the
3360   // operation, we need to create a single user here (i.e. not the separate
3361   // mul_lo + mul_hi) so that SimplifyDemandedBits will deal with it.
3362
3363   unsigned MulOpc = Signed ? AMDGPUISD::MUL_LOHI_I24 : AMDGPUISD::MUL_LOHI_U24;
3364
3365   SDValue Mul = DAG.getNode(MulOpc, SL,
3366                             DAG.getVTList(MVT::i32, MVT::i32), N0, N1);
3367
3368   return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64,
3369                      Mul.getValue(0), Mul.getValue(1));
3370 }
3371
3372 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
3373                                                 DAGCombinerInfo &DCI) const {
3374   EVT VT = N->getValueType(0);
3375
3376   unsigned Size = VT.getSizeInBits();
3377   if (VT.isVector() || Size > 64)
3378     return SDValue();
3379
3380   // There are i16 integer mul/mad instructions.
3381   if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16))
3382     return SDValue();
3383
3384   SelectionDAG &DAG = DCI.DAG;
3385   SDLoc DL(N);
3386
3387   SDValue N0 = N->getOperand(0);
3388   SDValue N1 = N->getOperand(1);
3389
3390   // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
3391   // in the source into any_extends if the result of the mul is truncated. Since
3392   // we can assume the high bits are whatever we want, use the underlying value
3393   // to keep the unknown high bits from interfering.
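  // e.g. an (any_extend i32:x) operand is peeled back to x below, so the
  // 24-bit checks see the original value rather than unknown extended bits.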
3394 if (N0.getOpcode() == ISD::ANY_EXTEND) 3395 N0 = N0.getOperand(0); 3396 3397 if (N1.getOpcode() == ISD::ANY_EXTEND) 3398 N1 = N1.getOperand(0); 3399 3400 SDValue Mul; 3401 3402 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) { 3403 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32); 3404 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32); 3405 Mul = getMul24(DAG, DL, N0, N1, Size, false); 3406 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) { 3407 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32); 3408 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32); 3409 Mul = getMul24(DAG, DL, N0, N1, Size, true); 3410 } else { 3411 return SDValue(); 3412 } 3413 3414 // We need to use sext even for MUL_U24, because MUL_U24 is used 3415 // for signed multiply of 8 and 16-bit types. 3416 return DAG.getSExtOrTrunc(Mul, DL, VT); 3417 } 3418 3419 SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N, 3420 DAGCombinerInfo &DCI) const { 3421 EVT VT = N->getValueType(0); 3422 3423 if (!Subtarget->hasMulI24() || VT.isVector()) 3424 return SDValue(); 3425 3426 SelectionDAG &DAG = DCI.DAG; 3427 SDLoc DL(N); 3428 3429 SDValue N0 = N->getOperand(0); 3430 SDValue N1 = N->getOperand(1); 3431 3432 if (!isI24(N0, DAG) || !isI24(N1, DAG)) 3433 return SDValue(); 3434 3435 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32); 3436 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32); 3437 3438 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1); 3439 DCI.AddToWorklist(Mulhi.getNode()); 3440 return DAG.getSExtOrTrunc(Mulhi, DL, VT); 3441 } 3442 3443 SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N, 3444 DAGCombinerInfo &DCI) const { 3445 EVT VT = N->getValueType(0); 3446 3447 if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32) 3448 return SDValue(); 3449 3450 SelectionDAG &DAG = DCI.DAG; 3451 SDLoc DL(N); 3452 3453 SDValue N0 = N->getOperand(0); 3454 SDValue N1 = N->getOperand(1); 3455 3456 if (!isU24(N0, DAG) || !isU24(N1, DAG)) 3457 return SDValue(); 3458 3459 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32); 3460 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32); 3461 3462 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1); 3463 DCI.AddToWorklist(Mulhi.getNode()); 3464 return DAG.getZExtOrTrunc(Mulhi, DL, VT); 3465 } 3466 3467 SDValue AMDGPUTargetLowering::performMulLoHi24Combine( 3468 SDNode *N, DAGCombinerInfo &DCI) const { 3469 SelectionDAG &DAG = DCI.DAG; 3470 3471 // Simplify demanded bits before splitting into multiple users. 3472 if (SDValue V = simplifyI24(N, DCI)) 3473 return V; 3474 3475 SDValue N0 = N->getOperand(0); 3476 SDValue N1 = N->getOperand(1); 3477 3478 bool Signed = (N->getOpcode() == AMDGPUISD::MUL_LOHI_I24); 3479 3480 unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24; 3481 unsigned MulHiOpc = Signed ? 
AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
3482
3483   SDLoc SL(N);
3484
3485   SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
3486   SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
3487   return DAG.getMergeValues({ MulLo, MulHi }, SL);
3488 }
3489
3490 static bool isNegativeOne(SDValue Val) {
3491   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val))
3492     return C->isAllOnesValue();
3493   return false;
3494 }
3495
3496 SDValue AMDGPUTargetLowering::getFFBX_U32(SelectionDAG &DAG,
3497                                           SDValue Op,
3498                                           const SDLoc &DL,
3499                                           unsigned Opc) const {
3500   EVT VT = Op.getValueType();
3501   EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT);
3502   if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() &&
3503                               LegalVT != MVT::i16))
3504     return SDValue();
3505
3506   if (VT != MVT::i32)
3507     Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op);
3508
3509   SDValue FFBX = DAG.getNode(Opc, DL, MVT::i32, Op);
3510   if (VT != MVT::i32)
3511     FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX);
3512
3513   return FFBX;
3514 }
3515
3516 // The native instructions return -1 on 0 input. Optimize out a select that
3517 // produces -1 on 0.
3518 //
3519 // TODO: If zero is not undef, we could also do this if the output is compared
3520 // against the bitwidth.
3521 //
3522 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
3523 SDValue AMDGPUTargetLowering::performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond,
3524                                                       SDValue LHS, SDValue RHS,
3525                                                       DAGCombinerInfo &DCI) const {
3526   ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
3527   if (!CmpRhs || !CmpRhs->isNullValue())
3528     return SDValue();
3529
3530   SelectionDAG &DAG = DCI.DAG;
3531   ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
3532   SDValue CmpLHS = Cond.getOperand(0);
3533
3534   unsigned Opc = (isCttzOpc(RHS.getOpcode()) || isCttzOpc(LHS.getOpcode())) ?
3535       AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
3536
3537   // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
3538   // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_b32 x
3539   if (CCOpcode == ISD::SETEQ &&
3540       (isCtlzOpc(RHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
3541       RHS.getOperand(0) == CmpLHS &&
3542       isNegativeOne(LHS)) {
3543     return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3544   }
3545
3546   // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
3547   // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_b32 x
3548   if (CCOpcode == ISD::SETNE &&
3549       (isCtlzOpc(LHS.getOpcode()) || isCttzOpc(LHS.getOpcode())) &&
3550       LHS.getOperand(0) == CmpLHS &&
3551       isNegativeOne(RHS)) {
3552     return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3553   }
3554
3555   return SDValue();
3556 }
3557
3558 static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI,
3559                                          unsigned Op,
3560                                          const SDLoc &SL,
3561                                          SDValue Cond,
3562                                          SDValue N1,
3563                                          SDValue N2) {
3564   SelectionDAG &DAG = DCI.DAG;
3565   EVT VT = N1.getValueType();
3566
3567   SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond,
3568                                   N1.getOperand(0), N2.getOperand(0));
3569   DCI.AddToWorklist(NewSelect.getNode());
3570   return DAG.getNode(Op, SL, VT, NewSelect);
3571 }
3572
3573 // Pull a free FP operation out of a select so it may fold into uses.
3574 // 3575 // select c, (fneg x), (fneg y) -> fneg (select c, x, y) 3576 // select c, (fneg x), k -> fneg (select c, x, (fneg k)) 3577 // 3578 // select c, (fabs x), (fabs y) -> fabs (select c, x, y) 3579 // select c, (fabs x), +k -> fabs (select c, x, k) 3580 static SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI, 3581 SDValue N) { 3582 SelectionDAG &DAG = DCI.DAG; 3583 SDValue Cond = N.getOperand(0); 3584 SDValue LHS = N.getOperand(1); 3585 SDValue RHS = N.getOperand(2); 3586 3587 EVT VT = N.getValueType(); 3588 if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) || 3589 (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) { 3590 return distributeOpThroughSelect(DCI, LHS.getOpcode(), 3591 SDLoc(N), Cond, LHS, RHS); 3592 } 3593 3594 bool Inv = false; 3595 if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) { 3596 std::swap(LHS, RHS); 3597 Inv = true; 3598 } 3599 3600 // TODO: Support vector constants. 3601 ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); 3602 if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS) { 3603 SDLoc SL(N); 3604 // If one side is an fneg/fabs and the other is a constant, we can push the 3605 // fneg/fabs down. If it's an fabs, the constant needs to be non-negative. 3606 SDValue NewLHS = LHS.getOperand(0); 3607 SDValue NewRHS = RHS; 3608 3609 // Careful: if the neg can be folded up, don't try to pull it back down. 3610 bool ShouldFoldNeg = true; 3611 3612 if (NewLHS.hasOneUse()) { 3613 unsigned Opc = NewLHS.getOpcode(); 3614 if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(Opc)) 3615 ShouldFoldNeg = false; 3616 if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL) 3617 ShouldFoldNeg = false; 3618 } 3619 3620 if (ShouldFoldNeg) { 3621 if (LHS.getOpcode() == ISD::FNEG) 3622 NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3623 else if (CRHS->isNegative()) 3624 return SDValue(); 3625 3626 if (Inv) 3627 std::swap(NewLHS, NewRHS); 3628 3629 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, 3630 Cond, NewLHS, NewRHS); 3631 DCI.AddToWorklist(NewSelect.getNode()); 3632 return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect); 3633 } 3634 } 3635 3636 return SDValue(); 3637 } 3638 3639 3640 SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N, 3641 DAGCombinerInfo &DCI) const { 3642 if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0))) 3643 return Folded; 3644 3645 SDValue Cond = N->getOperand(0); 3646 if (Cond.getOpcode() != ISD::SETCC) 3647 return SDValue(); 3648 3649 EVT VT = N->getValueType(0); 3650 SDValue LHS = Cond.getOperand(0); 3651 SDValue RHS = Cond.getOperand(1); 3652 SDValue CC = Cond.getOperand(2); 3653 3654 SDValue True = N->getOperand(1); 3655 SDValue False = N->getOperand(2); 3656 3657 if (Cond.hasOneUse()) { // TODO: Look for multiple select uses. 3658 SelectionDAG &DAG = DCI.DAG; 3659 if (DAG.isConstantValueOfAnyType(True) && 3660 !DAG.isConstantValueOfAnyType(False)) { 3661 // Swap cmp + select pair to move constant to false input. 3662 // This will allow using VOPC cndmasks more often. 
3663       // select (setcc x, y), k, x -> select (setccinv x, y), x, k
3664
3665       SDLoc SL(N);
3666       ISD::CondCode NewCC = getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
3667                                             LHS.getValueType().isInteger());
3668
3669       SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC);
3670       return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True);
3671     }
3672
3673     if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
3674       SDValue MinMax
3675         = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
3676       // Revisit this node so we can catch min3/max3/med3 patterns.
3677       //DCI.AddToWorklist(MinMax.getNode());
3678       return MinMax;
3679     }
3680   }
3681
3682   // There's no reason not to do this if the condition has other uses.
3683   return performCtlz_CttzCombine(SDLoc(N), Cond, True, False, DCI);
3684 }
3685
3686 static bool isInv2Pi(const APFloat &APF) {
3687   static const APFloat KF16(APFloat::IEEEhalf(), APInt(16, 0x3118));
3688   static const APFloat KF32(APFloat::IEEEsingle(), APInt(32, 0x3e22f983));
3689   static const APFloat KF64(APFloat::IEEEdouble(), APInt(64, 0x3fc45f306dc9c882));
3690
3691   return APF.bitwiseIsEqual(KF16) ||
3692          APF.bitwiseIsEqual(KF32) ||
3693          APF.bitwiseIsEqual(KF64);
3694 }
3695
3696 // 0 and 1.0 / (0.5 * pi) do not have inline immediates, so there is an
3697 // additional cost to negate them.
3698 bool AMDGPUTargetLowering::isConstantCostlierToNegate(SDValue N) const {
3699   if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N)) {
3700     if (C->isZero() && !C->isNegative())
3701       return true;
3702
3703     if (Subtarget->hasInv2PiInlineImm() && isInv2Pi(C->getValueAPF()))
3704       return true;
3705   }
3706
3707   return false;
3708 }
3709
3710 static unsigned inverseMinMax(unsigned Opc) {
3711   switch (Opc) {
3712   case ISD::FMAXNUM:
3713     return ISD::FMINNUM;
3714   case ISD::FMINNUM:
3715     return ISD::FMAXNUM;
3716   case ISD::FMAXNUM_IEEE:
3717     return ISD::FMINNUM_IEEE;
3718   case ISD::FMINNUM_IEEE:
3719     return ISD::FMAXNUM_IEEE;
3720   case AMDGPUISD::FMAX_LEGACY:
3721     return AMDGPUISD::FMIN_LEGACY;
3722   case AMDGPUISD::FMIN_LEGACY:
3723     return AMDGPUISD::FMAX_LEGACY;
3724   default:
3725     llvm_unreachable("invalid min/max opcode");
3726   }
3727 }
3728
3729 SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
3730                                                  DAGCombinerInfo &DCI) const {
3731   SelectionDAG &DAG = DCI.DAG;
3732   SDValue N0 = N->getOperand(0);
3733   EVT VT = N->getValueType(0);
3734
3735   unsigned Opc = N0.getOpcode();
3736
3737   // If the input has multiple uses and we can either fold the negate down, or
3738   // the other uses cannot, give up. This both prevents unprofitable
3739   // transformations and infinite loops: we won't repeatedly try to fold around
3740   // a negate that has no 'good' form.
3741   if (N0.hasOneUse()) {
3742     // This may be able to fold into the source, but at a code size cost. Don't
3743     // fold if the fold into the user is free.
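    // i.e. if every user of this fneg accepts source modifiers, leave the
    // fneg in place so it folds into those users for free.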
3744 if (allUsesHaveSourceMods(N, 0)) 3745 return SDValue(); 3746 } else { 3747 if (fnegFoldsIntoOp(Opc) && 3748 (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode()))) 3749 return SDValue(); 3750 } 3751 3752 SDLoc SL(N); 3753 switch (Opc) { 3754 case ISD::FADD: { 3755 if (!mayIgnoreSignedZero(N0)) 3756 return SDValue(); 3757 3758 // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y)) 3759 SDValue LHS = N0.getOperand(0); 3760 SDValue RHS = N0.getOperand(1); 3761 3762 if (LHS.getOpcode() != ISD::FNEG) 3763 LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS); 3764 else 3765 LHS = LHS.getOperand(0); 3766 3767 if (RHS.getOpcode() != ISD::FNEG) 3768 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3769 else 3770 RHS = RHS.getOperand(0); 3771 3772 SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags()); 3773 if (Res.getOpcode() != ISD::FADD) 3774 return SDValue(); // Op got folded away. 3775 if (!N0.hasOneUse()) 3776 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); 3777 return Res; 3778 } 3779 case ISD::FMUL: 3780 case AMDGPUISD::FMUL_LEGACY: { 3781 // (fneg (fmul x, y)) -> (fmul x, (fneg y)) 3782 // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y)) 3783 SDValue LHS = N0.getOperand(0); 3784 SDValue RHS = N0.getOperand(1); 3785 3786 if (LHS.getOpcode() == ISD::FNEG) 3787 LHS = LHS.getOperand(0); 3788 else if (RHS.getOpcode() == ISD::FNEG) 3789 RHS = RHS.getOperand(0); 3790 else 3791 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3792 3793 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags()); 3794 if (Res.getOpcode() != Opc) 3795 return SDValue(); // Op got folded away. 3796 if (!N0.hasOneUse()) 3797 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); 3798 return Res; 3799 } 3800 case ISD::FMA: 3801 case ISD::FMAD: { 3802 if (!mayIgnoreSignedZero(N0)) 3803 return SDValue(); 3804 3805 // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z)) 3806 SDValue LHS = N0.getOperand(0); 3807 SDValue MHS = N0.getOperand(1); 3808 SDValue RHS = N0.getOperand(2); 3809 3810 if (LHS.getOpcode() == ISD::FNEG) 3811 LHS = LHS.getOperand(0); 3812 else if (MHS.getOpcode() == ISD::FNEG) 3813 MHS = MHS.getOperand(0); 3814 else 3815 MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS); 3816 3817 if (RHS.getOpcode() != ISD::FNEG) 3818 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3819 else 3820 RHS = RHS.getOperand(0); 3821 3822 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS); 3823 if (Res.getOpcode() != Opc) 3824 return SDValue(); // Op got folded away. 3825 if (!N0.hasOneUse()) 3826 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); 3827 return Res; 3828 } 3829 case ISD::FMAXNUM: 3830 case ISD::FMINNUM: 3831 case ISD::FMAXNUM_IEEE: 3832 case ISD::FMINNUM_IEEE: 3833 case AMDGPUISD::FMAX_LEGACY: 3834 case AMDGPUISD::FMIN_LEGACY: { 3835 // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y) 3836 // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y) 3837 // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y) 3838 // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y) 3839 3840 SDValue LHS = N0.getOperand(0); 3841 SDValue RHS = N0.getOperand(1); 3842 3843 // 0 doesn't have a negated inline immediate. 3844 // TODO: This constant check should be generalized to other operations. 
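    // e.g. negating (fminnum x, 0.0) would require materializing -0.0, which
    // is not an inline immediate.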
3845 if (isConstantCostlierToNegate(RHS)) 3846 return SDValue(); 3847 3848 SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS); 3849 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3850 unsigned Opposite = inverseMinMax(Opc); 3851 3852 SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags()); 3853 if (Res.getOpcode() != Opposite) 3854 return SDValue(); // Op got folded away. 3855 if (!N0.hasOneUse()) 3856 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); 3857 return Res; 3858 } 3859 case AMDGPUISD::FMED3: { 3860 SDValue Ops[3]; 3861 for (unsigned I = 0; I < 3; ++I) 3862 Ops[I] = DAG.getNode(ISD::FNEG, SL, VT, N0->getOperand(I), N0->getFlags()); 3863 3864 SDValue Res = DAG.getNode(AMDGPUISD::FMED3, SL, VT, Ops, N0->getFlags()); 3865 if (Res.getOpcode() != AMDGPUISD::FMED3) 3866 return SDValue(); // Op got folded away. 3867 if (!N0.hasOneUse()) 3868 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); 3869 return Res; 3870 } 3871 case ISD::FP_EXTEND: 3872 case ISD::FTRUNC: 3873 case ISD::FRINT: 3874 case ISD::FNEARBYINT: // XXX - Should fround be handled? 3875 case ISD::FSIN: 3876 case ISD::FCANONICALIZE: 3877 case AMDGPUISD::RCP: 3878 case AMDGPUISD::RCP_LEGACY: 3879 case AMDGPUISD::RCP_IFLAG: 3880 case AMDGPUISD::SIN_HW: { 3881 SDValue CvtSrc = N0.getOperand(0); 3882 if (CvtSrc.getOpcode() == ISD::FNEG) { 3883 // (fneg (fp_extend (fneg x))) -> (fp_extend x) 3884 // (fneg (rcp (fneg x))) -> (rcp x) 3885 return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0)); 3886 } 3887 3888 if (!N0.hasOneUse()) 3889 return SDValue(); 3890 3891 // (fneg (fp_extend x)) -> (fp_extend (fneg x)) 3892 // (fneg (rcp x)) -> (rcp (fneg x)) 3893 SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc); 3894 return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags()); 3895 } 3896 case ISD::FP_ROUND: { 3897 SDValue CvtSrc = N0.getOperand(0); 3898 3899 if (CvtSrc.getOpcode() == ISD::FNEG) { 3900 // (fneg (fp_round (fneg x))) -> (fp_round x) 3901 return DAG.getNode(ISD::FP_ROUND, SL, VT, 3902 CvtSrc.getOperand(0), N0.getOperand(1)); 3903 } 3904 3905 if (!N0.hasOneUse()) 3906 return SDValue(); 3907 3908 // (fneg (fp_round x)) -> (fp_round (fneg x)) 3909 SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc); 3910 return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1)); 3911 } 3912 case ISD::FP16_TO_FP: { 3913 // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal 3914 // f16, but legalization of f16 fneg ends up pulling it out of the source. 3915 // Put the fneg back as a legal source operation that can be matched later. 
3916 SDLoc SL(N); 3917 3918 SDValue Src = N0.getOperand(0); 3919 EVT SrcVT = Src.getValueType(); 3920 3921 // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000) 3922 SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src, 3923 DAG.getConstant(0x8000, SL, SrcVT)); 3924 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg); 3925 } 3926 default: 3927 return SDValue(); 3928 } 3929 } 3930 3931 SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N, 3932 DAGCombinerInfo &DCI) const { 3933 SelectionDAG &DAG = DCI.DAG; 3934 SDValue N0 = N->getOperand(0); 3935 3936 if (!N0.hasOneUse()) 3937 return SDValue(); 3938 3939 switch (N0.getOpcode()) { 3940 case ISD::FP16_TO_FP: { 3941 assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal"); 3942 SDLoc SL(N); 3943 SDValue Src = N0.getOperand(0); 3944 EVT SrcVT = Src.getValueType(); 3945 3946 // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff) 3947 SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src, 3948 DAG.getConstant(0x7fff, SL, SrcVT)); 3949 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs); 3950 } 3951 default: 3952 return SDValue(); 3953 } 3954 } 3955 3956 SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N, 3957 DAGCombinerInfo &DCI) const { 3958 const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); 3959 if (!CFP) 3960 return SDValue(); 3961 3962 // XXX - Should this flush denormals? 3963 const APFloat &Val = CFP->getValueAPF(); 3964 APFloat One(Val.getSemantics(), "1.0"); 3965 return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0)); 3966 } 3967 3968 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N, 3969 DAGCombinerInfo &DCI) const { 3970 SelectionDAG &DAG = DCI.DAG; 3971 SDLoc DL(N); 3972 3973 switch(N->getOpcode()) { 3974 default: 3975 break; 3976 case ISD::BITCAST: { 3977 EVT DestVT = N->getValueType(0); 3978 3979 // Push casts through vector builds. This helps avoid emitting a large 3980 // number of copies when materializing floating point vector constants. 3981 // 3982 // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) => 3983 // vnt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y)) 3984 if (DestVT.isVector()) { 3985 SDValue Src = N->getOperand(0); 3986 if (Src.getOpcode() == ISD::BUILD_VECTOR) { 3987 EVT SrcVT = Src.getValueType(); 3988 unsigned NElts = DestVT.getVectorNumElements(); 3989 3990 if (SrcVT.getVectorNumElements() == NElts) { 3991 EVT DestEltVT = DestVT.getVectorElementType(); 3992 3993 SmallVector<SDValue, 8> CastedElts; 3994 SDLoc SL(N); 3995 for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) { 3996 SDValue Elt = Src.getOperand(I); 3997 CastedElts.push_back(DAG.getNode(ISD::BITCAST, DL, DestEltVT, Elt)); 3998 } 3999 4000 return DAG.getBuildVector(DestVT, SL, CastedElts); 4001 } 4002 } 4003 } 4004 4005 if (DestVT.getSizeInBits() != 64 && !DestVT.isVector()) 4006 break; 4007 4008 // Fold bitcasts of constants. 
4009     //
4010     // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
4011     // TODO: Generalize and move to DAGCombiner
4012     SDValue Src = N->getOperand(0);
4013     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
4014       if (Src.getValueType() == MVT::i64) {
4015         SDLoc SL(N);
4016         uint64_t CVal = C->getZExtValue();
4017         SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
4018                                  DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
4019                                  DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
4020         return DAG.getNode(ISD::BITCAST, SL, DestVT, BV);
4021       }
4022     }
4023
4024     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
4025       const APInt &Val = C->getValueAPF().bitcastToAPInt();
4026       SDLoc SL(N);
4027       uint64_t CVal = Val.getZExtValue();
4028       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
4029                                 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
4030                                 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
4031
4032       return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
4033     }
4034
4035     break;
4036   }
4037   case ISD::SHL: {
4038     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
4039       break;
4040
4041     return performShlCombine(N, DCI);
4042   }
4043   case ISD::SRL: {
4044     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
4045       break;
4046
4047     return performSrlCombine(N, DCI);
4048   }
4049   case ISD::SRA: {
4050     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
4051       break;
4052
4053     return performSraCombine(N, DCI);
4054   }
4055   case ISD::TRUNCATE:
4056     return performTruncateCombine(N, DCI);
4057   case ISD::MUL:
4058     return performMulCombine(N, DCI);
4059   case ISD::MULHS:
4060     return performMulhsCombine(N, DCI);
4061   case ISD::MULHU:
4062     return performMulhuCombine(N, DCI);
4063   case AMDGPUISD::MUL_I24:
4064   case AMDGPUISD::MUL_U24:
4065   case AMDGPUISD::MULHI_I24:
4066   case AMDGPUISD::MULHI_U24: {
4067     if (SDValue V = simplifyI24(N, DCI))
4068       return V;
4069     return SDValue();
4070   }
4071   case AMDGPUISD::MUL_LOHI_I24:
4072   case AMDGPUISD::MUL_LOHI_U24:
4073     return performMulLoHi24Combine(N, DCI);
4074   case ISD::SELECT:
4075     return performSelectCombine(N, DCI);
4076   case ISD::FNEG:
4077     return performFNegCombine(N, DCI);
4078   case ISD::FABS:
4079     return performFAbsCombine(N, DCI);
4080   case AMDGPUISD::BFE_I32:
4081   case AMDGPUISD::BFE_U32: {
4082     assert(!N->getValueType(0).isVector() &&
4083            "Vector handling of BFE not implemented");
4084     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
4085     if (!Width)
4086       break;
4087
4088     uint32_t WidthVal = Width->getZExtValue() & 0x1f;
4089     if (WidthVal == 0)
4090       return DAG.getConstant(0, DL, MVT::i32);
4091
4092     ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
4093     if (!Offset)
4094       break;
4095
4096     SDValue BitsFrom = N->getOperand(0);
4097     uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
4098
4099     bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
4100
4101     if (OffsetVal == 0) {
4102       // This is already sign / zero extended, so try to fold away extra BFEs.
4103       unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
4104
4105       unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
4106       if (OpSignBits >= SignBits)
4107         return BitsFrom;
4108
4109       EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
4110       if (Signed) {
4111         // This is a sign_extend_inreg. Replace it to take advantage of existing
4112         // DAG Combines. If not eliminated, we will match back to BFE during
4113         // selection.
4114
4115         // TODO: The sext_inreg of extended types ends up here, although we
4116         // could handle them in a single BFE.
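        // e.g. (bfe_i32 x, 0, 8) is rewritten here as (sext_inreg x, i8).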
4117 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom, 4118 DAG.getValueType(SmallVT)); 4119 } 4120 4121 return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT); 4122 } 4123 4124 if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) { 4125 if (Signed) { 4126 return constantFoldBFE<int32_t>(DAG, 4127 CVal->getSExtValue(), 4128 OffsetVal, 4129 WidthVal, 4130 DL); 4131 } 4132 4133 return constantFoldBFE<uint32_t>(DAG, 4134 CVal->getZExtValue(), 4135 OffsetVal, 4136 WidthVal, 4137 DL); 4138 } 4139 4140 if ((OffsetVal + WidthVal) >= 32 && 4141 !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) { 4142 SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32); 4143 return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32, 4144 BitsFrom, ShiftVal); 4145 } 4146 4147 if (BitsFrom.hasOneUse()) { 4148 APInt Demanded = APInt::getBitsSet(32, 4149 OffsetVal, 4150 OffsetVal + WidthVal); 4151 4152 KnownBits Known; 4153 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 4154 !DCI.isBeforeLegalizeOps()); 4155 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4156 if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) || 4157 TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) { 4158 DCI.CommitTargetLoweringOpt(TLO); 4159 } 4160 } 4161 4162 break; 4163 } 4164 case ISD::LOAD: 4165 return performLoadCombine(N, DCI); 4166 case ISD::STORE: 4167 return performStoreCombine(N, DCI); 4168 case AMDGPUISD::RCP: 4169 case AMDGPUISD::RCP_IFLAG: 4170 return performRcpCombine(N, DCI); 4171 case ISD::AssertZext: 4172 case ISD::AssertSext: 4173 return performAssertSZExtCombine(N, DCI); 4174 } 4175 return SDValue(); 4176 } 4177 4178 //===----------------------------------------------------------------------===// 4179 // Helper functions 4180 //===----------------------------------------------------------------------===// 4181 4182 SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG, 4183 const TargetRegisterClass *RC, 4184 unsigned Reg, EVT VT, 4185 const SDLoc &SL, 4186 bool RawReg) const { 4187 MachineFunction &MF = DAG.getMachineFunction(); 4188 MachineRegisterInfo &MRI = MF.getRegInfo(); 4189 unsigned VReg; 4190 4191 if (!MRI.isLiveIn(Reg)) { 4192 VReg = MRI.createVirtualRegister(RC); 4193 MRI.addLiveIn(Reg, VReg); 4194 } else { 4195 VReg = MRI.getLiveInVirtReg(Reg); 4196 } 4197 4198 if (RawReg) 4199 return DAG.getRegister(VReg, VT); 4200 4201 return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT); 4202 } 4203 4204 SDValue AMDGPUTargetLowering::loadStackInputValue(SelectionDAG &DAG, 4205 EVT VT, 4206 const SDLoc &SL, 4207 int64_t Offset) const { 4208 MachineFunction &MF = DAG.getMachineFunction(); 4209 MachineFrameInfo &MFI = MF.getFrameInfo(); 4210 4211 int FI = MFI.CreateFixedObject(VT.getStoreSize(), Offset, true); 4212 auto SrcPtrInfo = MachinePointerInfo::getStack(MF, Offset); 4213 SDValue Ptr = DAG.getFrameIndex(FI, MVT::i32); 4214 4215 return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, 4, 4216 MachineMemOperand::MODereferenceable | 4217 MachineMemOperand::MOInvariant); 4218 } 4219 4220 SDValue AMDGPUTargetLowering::storeStackInputValue(SelectionDAG &DAG, 4221 const SDLoc &SL, 4222 SDValue Chain, 4223 SDValue ArgVal, 4224 int64_t Offset) const { 4225 MachineFunction &MF = DAG.getMachineFunction(); 4226 MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset); 4227 4228 SDValue Ptr = DAG.getConstant(Offset, SL, MVT::i32); 4229 SDValue Store = DAG.getStore(Chain, SL, ArgVal, Ptr, DstInfo, 4, 4230 
MachineMemOperand::MODereferenceable); 4231 return Store; 4232 } 4233 4234 SDValue AMDGPUTargetLowering::loadInputValue(SelectionDAG &DAG, 4235 const TargetRegisterClass *RC, 4236 EVT VT, const SDLoc &SL, 4237 const ArgDescriptor &Arg) const { 4238 assert(Arg && "Attempting to load missing argument"); 4239 4240 SDValue V = Arg.isRegister() ? 4241 CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL) : 4242 loadStackInputValue(DAG, VT, SL, Arg.getStackOffset()); 4243 4244 if (!Arg.isMasked()) 4245 return V; 4246 4247 unsigned Mask = Arg.getMask(); 4248 unsigned Shift = countTrailingZeros<unsigned>(Mask); 4249 V = DAG.getNode(ISD::SRL, SL, VT, V, 4250 DAG.getShiftAmountConstant(Shift, VT, SL)); 4251 return DAG.getNode(ISD::AND, SL, VT, V, 4252 DAG.getConstant(Mask >> Shift, SL, VT)); 4253 } 4254 4255 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset( 4256 const MachineFunction &MF, const ImplicitParameter Param) const { 4257 const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>(); 4258 const AMDGPUSubtarget &ST = 4259 AMDGPUSubtarget::get(getTargetMachine(), MF.getFunction()); 4260 unsigned ExplicitArgOffset = ST.getExplicitKernelArgOffset(MF.getFunction()); 4261 unsigned Alignment = ST.getAlignmentForImplicitArgPtr(); 4262 uint64_t ArgOffset = alignTo(MFI->getExplicitKernArgSize(), Alignment) + 4263 ExplicitArgOffset; 4264 switch (Param) { 4265 case GRID_DIM: 4266 return ArgOffset; 4267 case GRID_OFFSET: 4268 return ArgOffset + 4; 4269 } 4270 llvm_unreachable("unexpected implicit parameter type"); 4271 } 4272 4273 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node; 4274 4275 const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const { 4276 switch ((AMDGPUISD::NodeType)Opcode) { 4277 case AMDGPUISD::FIRST_NUMBER: break; 4278 // AMDIL DAG nodes 4279 NODE_NAME_CASE(UMUL); 4280 NODE_NAME_CASE(BRANCH_COND); 4281 4282 // AMDGPU DAG nodes 4283 NODE_NAME_CASE(IF) 4284 NODE_NAME_CASE(ELSE) 4285 NODE_NAME_CASE(LOOP) 4286 NODE_NAME_CASE(CALL) 4287 NODE_NAME_CASE(TC_RETURN) 4288 NODE_NAME_CASE(TRAP) 4289 NODE_NAME_CASE(RET_FLAG) 4290 NODE_NAME_CASE(RETURN_TO_EPILOG) 4291 NODE_NAME_CASE(ENDPGM) 4292 NODE_NAME_CASE(DWORDADDR) 4293 NODE_NAME_CASE(FRACT) 4294 NODE_NAME_CASE(SETCC) 4295 NODE_NAME_CASE(SETREG) 4296 NODE_NAME_CASE(FMA_W_CHAIN) 4297 NODE_NAME_CASE(FMUL_W_CHAIN) 4298 NODE_NAME_CASE(CLAMP) 4299 NODE_NAME_CASE(COS_HW) 4300 NODE_NAME_CASE(SIN_HW) 4301 NODE_NAME_CASE(FMAX_LEGACY) 4302 NODE_NAME_CASE(FMIN_LEGACY) 4303 NODE_NAME_CASE(FMAX3) 4304 NODE_NAME_CASE(SMAX3) 4305 NODE_NAME_CASE(UMAX3) 4306 NODE_NAME_CASE(FMIN3) 4307 NODE_NAME_CASE(SMIN3) 4308 NODE_NAME_CASE(UMIN3) 4309 NODE_NAME_CASE(FMED3) 4310 NODE_NAME_CASE(SMED3) 4311 NODE_NAME_CASE(UMED3) 4312 NODE_NAME_CASE(FDOT2) 4313 NODE_NAME_CASE(URECIP) 4314 NODE_NAME_CASE(DIV_SCALE) 4315 NODE_NAME_CASE(DIV_FMAS) 4316 NODE_NAME_CASE(DIV_FIXUP) 4317 NODE_NAME_CASE(FMAD_FTZ) 4318 NODE_NAME_CASE(TRIG_PREOP) 4319 NODE_NAME_CASE(RCP) 4320 NODE_NAME_CASE(RSQ) 4321 NODE_NAME_CASE(RCP_LEGACY) 4322 NODE_NAME_CASE(RSQ_LEGACY) 4323 NODE_NAME_CASE(RCP_IFLAG) 4324 NODE_NAME_CASE(FMUL_LEGACY) 4325 NODE_NAME_CASE(RSQ_CLAMP) 4326 NODE_NAME_CASE(LDEXP) 4327 NODE_NAME_CASE(FP_CLASS) 4328 NODE_NAME_CASE(DOT4) 4329 NODE_NAME_CASE(CARRY) 4330 NODE_NAME_CASE(BORROW) 4331 NODE_NAME_CASE(BFE_U32) 4332 NODE_NAME_CASE(BFE_I32) 4333 NODE_NAME_CASE(BFI) 4334 NODE_NAME_CASE(BFM) 4335 NODE_NAME_CASE(FFBH_U32) 4336 NODE_NAME_CASE(FFBH_I32) 4337 NODE_NAME_CASE(FFBL_B32) 4338 NODE_NAME_CASE(MUL_U24) 4339 
NODE_NAME_CASE(MUL_I24) 4340 NODE_NAME_CASE(MULHI_U24) 4341 NODE_NAME_CASE(MULHI_I24) 4342 NODE_NAME_CASE(MUL_LOHI_U24) 4343 NODE_NAME_CASE(MUL_LOHI_I24) 4344 NODE_NAME_CASE(MAD_U24) 4345 NODE_NAME_CASE(MAD_I24) 4346 NODE_NAME_CASE(MAD_I64_I32) 4347 NODE_NAME_CASE(MAD_U64_U32) 4348 NODE_NAME_CASE(PERM) 4349 NODE_NAME_CASE(TEXTURE_FETCH) 4350 NODE_NAME_CASE(EXPORT) 4351 NODE_NAME_CASE(EXPORT_DONE) 4352 NODE_NAME_CASE(R600_EXPORT) 4353 NODE_NAME_CASE(CONST_ADDRESS) 4354 NODE_NAME_CASE(REGISTER_LOAD) 4355 NODE_NAME_CASE(REGISTER_STORE) 4356 NODE_NAME_CASE(SAMPLE) 4357 NODE_NAME_CASE(SAMPLEB) 4358 NODE_NAME_CASE(SAMPLED) 4359 NODE_NAME_CASE(SAMPLEL) 4360 NODE_NAME_CASE(CVT_F32_UBYTE0) 4361 NODE_NAME_CASE(CVT_F32_UBYTE1) 4362 NODE_NAME_CASE(CVT_F32_UBYTE2) 4363 NODE_NAME_CASE(CVT_F32_UBYTE3) 4364 NODE_NAME_CASE(CVT_PKRTZ_F16_F32) 4365 NODE_NAME_CASE(CVT_PKNORM_I16_F32) 4366 NODE_NAME_CASE(CVT_PKNORM_U16_F32) 4367 NODE_NAME_CASE(CVT_PK_I16_I32) 4368 NODE_NAME_CASE(CVT_PK_U16_U32) 4369 NODE_NAME_CASE(FP_TO_FP16) 4370 NODE_NAME_CASE(FP16_ZEXT) 4371 NODE_NAME_CASE(BUILD_VERTICAL_VECTOR) 4372 NODE_NAME_CASE(CONST_DATA_PTR) 4373 NODE_NAME_CASE(PC_ADD_REL_OFFSET) 4374 NODE_NAME_CASE(LDS) 4375 NODE_NAME_CASE(KILL) 4376 NODE_NAME_CASE(DUMMY_CHAIN) 4377 case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break; 4378 NODE_NAME_CASE(INIT_EXEC) 4379 NODE_NAME_CASE(INIT_EXEC_FROM_INPUT) 4380 NODE_NAME_CASE(SENDMSG) 4381 NODE_NAME_CASE(SENDMSGHALT) 4382 NODE_NAME_CASE(INTERP_MOV) 4383 NODE_NAME_CASE(INTERP_P1) 4384 NODE_NAME_CASE(INTERP_P2) 4385 NODE_NAME_CASE(INTERP_P1LL_F16) 4386 NODE_NAME_CASE(INTERP_P1LV_F16) 4387 NODE_NAME_CASE(INTERP_P2_F16) 4388 NODE_NAME_CASE(LOAD_D16_HI) 4389 NODE_NAME_CASE(LOAD_D16_LO) 4390 NODE_NAME_CASE(LOAD_D16_HI_I8) 4391 NODE_NAME_CASE(LOAD_D16_HI_U8) 4392 NODE_NAME_CASE(LOAD_D16_LO_I8) 4393 NODE_NAME_CASE(LOAD_D16_LO_U8) 4394 NODE_NAME_CASE(STORE_MSKOR) 4395 NODE_NAME_CASE(LOAD_CONSTANT) 4396 NODE_NAME_CASE(TBUFFER_STORE_FORMAT) 4397 NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16) 4398 NODE_NAME_CASE(TBUFFER_LOAD_FORMAT) 4399 NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16) 4400 NODE_NAME_CASE(DS_ORDERED_COUNT) 4401 NODE_NAME_CASE(ATOMIC_CMP_SWAP) 4402 NODE_NAME_CASE(ATOMIC_INC) 4403 NODE_NAME_CASE(ATOMIC_DEC) 4404 NODE_NAME_CASE(ATOMIC_LOAD_FMIN) 4405 NODE_NAME_CASE(ATOMIC_LOAD_FMAX) 4406 NODE_NAME_CASE(BUFFER_LOAD) 4407 NODE_NAME_CASE(BUFFER_LOAD_UBYTE) 4408 NODE_NAME_CASE(BUFFER_LOAD_USHORT) 4409 NODE_NAME_CASE(BUFFER_LOAD_BYTE) 4410 NODE_NAME_CASE(BUFFER_LOAD_SHORT) 4411 NODE_NAME_CASE(BUFFER_LOAD_FORMAT) 4412 NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16) 4413 NODE_NAME_CASE(SBUFFER_LOAD) 4414 NODE_NAME_CASE(BUFFER_STORE) 4415 NODE_NAME_CASE(BUFFER_STORE_BYTE) 4416 NODE_NAME_CASE(BUFFER_STORE_SHORT) 4417 NODE_NAME_CASE(BUFFER_STORE_FORMAT) 4418 NODE_NAME_CASE(BUFFER_STORE_FORMAT_D16) 4419 NODE_NAME_CASE(BUFFER_ATOMIC_SWAP) 4420 NODE_NAME_CASE(BUFFER_ATOMIC_ADD) 4421 NODE_NAME_CASE(BUFFER_ATOMIC_SUB) 4422 NODE_NAME_CASE(BUFFER_ATOMIC_SMIN) 4423 NODE_NAME_CASE(BUFFER_ATOMIC_UMIN) 4424 NODE_NAME_CASE(BUFFER_ATOMIC_SMAX) 4425 NODE_NAME_CASE(BUFFER_ATOMIC_UMAX) 4426 NODE_NAME_CASE(BUFFER_ATOMIC_AND) 4427 NODE_NAME_CASE(BUFFER_ATOMIC_OR) 4428 NODE_NAME_CASE(BUFFER_ATOMIC_XOR) 4429 NODE_NAME_CASE(BUFFER_ATOMIC_CMPSWAP) 4430 NODE_NAME_CASE(BUFFER_ATOMIC_FADD) 4431 NODE_NAME_CASE(BUFFER_ATOMIC_PK_FADD) 4432 NODE_NAME_CASE(ATOMIC_FADD) 4433 NODE_NAME_CASE(ATOMIC_PK_FADD) 4434 4435 case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break; 4436 } 4437 return nullptr; 4438 } 4439 4440 SDValue 
AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand,
4441                                       SelectionDAG &DAG, int Enabled,
4442                                       int &RefinementSteps,
4443                                       bool &UseOneConstNR,
4444                                       bool Reciprocal) const {
4445   EVT VT = Operand.getValueType();
4446
4447   if (VT == MVT::f32) {
4448     RefinementSteps = 0;
4449     return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
4450   }
4451
4452   // TODO: There is also an f64 rsq instruction, but the documentation is less
4453   // clear on its precision.
4454
4455   return SDValue();
4456 }
4457
4458 SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
4459                                                SelectionDAG &DAG, int Enabled,
4460                                                int &RefinementSteps) const {
4461   EVT VT = Operand.getValueType();
4462
4463   if (VT == MVT::f32) {
4464     // Reciprocal, < 1 ulp error.
4465     //
4466     // This reciprocal approximation converges to < 0.5 ulp error with one
4467     // Newton-Raphson iteration performed with two fused multiply-adds (FMAs).
4468
4469     RefinementSteps = 0;
4470     return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
4471   }
4472
4473   // TODO: There is also an f64 rcp instruction, but the documentation is less
4474   // clear on its precision.
4475
4476   return SDValue();
4477 }
4478
4479 void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
4480     const SDValue Op, KnownBits &Known,
4481     const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
4482
4483   Known.resetAll(); // Don't know anything.
4484
4485   unsigned Opc = Op.getOpcode();
4486
4487   switch (Opc) {
4488   default:
4489     break;
4490   case AMDGPUISD::CARRY:
4491   case AMDGPUISD::BORROW: {
4492     Known.Zero = APInt::getHighBitsSet(32, 31);
4493     break;
4494   }
4495
4496   case AMDGPUISD::BFE_I32:
4497   case AMDGPUISD::BFE_U32: {
4498     ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4499     if (!CWidth)
4500       return;
4501
4502     uint32_t Width = CWidth->getZExtValue() & 0x1f;
4503
4504     if (Opc == AMDGPUISD::BFE_U32)
4505       Known.Zero = APInt::getHighBitsSet(32, 32 - Width);
4506
4507     break;
4508   }
4509   case AMDGPUISD::FP_TO_FP16:
4510   case AMDGPUISD::FP16_ZEXT: {
4511     unsigned BitWidth = Known.getBitWidth();
4512
4513     // High bits are zero.
4514     Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
4515     break;
4516   }
4517   case AMDGPUISD::MUL_U24:
4518   case AMDGPUISD::MUL_I24: {
4519     KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
4520     KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
4521     unsigned TrailZ = LHSKnown.countMinTrailingZeros() +
4522                       RHSKnown.countMinTrailingZeros();
4523     Known.Zero.setLowBits(std::min(TrailZ, 32u));
4524
4525     // Truncate to 24 bits.
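    // Only the low 24 bits of each operand participate in the 24-bit multiply.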
4526 LHSKnown = LHSKnown.trunc(24); 4527 RHSKnown = RHSKnown.trunc(24); 4528 4529 bool Negative = false; 4530 if (Opc == AMDGPUISD::MUL_I24) { 4531 unsigned LHSValBits = 24 - LHSKnown.countMinSignBits(); 4532 unsigned RHSValBits = 24 - RHSKnown.countMinSignBits(); 4533 unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u); 4534 if (MaxValBits >= 32) 4535 break; 4536 bool LHSNegative = LHSKnown.isNegative(); 4537 bool LHSPositive = LHSKnown.isNonNegative(); 4538 bool RHSNegative = RHSKnown.isNegative(); 4539 bool RHSPositive = RHSKnown.isNonNegative(); 4540 if ((!LHSNegative && !LHSPositive) || (!RHSNegative && !RHSPositive)) 4541 break; 4542 Negative = (LHSNegative && RHSPositive) || (LHSPositive && RHSNegative); 4543 if (Negative) 4544 Known.One.setHighBits(32 - MaxValBits); 4545 else 4546 Known.Zero.setHighBits(32 - MaxValBits); 4547 } else { 4548 unsigned LHSValBits = 24 - LHSKnown.countMinLeadingZeros(); 4549 unsigned RHSValBits = 24 - RHSKnown.countMinLeadingZeros(); 4550 unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u); 4551 if (MaxValBits >= 32) 4552 break; 4553 Known.Zero.setHighBits(32 - MaxValBits); 4554 } 4555 break; 4556 } 4557 case AMDGPUISD::PERM: { 4558 ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 4559 if (!CMask) 4560 return; 4561 4562 KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); 4563 KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1); 4564 unsigned Sel = CMask->getZExtValue(); 4565 4566 for (unsigned I = 0; I < 32; I += 8) { 4567 unsigned SelBits = Sel & 0xff; 4568 if (SelBits < 4) { 4569 SelBits *= 8; 4570 Known.One |= ((RHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I; 4571 Known.Zero |= ((RHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I; 4572 } else if (SelBits < 7) { 4573 SelBits = (SelBits & 3) * 8; 4574 Known.One |= ((LHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I; 4575 Known.Zero |= ((LHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I; 4576 } else if (SelBits == 0x0c) { 4577 Known.Zero |= 0xff << I; 4578 } else if (SelBits > 0x0c) { 4579 Known.One |= 0xff << I; 4580 } 4581 Sel >>= 8; 4582 } 4583 break; 4584 } 4585 case AMDGPUISD::BUFFER_LOAD_UBYTE: { 4586 Known.Zero.setHighBits(24); 4587 break; 4588 } 4589 case AMDGPUISD::BUFFER_LOAD_USHORT: { 4590 Known.Zero.setHighBits(16); 4591 break; 4592 } 4593 case AMDGPUISD::LDS: { 4594 auto GA = cast<GlobalAddressSDNode>(Op.getOperand(0).getNode()); 4595 unsigned Align = GA->getGlobal()->getAlignment(); 4596 4597 Known.Zero.setHighBits(16); 4598 if (Align) 4599 Known.Zero.setLowBits(Log2_32(Align)); 4600 break; 4601 } 4602 case ISD::INTRINSIC_WO_CHAIN: { 4603 unsigned IID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 4604 switch (IID) { 4605 case Intrinsic::amdgcn_mbcnt_lo: 4606 case Intrinsic::amdgcn_mbcnt_hi: { 4607 const GCNSubtarget &ST = 4608 DAG.getMachineFunction().getSubtarget<GCNSubtarget>(); 4609 // These return at most the wavefront size - 1. 
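      // e.g. on a wave64 target the result is at most 63, so the high 26 bits
      // of the 32-bit result are known zero.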
4610 unsigned Size = Op.getValueType().getSizeInBits(); 4611 Known.Zero.setHighBits(Size - ST.getWavefrontSizeLog2()); 4612 break; 4613 } 4614 default: 4615 break; 4616 } 4617 } 4618 } 4619 } 4620 4621 unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode( 4622 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 4623 unsigned Depth) const { 4624 switch (Op.getOpcode()) { 4625 case AMDGPUISD::BFE_I32: { 4626 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 4627 if (!Width) 4628 return 1; 4629 4630 unsigned SignBits = 32 - Width->getZExtValue() + 1; 4631 if (!isNullConstant(Op.getOperand(1))) 4632 return SignBits; 4633 4634 // TODO: Could probably figure something out with non-0 offsets. 4635 unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1); 4636 return std::max(SignBits, Op0SignBits); 4637 } 4638 4639 case AMDGPUISD::BFE_U32: { 4640 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 4641 return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1; 4642 } 4643 4644 case AMDGPUISD::CARRY: 4645 case AMDGPUISD::BORROW: 4646 return 31; 4647 case AMDGPUISD::BUFFER_LOAD_BYTE: 4648 return 25; 4649 case AMDGPUISD::BUFFER_LOAD_SHORT: 4650 return 17; 4651 case AMDGPUISD::BUFFER_LOAD_UBYTE: 4652 return 24; 4653 case AMDGPUISD::BUFFER_LOAD_USHORT: 4654 return 16; 4655 case AMDGPUISD::FP_TO_FP16: 4656 case AMDGPUISD::FP16_ZEXT: 4657 return 16; 4658 default: 4659 return 1; 4660 } 4661 } 4662 4663 bool AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(SDValue Op, 4664 const SelectionDAG &DAG, 4665 bool SNaN, 4666 unsigned Depth) const { 4667 unsigned Opcode = Op.getOpcode(); 4668 switch (Opcode) { 4669 case AMDGPUISD::FMIN_LEGACY: 4670 case AMDGPUISD::FMAX_LEGACY: { 4671 if (SNaN) 4672 return true; 4673 4674 // TODO: Can check no nans on one of the operands for each one, but which 4675 // one? 4676 return false; 4677 } 4678 case AMDGPUISD::FMUL_LEGACY: 4679 case AMDGPUISD::CVT_PKRTZ_F16_F32: { 4680 if (SNaN) 4681 return true; 4682 return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) && 4683 DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1); 4684 } 4685 case AMDGPUISD::FMED3: 4686 case AMDGPUISD::FMIN3: 4687 case AMDGPUISD::FMAX3: 4688 case AMDGPUISD::FMAD_FTZ: { 4689 if (SNaN) 4690 return true; 4691 return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) && 4692 DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) && 4693 DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1); 4694 } 4695 case AMDGPUISD::CVT_F32_UBYTE0: 4696 case AMDGPUISD::CVT_F32_UBYTE1: 4697 case AMDGPUISD::CVT_F32_UBYTE2: 4698 case AMDGPUISD::CVT_F32_UBYTE3: 4699 return true; 4700 4701 case AMDGPUISD::RCP: 4702 case AMDGPUISD::RSQ: 4703 case AMDGPUISD::RCP_LEGACY: 4704 case AMDGPUISD::RSQ_LEGACY: 4705 case AMDGPUISD::RSQ_CLAMP: { 4706 if (SNaN) 4707 return true; 4708 4709 // TODO: Need is known positive check. 4710 return false; 4711 } 4712 case AMDGPUISD::LDEXP: 4713 case AMDGPUISD::FRACT: { 4714 if (SNaN) 4715 return true; 4716 return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); 4717 } 4718 case AMDGPUISD::DIV_SCALE: 4719 case AMDGPUISD::DIV_FMAS: 4720 case AMDGPUISD::DIV_FIXUP: 4721 case AMDGPUISD::TRIG_PREOP: 4722 // TODO: Refine on operands. 
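    // i.e. these never produce a signaling NaN, but may still produce a
    // quiet NaN.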
4723 return SNaN; 4724 case AMDGPUISD::SIN_HW: 4725 case AMDGPUISD::COS_HW: { 4726 // TODO: Need check for infinity 4727 return SNaN; 4728 } 4729 case ISD::INTRINSIC_WO_CHAIN: { 4730 unsigned IntrinsicID 4731 = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 4732 // TODO: Handle more intrinsics 4733 switch (IntrinsicID) { 4734 case Intrinsic::amdgcn_cubeid: 4735 return true; 4736 4737 case Intrinsic::amdgcn_frexp_mant: { 4738 if (SNaN) 4739 return true; 4740 return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1); 4741 } 4742 case Intrinsic::amdgcn_cvt_pkrtz: { 4743 if (SNaN) 4744 return true; 4745 return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) && 4746 DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1); 4747 } 4748 case Intrinsic::amdgcn_fdot2: 4749 // TODO: Refine on operand 4750 return SNaN; 4751 default: 4752 return false; 4753 } 4754 } 4755 default: 4756 return false; 4757 } 4758 } 4759 4760 TargetLowering::AtomicExpansionKind 4761 AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const { 4762 switch (RMW->getOperation()) { 4763 case AtomicRMWInst::Nand: 4764 case AtomicRMWInst::FAdd: 4765 case AtomicRMWInst::FSub: 4766 return AtomicExpansionKind::CmpXChg; 4767 default: 4768 return AtomicExpansionKind::None; 4769 } 4770 } 4771